Column               Type     Stats
repo_name            string   length 7-71
file_path            string   length 5-118
context              list
import_statement     string   length 45-12.5k
token_num            int64    641-99.4k
cropped_code         string   length 44-17k
all_code             string   length 43-754k
next_line            string   length 2-330
gold_snippet_index   int64    0-68
created_at           string   length 25 (constant)
level                string   9 classes
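Each dataset row carries these columns in the order listed above. As a minimal sketch, the record layout can be written as Python TypedDicts; the type names (ContextSnippet, Record) and the per-field comments are assumptions inferred from the column statistics and the two example rows below, not an official schema definition.

```python
from typing import List, TypedDict


class ContextSnippet(TypedDict):
    # One entry of the `context` list, as seen in the example rows:
    # a referenced identifier, the file it lives in, and its source snippet.
    identifier: str
    path: str
    snippet: str


class Record(TypedDict):
    # Hypothetical shape of one dataset row, mirroring the columns above.
    repo_name: str               # 7-71 characters
    file_path: str               # 5-118 characters
    context: List[ContextSnippet]
    import_statement: str        # import block of the target file, 45-12.5k characters
    token_num: int               # 641-99.4k
    cropped_code: str            # truncated file prefix, 44-17k characters
    all_code: str                # full file contents, 43-754k characters
    next_line: str               # 2-330 characters (the continuation of cropped_code in the examples)
    gold_snippet_index: int      # 0-68, presumably an index into `context`
    created_at: str              # fixed-width 25-character timestamp
    level: str                   # one of 9 classes
```

The example rows that follow list their field values in this same order, repo_name first and level last.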
CPES-Power-and-Energy-Systems/interoperable-recommender-tso
energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/bayesian_optimization.py
[ { "identifier": "GaussianProcess", "path": "energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/helpers.py", "snippet": "class GaussianProcess(BaseEstimator, RegressorMixin):\n \"\"\"The legacy Gaussian Process model class.\n\n .. deprecated:: 0.18\n This class will be removed in 0.20.\n Use the :class:`GaussianProcessRegressor` instead.\n\n Read more in the :ref:`User Guide <gaussian_process>`.\n\n Parameters\n ----------\n regr : string or callable, optional\n A regression function returning an array of outputs of the linear\n regression functional basis. The number of observations n_samples\n should be greater than the size p of this basis.\n Default assumes a simple constant regression trend.\n Available built-in regression models are::\n\n 'constant', 'linear', 'quadratic'\n\n corr : string or callable, optional\n A stationary autocorrelation function returning the autocorrelation\n between two points x and x'.\n Default assumes a squared-exponential autocorrelation model.\n Built-in correlation models are::\n\n 'absolute_exponential', 'squared_exponential',\n 'generalized_exponential', 'cubic', 'linear'\n\n beta0 : double array_like, optional\n The regression weight vector to perform Ordinary Kriging (OK).\n Default assumes Universal Kriging (UK) so that the vector beta of\n regression weights is estimated using the maximum likelihood\n principle.\n\n storage_mode : string, optional\n A string specifying whether the Cholesky decomposition of the\n correlation matrix should be stored in the class (storage_mode =\n 'full') or not (storage_mode = 'light').\n Default assumes storage_mode = 'full', so that the\n Cholesky decomposition of the correlation matrix is stored.\n This might be a useful parameter when one is not interested in the\n MSE and only plan to estimate the BLUP, for which the correlation\n matrix is not required.\n\n verbose : boolean, optional\n A boolean specifying the verbose level.\n Default is verbose = False.\n\n theta0 : double array_like, optional\n An array with shape (n_features, ) or (1, ).\n The parameters in the autocorrelation model.\n If thetaL and thetaU are also specified, theta0 is considered as\n the starting point for the maximum likelihood estimation of the\n best set of parameters.\n Default assumes isotropic autocorrelation model with theta0 = 1e-1.\n\n thetaL : double array_like, optional\n An array with shape matching theta0's.\n Lower bound on the autocorrelation parameters for maximum\n likelihood estimation.\n Default is None, so that it skips maximum likelihood estimation and\n it uses theta0.\n\n thetaU : double array_like, optional\n An array with shape matching theta0's.\n Upper bound on the autocorrelation parameters for maximum\n likelihood estimation.\n Default is None, so that it skips maximum likelihood estimation and\n it uses theta0.\n\n normalize : boolean, optional\n Input X and observations y are centered and reduced wrt\n means and standard deviations estimated from the n_samples\n observations provided.\n Default is normalize = True so that data is normalized to ease\n maximum likelihood estimation.\n\n nugget : double or ndarray, optional\n Introduce a nugget effect to allow smooth predictions from noisy\n data. If nugget is an ndarray, it must be the same length as the\n number of data points used for the fit.\n The nugget is added to the diagonal of the assumed training covariance;\n in this way it acts as a Tikhonov regularization in the problem. 
In\n the special case of the squared exponential correlation function, the\n nugget mathematically represents the variance of the input values.\n Default assumes a nugget close to machine precision for the sake of\n robustness (nugget = 10. * MACHINE_EPSILON).\n\n optimizer : string, optional\n A string specifying the optimization algorithm to be used.\n Default uses 'fmin_cobyla' algorithm from scipy.optimize.\n Available optimizers are::\n\n 'fmin_cobyla', 'Welch'\n\n 'Welch' optimizer is dued to Welch et al., see reference [WBSWM1992]_.\n It consists in iterating over several one-dimensional optimizations\n instead of running one single multi-dimensional optimization.\n\n random_start : int, optional\n The number of times the Maximum Likelihood Estimation should be\n performed from a random starting point.\n The first MLE always uses the specified starting point (theta0),\n the next starting points are picked at random according to an\n exponential distribution (log-uniform on [thetaL, thetaU]).\n Default does not use random starting point (random_start = 1).\n\n random_state : int, RandomState instance or None, optional (default=None)\n The generator used to shuffle the sequence of coordinates of theta in\n the Welch optimizer. If int, random_state is the seed used by the\n random number generator; If RandomState instance, random_state is the\n random number generator; If None, the random number generator is the\n RandomState instance used by `np.random`.\n\n Attributes\n ----------\n theta_ : array\n Specified theta OR the best set of autocorrelation parameters (the \\\n sought maximizer of the reduced likelihood function).\n\n reduced_likelihood_function_value_ : array\n The optimal reduced likelihood function value.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.gaussian_process import GaussianProcess\n >>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T\n >>> y = (X * np.sin(X)).ravel()\n >>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)\n >>> gp.fit(X, y) # doctest: +ELLIPSIS\n GaussianProcess(beta0=None...\n ...\n\n Notes\n -----\n The presentation implementation is based on a translation of the DACE\n Matlab toolbox, see reference [NLNS2002]_.\n\n References\n ----------\n\n .. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.\n Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)\n http://imedea.uib-csic.es/master/cambioglobal/Modulo_V_cod101615/Lab/lab_maps/krigging/DACE-krigingsoft/dace/dace.pdf\n\n .. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,\n and M.D. Morris (1992). Screening, predicting, and computer\n experiments. Technometrics, 34(1) 15--25.`\n http://www.jstor.org/stable/1269548\n \"\"\"\n\n _regression_types = {\n 'constant': regression.constant,\n 'linear': regression.linear,\n 'quadratic': regression.quadratic}\n\n _correlation_types = {\n 'absolute_exponential': correlation.absolute_exponential,\n 'squared_exponential': correlation.squared_exponential,\n 'generalized_exponential': correlation.generalized_exponential,\n 'cubic': correlation.cubic,\n 'linear': correlation.linear}\n\n _optimizer_types = [\n 'fmin_cobyla',\n 'Welch']\n\n def __init__(self, regr='constant', corr='squared_exponential', beta0=None,\n storage_mode='full', verbose=False, theta0=1e-1,\n thetaL=None, thetaU=None, optimizer='fmin_cobyla',\n random_start=1, normalize=True,\n nugget=10. 
* MACHINE_EPSILON, random_state=None):\n\n self.regr = regr\n self.corr = corr\n self.beta0 = beta0\n self.storage_mode = storage_mode\n self.verbose = verbose\n self.theta0 = theta0\n self.thetaL = thetaL\n self.thetaU = thetaU\n self.normalize = normalize\n self.nugget = nugget\n self.optimizer = optimizer\n self.random_start = random_start\n self.random_state = random_state\n\n def fit(self, X, y):\n \"\"\"\n The Gaussian Process model fitting method.\n\n Parameters\n ----------\n X : double array_like\n An array with shape (n_samples, n_features) with the input at which\n observations were made.\n\n y : double array_like\n An array with shape (n_samples, ) or shape (n_samples, n_targets)\n with the observations of the output to be predicted.\n\n Returns\n -------\n gp : self\n A fitted Gaussian Process model object awaiting data to perform\n predictions.\n \"\"\"\n # Run input checks\n self._check_params()\n\n self.random_state = check_random_state(self.random_state)\n\n # Force data to 2D numpy.array\n X, y = check_X_y(X, y, multi_output=True, y_numeric=True)\n self.y_ndim_ = y.ndim\n if y.ndim == 1:\n y = y[:, np.newaxis]\n\n # Check shapes of DOE & observations\n n_samples, n_features = X.shape\n _, n_targets = y.shape\n\n # Run input checks\n self._check_params(n_samples)\n\n # Normalize data or don't\n if self.normalize:\n X_mean = np.mean(X, axis=0)\n X_std = np.std(X, axis=0)\n y_mean = np.mean(y, axis=0)\n y_std = np.std(y, axis=0)\n X_std[X_std == 0.] = 1.\n y_std[y_std == 0.] = 1.\n # center and scale X if necessary\n X = (X - X_mean) / X_std\n y = (y - y_mean) / y_std\n else:\n X_mean = np.zeros(1)\n X_std = np.ones(1)\n y_mean = np.zeros(1)\n y_std = np.ones(1)\n\n # Calculate matrix of distances D between samples\n D, ij = l1_cross_distances(X)\n if (np.min(np.sum(D, axis=1)) == 0. and self.corr != correlation.pure_nugget): # noqa\n raise Exception(\"Multiple input features cannot have the same\"\n \" target value.\")\n\n # Regression matrix and parameters\n F = self.regr(X)\n n_samples_F = F.shape[0]\n if F.ndim > 1:\n p = F.shape[1]\n else:\n p = 1\n if n_samples_F != n_samples:\n raise Exception(\"Number of rows in F and X do not match. Most \"\n \"likely something is going wrong with the \"\n \"regression model.\")\n if p > n_samples_F:\n raise Exception((\"Ordinary least squares problem is undetermined \"\n \"n_samples=%d must be greater than the \"\n \"regression model size p=%d.\") % (n_samples, p))\n if self.beta0 is not None:\n if self.beta0.shape[0] != p:\n raise Exception(\"Shapes of beta0 and F do not match.\")\n\n # Set attributes\n self.X = X\n self.y = y\n self.D = D\n self.ij = ij\n self.F = F\n self.X_mean, self.X_std = X_mean, X_std\n self.y_mean, self.y_std = y_mean, y_std\n\n # Determine Gaussian Process model parameters\n if self.thetaL is not None and self.thetaU is not None:\n # Maximum Likelihood Estimation of the parameters\n if self.verbose:\n print(\"Performing Maximum Likelihood Estimation of the \"\n \"autocorrelation parameters...\")\n self.theta_, self.reduced_likelihood_function_value_, par = \\\n self._arg_max_reduced_likelihood_function()\n if np.isinf(self.reduced_likelihood_function_value_):\n raise Exception(\"Bad parameter region. \"\n \"Try increasing upper bound\")\n\n else:\n # Given parameters\n if self.verbose:\n print(\"Given autocorrelation parameters. 
\"\n \"Computing Gaussian Process model parameters...\")\n self.theta_ = self.theta0\n self.reduced_likelihood_function_value_, par = \\\n self.reduced_likelihood_function()\n if np.isinf(self.reduced_likelihood_function_value_):\n raise Exception(\"Bad point. Try increasing theta0.\")\n\n self.beta = par['beta']\n self.gamma = par['gamma']\n self.sigma2 = par['sigma2']\n self.C = par['C']\n self.Ft = par['Ft']\n self.G = par['G']\n\n if self.storage_mode == 'light':\n # Delete heavy data (it will be computed again if required)\n # (it is required only when MSE is wanted in self.predict)\n if self.verbose:\n print(\"Light storage mode specified. \"\n \"Flushing autocorrelation matrix...\")\n self.D = None\n self.ij = None\n self.F = None\n self.C = None\n self.Ft = None\n self.G = None\n\n return self\n\n def predict(self, X, eval_MSE=False, batch_size=None):\n \"\"\"\n This function evaluates the Gaussian Process model at x.\n\n Parameters\n ----------\n X : array_like\n An array with shape (n_eval, n_features) giving the point(s) at\n which the prediction(s) should be made.\n\n eval_MSE : boolean, optional\n A boolean specifying whether the Mean Squared Error should be\n evaluated or not.\n Default assumes evalMSE = False and evaluates only the BLUP (mean\n prediction).\n\n batch_size : integer, optional\n An integer giving the maximum number of points that can be\n evaluated simultaneously (depending on the available memory).\n Default is None so that all given points are evaluated at the same\n time.\n\n Returns\n -------\n y : array_like, shape (n_samples, ) or (n_samples, n_targets)\n An array with shape (n_eval, ) if the Gaussian Process was trained\n on an array of shape (n_samples, ) or an array with shape\n (n_eval, n_targets) if the Gaussian Process was trained on an array\n of shape (n_samples, n_targets) with the Best Linear Unbiased\n Prediction at x.\n\n MSE : array_like, optional (if eval_MSE == True)\n An array with shape (n_eval, ) or (n_eval, n_targets) as with y,\n with the Mean Squared Error at x.\n \"\"\"\n check_is_fitted(self, \"X\")\n\n # Check input shapes\n X = check_array(X)\n n_eval, _ = X.shape\n n_samples, n_features = self.X.shape\n n_samples_y, n_targets = self.y.shape\n\n # Run input checks\n self._check_params(n_samples)\n\n if X.shape[1] != n_features:\n raise ValueError((\"The number of features in X (X.shape[1] = %d) \"\n \"should match the number of features used \"\n \"for fit() \"\n \"which is %d.\") % (X.shape[1], n_features))\n\n if batch_size is None:\n # No memory management\n # (evaluates all given points in a single batch run)\n\n # Normalize input\n X = (X - self.X_mean) / self.X_std\n\n # Initialize output\n y = np.zeros(n_eval)\n if eval_MSE:\n MSE = np.zeros(n_eval)\n\n # Get pairwise componentwise L1-distances to the input training set\n dx = manhattan_distances(X, Y=self.X, sum_over_features=False)\n # Get regression function and correlation\n f = self.regr(X)\n r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)\n\n # Scaled predictor\n y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)\n\n # Predictor\n y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)\n\n if self.y_ndim_ == 1:\n y = y.ravel()\n\n # Mean Squared Error\n if eval_MSE:\n C = self.C\n if C is None:\n # Light storage mode (need to recompute C, F, Ft and G)\n if self.verbose:\n print(\"This GaussianProcess used 'light' storage mode \"\n \"at instantiation. 
Need to recompute \"\n \"autocorrelation matrix...\")\n reduced_likelihood_function_value, par = \\\n self.reduced_likelihood_function()\n self.C = par['C']\n self.Ft = par['Ft']\n self.G = par['G']\n\n rt = linalg.solve_triangular(self.C, r.T, lower=True)\n\n if self.beta0 is None:\n # Universal Kriging\n u = linalg.solve_triangular(self.G.T,\n np.dot(self.Ft.T, rt) - f.T,\n lower=True)\n else:\n # Ordinary Kriging\n u = np.zeros((n_targets, n_eval))\n\n MSE = np.dot(self.sigma2.reshape(n_targets, 1),\n (1. - (rt ** 2.).sum(axis=0)\n + (u ** 2.).sum(axis=0))[np.newaxis, :])\n MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)\n\n # Mean Squared Error might be slightly negative depending on\n # machine precision: force to zero!\n MSE[MSE < 0.] = 0.\n\n if self.y_ndim_ == 1:\n MSE = MSE.ravel()\n\n return y, MSE\n\n else:\n\n return y\n\n else:\n # Memory management\n\n if type(batch_size) is not int or batch_size <= 0:\n raise Exception(\"batch_size must be a positive integer\")\n\n if eval_MSE:\n\n y, MSE = np.zeros(n_eval), np.zeros(n_eval)\n for k in range(max(1, int(n_eval / batch_size))):\n batch_from = k * batch_size\n batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])\n y[batch_from:batch_to], MSE[batch_from:batch_to] = \\\n self.predict(X[batch_from:batch_to],\n eval_MSE=eval_MSE, batch_size=None)\n\n return y, MSE\n\n else:\n\n y = np.zeros(n_eval)\n for k in range(max(1, int(n_eval / batch_size))):\n batch_from = k * batch_size\n batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])\n y[batch_from:batch_to] = \\\n self.predict(X[batch_from:batch_to],\n eval_MSE=eval_MSE, batch_size=None)\n\n return y\n\n def reduced_likelihood_function(self, theta=None):\n \"\"\"\n This function determines the BLUP parameters and evaluates the reduced\n likelihood function for the given autocorrelation parameters theta.\n\n Maximizing this function wrt the autocorrelation parameters theta is\n equivalent to maximizing the likelihood of the assumed joint Gaussian\n distribution of the observations y evaluated onto the design of\n experiments X.\n\n Parameters\n ----------\n theta : array_like, optional\n An array containing the autocorrelation parameters at which the\n Gaussian Process model parameters should be determined.\n Default uses the built-in autocorrelation parameters\n (ie ``theta = self.theta_``).\n\n Returns\n -------\n reduced_likelihood_function_value : double\n The value of the reduced likelihood function associated to the\n given autocorrelation parameters theta.\n\n par : dict\n A dictionary containing the requested Gaussian Process model\n parameters:\n\n - ``sigma2`` is the Gaussian Process variance.\n - ``beta`` is the generalized least-squares regression weights for\n Universal Kriging or given beta0 for Ordinary Kriging.\n - ``gamma`` is the Gaussian Process weights.\n - ``C`` is the Cholesky decomposition of the correlation\n matrix [R].\n - ``Ft`` is the solution of the linear equation system\n [R] x Ft = F\n - ``G`` is the QR decomposition of the matrix Ft.\n \"\"\"\n check_is_fitted(self, \"X\")\n\n if theta is None:\n # Use built-in autocorrelation parameters\n theta = self.theta_\n\n # Initialize output\n reduced_likelihood_function_value = - np.inf\n par = {}\n\n # Retrieve data\n n_samples = self.X.shape[0]\n D = self.D\n ij = self.ij\n F = self.F\n\n if D is None:\n # Light storage mode (need to recompute D, ij and F)\n D, ij = l1_cross_distances(self.X)\n if (np.min(np.sum(D, axis=1)) == 0.\n and self.corr != correlation.pure_nugget):\n raise 
Exception(\"Multiple X are not allowed\")\n F = self.regr(self.X)\n\n # Set up R\n r = self.corr(theta, D)\n R = np.eye(n_samples) * (1. + self.nugget)\n R[ij[:, 0], ij[:, 1]] = r\n R[ij[:, 1], ij[:, 0]] = r\n\n # Cholesky decomposition of R\n try:\n C = linalg.cholesky(R, lower=True)\n except linalg.LinAlgError:\n return reduced_likelihood_function_value, par\n\n # Get generalized least squares solution\n Ft = linalg.solve_triangular(C, F, lower=True)\n Q, G = linalg.qr(Ft, mode='economic')\n\n sv = linalg.svd(G, compute_uv=False)\n rcondG = sv[-1] / sv[0]\n if rcondG < 1e-10:\n # Check F\n sv = linalg.svd(F, compute_uv=False)\n condF = sv[0] / sv[-1]\n if condF > 1e15:\n raise Exception(\"F is too ill conditioned. Poor combination \"\n \"of regression model and observations.\")\n else:\n # Ft is too ill conditioned, get out (try different theta)\n return reduced_likelihood_function_value, par\n\n Yt = linalg.solve_triangular(C, self.y, lower=True)\n if self.beta0 is None:\n # Universal Kriging\n beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))\n else:\n # Ordinary Kriging\n beta = np.array(self.beta0)\n\n rho = Yt - np.dot(Ft, beta)\n sigma2 = (rho ** 2.).sum(axis=0) / n_samples\n # The determinant of R is equal to the squared product of the diagonal\n # elements of its Cholesky decomposition C\n detR = (np.diag(C) ** (2. / n_samples)).prod()\n\n # Compute/Organize output\n reduced_likelihood_function_value = - sigma2.sum() * detR\n par['sigma2'] = sigma2 * self.y_std ** 2.\n par['beta'] = beta\n par['gamma'] = linalg.solve_triangular(C.T, rho)\n par['C'] = C\n par['Ft'] = Ft\n par['G'] = G\n\n return reduced_likelihood_function_value, par\n\n def _arg_max_reduced_likelihood_function(self):\n \"\"\"\n This function estimates the autocorrelation parameters theta as the\n maximizer of the reduced likelihood function.\n (Minimization of the opposite reduced likelihood function is used for\n convenience)\n\n Parameters\n ----------\n self : All parameters are stored in the Gaussian Process model object.\n\n Returns\n -------\n optimal_theta : array_like\n The best set of autocorrelation parameters (the sought maximizer of\n the reduced likelihood function).\n\n optimal_reduced_likelihood_function_value : double\n The optimal reduced likelihood function value.\n\n optimal_par : dict\n The BLUP parameters associated to thetaOpt.\n \"\"\"\n\n # Initialize output\n best_optimal_theta = []\n best_optimal_rlf_value = []\n best_optimal_par = []\n\n if self.verbose:\n print(\"The chosen optimizer is: \" + str(self.optimizer))\n if self.random_start > 1:\n print(str(self.random_start) + \" random starts are required.\")\n\n percent_completed = 0.\n\n # Force optimizer to fmin_cobyla if the model is meant to be isotropic\n if self.optimizer == 'Welch' and self.theta0.size == 1:\n self.optimizer = 'fmin_cobyla'\n\n if self.optimizer == 'fmin_cobyla':\n\n def minus_reduced_likelihood_function(log10t):\n return - self.reduced_likelihood_function(\n theta=10. 
** log10t)[0]\n\n constraints = []\n for i in range(self.theta0.size):\n constraints.append(lambda log10t, i=i:\n log10t[i] - np.log10(self.thetaL[0, i]))\n constraints.append(lambda log10t, i=i:\n np.log10(self.thetaU[0, i]) - log10t[i])\n\n for k in range(self.random_start):\n\n if k == 0:\n # Use specified starting point as first guess\n theta0 = self.theta0\n else:\n # Generate a random starting point log10-uniformly\n # distributed between bounds\n log10theta0 = (np.log10(self.thetaL)\n + self.random_state.rand(*self.theta0.shape)\n * np.log10(self.thetaU / self.thetaL))\n theta0 = 10. ** log10theta0\n\n # Run Cobyla\n try:\n log10_optimal_theta = \\\n optimize.fmin_cobyla(minus_reduced_likelihood_function,\n np.log10(theta0).ravel(),\n constraints)\n except ValueError as ve:\n print(\"Optimization failed. Try increasing the ``nugget``\")\n raise ve\n\n optimal_theta = 10. ** log10_optimal_theta\n optimal_rlf_value, optimal_par = \\\n self.reduced_likelihood_function(theta=optimal_theta)\n\n # Compare the new optimizer to the best previous one\n if k > 0:\n if optimal_rlf_value > best_optimal_rlf_value:\n best_optimal_rlf_value = optimal_rlf_value\n best_optimal_par = optimal_par\n best_optimal_theta = optimal_theta\n else:\n best_optimal_rlf_value = optimal_rlf_value\n best_optimal_par = optimal_par\n best_optimal_theta = optimal_theta\n if self.verbose and self.random_start > 1:\n if (20 * k) / self.random_start > percent_completed:\n percent_completed = (20 * k) / self.random_start\n print(\"%s completed\" % (5 * percent_completed))\n\n optimal_rlf_value = best_optimal_rlf_value\n optimal_par = best_optimal_par\n optimal_theta = best_optimal_theta\n\n elif self.optimizer == 'Welch':\n\n # Backup of the given attributes\n theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU\n corr = self.corr\n verbose = self.verbose\n\n # This will iterate over fmin_cobyla optimizer\n self.optimizer = 'fmin_cobyla'\n self.verbose = False\n\n # Initialize under isotropy assumption\n if verbose:\n print(\"Initialize under isotropy assumption...\")\n self.theta0 = check_array(self.theta0.min())\n self.thetaL = check_array(self.thetaL.min())\n self.thetaU = check_array(self.thetaU.max())\n theta_iso, optimal_rlf_value_iso, par_iso = \\\n self._arg_max_reduced_likelihood_function()\n optimal_theta = theta_iso + np.zeros(theta0.shape)\n\n # Iterate over all dimensions of theta allowing for anisotropy\n if verbose:\n print(\"Now improving allowing for anisotropy...\")\n for i in self.random_state.permutation(theta0.size):\n if verbose:\n print(\"Proceeding along dimension %d...\" % (i + 1))\n self.theta0 = check_array(theta_iso)\n self.thetaL = check_array(thetaL[0, i])\n self.thetaU = check_array(thetaU[0, i])\n\n def corr_cut(t, d):\n return corr(check_array(np.hstack(\n [\n optimal_theta[0][0:i], t[0],\n optimal_theta[0][(i + 1)::]\n ]\n )), d)\n\n self.corr = corr_cut\n optimal_theta[0, i], optimal_rlf_value, optimal_par = \\\n self._arg_max_reduced_likelihood_function()\n\n # Restore the given attributes\n self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU\n self.corr = corr\n self.optimizer = 'Welch'\n self.verbose = verbose\n\n else:\n\n raise NotImplementedError(\"This optimizer ('%s') is not \"\n \"implemented yet. 
Please contribute!\"\n % self.optimizer)\n\n return optimal_theta, optimal_rlf_value, optimal_par\n\n def _check_params(self, n_samples=None):\n\n # Check regression model\n if not callable(self.regr):\n if self.regr in self._regression_types:\n self.regr = self._regression_types[self.regr]\n else:\n raise ValueError(\"regr should be one of %s or callable, \"\n \"%s was given.\"\n % (self._regression_types.keys(), self.regr))\n\n # Check regression weights if given (Ordinary Kriging)\n if self.beta0 is not None:\n self.beta0 = np.atleast_2d(self.beta0)\n if self.beta0.shape[1] != 1:\n # Force to column vector\n self.beta0 = self.beta0.T\n\n # Check correlation model\n if not callable(self.corr):\n if self.corr in self._correlation_types:\n self.corr = self._correlation_types[self.corr]\n else:\n raise ValueError(\"corr should be one of %s or callable, \"\n \"%s was given.\"\n % (self._correlation_types.keys(), self.corr))\n\n # Check storage mode\n if self.storage_mode != 'full' and self.storage_mode != 'light':\n raise ValueError(\"Storage mode should either be 'full' or \"\n \"'light', %s was given.\" % self.storage_mode)\n\n # Check correlation parameters\n self.theta0 = np.atleast_2d(self.theta0)\n lth = self.theta0.size\n\n if self.thetaL is not None and self.thetaU is not None:\n self.thetaL = np.atleast_2d(self.thetaL)\n self.thetaU = np.atleast_2d(self.thetaU)\n if self.thetaL.size != lth or self.thetaU.size != lth:\n raise ValueError(\"theta0, thetaL and thetaU must have the \"\n \"same length.\")\n if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):\n raise ValueError(\"The bounds must satisfy O < thetaL <= \"\n \"thetaU.\")\n\n elif self.thetaL is None and self.thetaU is None:\n if np.any(self.theta0 <= 0):\n raise ValueError(\"theta0 must be strictly positive.\")\n\n elif self.thetaL is None or self.thetaU is None:\n raise ValueError(\"thetaL and thetaU should either be both or \"\n \"neither specified.\")\n\n # Force verbose type to bool\n self.verbose = bool(self.verbose)\n\n # Force normalize type to bool\n self.normalize = bool(self.normalize)\n\n # Check nugget value\n self.nugget = np.asarray(self.nugget)\n if np.any(self.nugget) < 0.:\n raise ValueError(\"nugget must be positive or zero.\")\n if (n_samples is not None\n and self.nugget.shape not in [(), (n_samples,)]):\n raise ValueError(\"nugget must be either a scalar \"\n \"or array of length n_samples.\")\n\n # Check optimizer\n if self.optimizer not in self._optimizer_types:\n raise ValueError(\"optimizer should be one of %s\"\n % self._optimizer_types)\n\n # Force random_start type to int\n self.random_start = int(self.random_start)" }, { "identifier": "UtilityFunction", "path": "energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/helpers.py", "snippet": "class UtilityFunction(object):\n \"\"\"\n An object to compute the acquisition functions.\n \"\"\"\n\n def __init__(self, kind, kappa, xi):\n \"\"\"\n If UCB is to be used, a constant kappa is needed.\n \"\"\"\n self.kappa = kappa\n self.xi = xi\n if kind not in ['ucb', 'ei', 'poi']:\n err = \"The utility function \" \\\n \"{} has not been implemented, \" \\\n \"please choose one of ucb, ei, or poi.\".format(kind)\n raise NotImplementedError(err)\n else:\n self.kind = kind\n\n def utility(self, x, gp, y_max):\n if self.kind == 'ucb':\n return self._ucb(x, gp, self.kappa)\n if self.kind == 'ei':\n return self._ei(x, gp, y_max, self.xi)\n if self.kind == 'poi':\n return self._poi(x, gp, y_max, self.xi)\n\n 
@staticmethod\n def _ucb(x, gp, kappa):\n mean, var = gp.predict(x, eval_MSE=True)\n return mean + kappa * np.sqrt(var)\n\n @staticmethod\n def _ei(x, gp, y_max, xi):\n mean, var = gp.predict(x, eval_MSE=True)\n\n # Avoid points with zero variance\n var = np.maximum(var, 1e-9 + 0 * var)\n\n z = (mean - y_max - xi) / np.sqrt(var)\n return (mean - y_max - xi) * norm.cdf(z) + np.sqrt(var) * norm.pdf(z)\n\n @staticmethod\n def _poi(x, gp, y_max, xi):\n mean, var = gp.predict(x, eval_MSE=True)\n\n # Avoid points with zero variance\n var = np.maximum(var, 1e-9 + 0 * var)\n\n z = (mean - y_max - xi) / np.sqrt(var)\n return norm.cdf(z)" }, { "identifier": "unique_rows", "path": "energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/helpers.py", "snippet": "def unique_rows(a):\n \"\"\"\n A functions to trim repeated rows that may appear when optimizing.\n This is necessary to avoid the sklearn GP object from breaking\n\n :param a: array to trim repeated rows from\n\n :return: mask of unique rows\n \"\"\"\n\n # Sort array and kep track of where things should go back to\n order = np.lexsort(a.T)\n reorder = np.argsort(order)\n\n a = a[order]\n diff = np.diff(a, axis=0)\n ui = np.ones(len(a), 'bool')\n ui[1:] = (diff != 0).any(axis=1)\n\n return ui[reorder]" }, { "identifier": "PrintLog", "path": "energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/helpers.py", "snippet": "class PrintLog(object):\n\n def __init__(self, params):\n\n self.ymax = None\n self.xmax = None\n self.params = params\n self.ite = 1\n\n self.start_time = datetime.now()\n self.last_round = datetime.now()\n\n # sizes of parameters name and all\n self.sizes = [max(len(ps), 7) for ps in params]\n\n # Sorted indexes to access parameters\n self.sorti = sorted(range(len(self.params)),\n key=self.params.__getitem__)\n\n def reset_timer(self):\n self.start_time = datetime.now()\n self.last_round = datetime.now()\n\n def print_header(self, initialization=True):\n\n if initialization:\n print(\"{}Initialization{}\".format(BColours.RED,\n BColours.ENDC))\n else:\n print(\"{}Bayesian Optimization{}\".format(BColours.RED,\n BColours.ENDC))\n\n print(BColours.BLUE + \"-\" * (29 + sum([s + 5 for s in self.sizes]))\n + BColours.ENDC)\n\n print(\"{0:>{1}}\".format(\"Step\", 5), end=\" | \")\n print(\"{0:>{1}}\".format(\"Time\", 6), end=\" | \")\n print(\"{0:>{1}}\".format(\"Value\", 10), end=\" | \")\n\n for index in self.sorti:\n print(\"{0:>{1}}\".format(self.params[index],\n self.sizes[index] + 2),\n end=\" | \")\n print('')\n\n def print_step(self, x, y, warning=False):\n\n print(\"{:>5d}\".format(self.ite), end=\" | \")\n\n m, s = divmod((datetime.now() - self.last_round).total_seconds(), 60)\n print(\"{:>02d}m{:>02d}s\".format(int(m), int(s)), end=\" | \")\n\n if self.ymax is None or self.ymax < y:\n self.ymax = y\n self.xmax = x\n print(\"{0}{2: >10.5f}{1}\".format(BColours.MAGENTA,\n BColours.ENDC,\n y),\n end=\" | \")\n\n for index in self.sorti:\n print(\"{0}{2: >{3}.{4}f}{1}\".format(BColours.GREEN,\n BColours.ENDC,\n x[index],\n self.sizes[index] + 2,\n min(self.sizes[index] - 3,\n 6 - 2)),\n end=\" | \")\n else:\n print(\"{: >10.5f}\".format(y), end=\" | \")\n for index in self.sorti:\n print(\"{0: >{1}.{2}f}\".format(x[index],\n self.sizes[index] + 2,\n min(self.sizes[index] - 3,\n 6 - 2)),\n end=\" | \")\n\n if warning:\n print(\"{}Warning: Test point chose at \"\n \"random due to repeated sample.{}\".format(BColours.RED,\n BColours.ENDC))\n\n 
print()\n\n self.last_round = datetime.now()\n self.ite += 1\n\n def print_summary(self):\n pass" } ]
import numpy as np
from .helpers import GaussianProcess
from scipy.optimize import minimize
from .helpers import UtilityFunction, unique_rows, PrintLog
10,671
""" # Start with the lower bound as the argmax x_max = bounds[:, 0] max_acq = None x_tries = np.random.uniform(bounds[:, 0], bounds[:, 1], size=(100, bounds.shape[0])) for x_try in x_tries: # Find the minimum of minus the acquisition function res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max), x_try.reshape(1, -1), bounds=bounds, method="L-BFGS-B") # Store it if better than previous minimum(maximum). if max_acq is None or -res.fun >= max_acq: x_max = res.x max_acq = -res.fun # Clip output to make sure it lies within the bounds. Due to floating # point technicalities this is not always the case. return np.clip(x_max, bounds[:, 0], bounds[:, 1]) def matern52(theta, d): """ Matern 5/2 correlation model.:: theta, d --> r(theta, d) = (1+sqrt(5)*r + 5/3*r^2)*exp(-sqrt(5)*r) n where r = sqrt(sum (d_i)^2 / (theta_i)^2 ) i = 1 Parameters ---------- theta : array_like An array with shape 1 (isotropic) or n (anisotropic) giving the autocorrelation parameter(s). d : array_like An array with shape (n_eval, n_features) giving the componentwise distances between locations x and x' at which the correlation model should be evaluated. Returns ------- r : array_like An array with shape (n_eval, ) containing the values of the autocorrelation modle. """ theta = np.asarray(theta, dtype=np.float) d = np.asarray(d, dtype=np.float) if d.ndim > 1: n_features = d.shape[1] else: n_features = 1 if theta.size == 1: r = np.sqrt(np.sum(d ** 2, axis=1)) / theta[0] elif theta.size != n_features: raise ValueError("Length of theta must be 1 or %s" % n_features) else: r = np.sqrt(np.sum(d ** 2 / theta.reshape(1, n_features) ** 2, axis=1)) return (1 + np.sqrt(5) * r + 5 / 3. * r ** 2) * np.exp(-np.sqrt(5) * r) class BayesianOptimization(object): def __init__(self, f, pbounds, verbose=1): """ :param f: Function to be maximized. :param pbounds: Dictionary with parameters names as keys and a tuple with minimum and maximum values. :param verbose: Whether or not to print progress. """ # Store the original dictionary self.pbounds = pbounds # Get the name of the parameters self.keys = list(pbounds.keys()) # Find number of parameters self.dim = len(pbounds) # Create an array with parameters bounds self.bounds = [] for key in self.pbounds.keys(): self.bounds.append(self.pbounds[key]) self.bounds = np.asarray(self.bounds) # Some function to be optimized self.f = f # Initialization flag self.initialized = False # Initialization lists --- stores starting points before process begins self.init_points = [] self.x_init = [] self.y_init = [] # Numpy array place holders self.X = None self.Y = None # Counter of iterations self.i = 0 # Since scipy 0.16 passing lower and upper bound to theta seems to be # broken. However, there is a lot of development going on around GP # is scikit-learn. So I'll pick the easy route here and simple specify # only theta0.
""" BAYESIAN OPTIMIZATION MODULE - Version 0.1.0 Created by Fernando Nogueira (fmfn). Available in - https://github.com/fmfn/BayesianOptimization """ __author__ = 'fmfn' def acq_max(ac, gp, y_max, bounds): """ A function to find the maximum of the acquisition function using the 'L-BFGS-B' method. Parameters ---------- :param ac: The acquisition function object that return its point-wise value. :param gp: A gaussian process fitted to the relevant data. :param y_max: The current maximum known value of the target function. :param bounds: The variables bounds to limit the search of the acq max. Returns ------- :return: x_max, The arg max of the acquisition function. """ # Start with the lower bound as the argmax x_max = bounds[:, 0] max_acq = None x_tries = np.random.uniform(bounds[:, 0], bounds[:, 1], size=(100, bounds.shape[0])) for x_try in x_tries: # Find the minimum of minus the acquisition function res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max), x_try.reshape(1, -1), bounds=bounds, method="L-BFGS-B") # Store it if better than previous minimum(maximum). if max_acq is None or -res.fun >= max_acq: x_max = res.x max_acq = -res.fun # Clip output to make sure it lies within the bounds. Due to floating # point technicalities this is not always the case. return np.clip(x_max, bounds[:, 0], bounds[:, 1]) def matern52(theta, d): """ Matern 5/2 correlation model.:: theta, d --> r(theta, d) = (1+sqrt(5)*r + 5/3*r^2)*exp(-sqrt(5)*r) n where r = sqrt(sum (d_i)^2 / (theta_i)^2 ) i = 1 Parameters ---------- theta : array_like An array with shape 1 (isotropic) or n (anisotropic) giving the autocorrelation parameter(s). d : array_like An array with shape (n_eval, n_features) giving the componentwise distances between locations x and x' at which the correlation model should be evaluated. Returns ------- r : array_like An array with shape (n_eval, ) containing the values of the autocorrelation modle. """ theta = np.asarray(theta, dtype=np.float) d = np.asarray(d, dtype=np.float) if d.ndim > 1: n_features = d.shape[1] else: n_features = 1 if theta.size == 1: r = np.sqrt(np.sum(d ** 2, axis=1)) / theta[0] elif theta.size != n_features: raise ValueError("Length of theta must be 1 or %s" % n_features) else: r = np.sqrt(np.sum(d ** 2 / theta.reshape(1, n_features) ** 2, axis=1)) return (1 + np.sqrt(5) * r + 5 / 3. * r ** 2) * np.exp(-np.sqrt(5) * r) class BayesianOptimization(object): def __init__(self, f, pbounds, verbose=1): """ :param f: Function to be maximized. :param pbounds: Dictionary with parameters names as keys and a tuple with minimum and maximum values. :param verbose: Whether or not to print progress. """ # Store the original dictionary self.pbounds = pbounds # Get the name of the parameters self.keys = list(pbounds.keys()) # Find number of parameters self.dim = len(pbounds) # Create an array with parameters bounds self.bounds = [] for key in self.pbounds.keys(): self.bounds.append(self.pbounds[key]) self.bounds = np.asarray(self.bounds) # Some function to be optimized self.f = f # Initialization flag self.initialized = False # Initialization lists --- stores starting points before process begins self.init_points = [] self.x_init = [] self.y_init = [] # Numpy array place holders self.X = None self.Y = None # Counter of iterations self.i = 0 # Since scipy 0.16 passing lower and upper bound to theta seems to be # broken. However, there is a lot of development going on around GP # is scikit-learn. So I'll pick the easy route here and simple specify # only theta0.
self.gp = GaussianProcess(corr=matern52,
0
2023-11-17 09:23:38+00:00
12k
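This closes the first example record. The column names (cropped_code, next_line, gold_snippet_index) suggest a next-line code-completion task with retrieved cross-file context. The sketch below shows one assumed way to turn a Record (from the sketch above) into a prompt and score a model's output; the prompt layout and the exact-match criterion are my own assumptions, not part of the dataset.

```python
def build_prompt(record: Record) -> str:
    # Assumed prompt layout: the gold cross-file snippet, then the in-file prefix.
    gold = record["context"][record["gold_snippet_index"]]
    return f"# From {gold['path']}\n{gold['snippet']}\n\n{record['cropped_code']}"


def exact_match(prediction: str, record: Record) -> bool:
    # Compare the first non-empty predicted line against next_line.
    lines = [ln.strip() for ln in prediction.splitlines() if ln.strip()]
    return bool(lines) and lines[0] == record["next_line"].strip()
```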
PlaxtonFlarion/NexaFlow
frameflow/framix.py
[ { "identifier": "Show", "path": "frameflow/show.py", "snippet": "class Show(object):\n\n console = Console()\n\n @staticmethod\n def retry_fail_logo():\n logo = \"\"\"[bold]\n ╔════════════════════════════════╗\n ║ Retry Failed ║\n ╚════════════════════════════════╝\n\n 抱歉,尝试次数已达上限,无法完成操作。\n 请稍后再试或联系技术支持寻求帮助。\n\n 您的理解与耐心是我们不断进步的动力!\n \"\"\"\n Show.console.print(logo)\n\n @staticmethod\n def connect_fail_logo():\n logo = \"\"\"[bold]\n ╔════════════════════════════════╗\n ║ Connect Failed ║\n ╚════════════════════════════════╝\n\n 🚫 连接超时 - 程序退出 🚫\n\n 由于长时间无法建立连接,程序现在将自动退出。\n 请检查您的设备或联系技术支持。\n 感谢您的耐心,期待下次再见!\n \"\"\"\n Show.console.print(logo)\n\n @staticmethod\n def major_logo():\n logo = \"\"\"[bold #D0D0D0]\n ███╗ ██╗███████╗██╗ ██╗ █████╗ ███████╗██╗ ██████╗ ██╗ ██╗\n ██╔██╗ ██║██╔════╝╚██╗██╔╝██╔══██╗ ██╔════╝██║ ██╔═══██╗██║ ██║\n ██║╚██╗██║█████╗ ╚███╔╝ ███████║ █████╗ ██║ ██║ ██║██║ █╗ ██║\n ██║ ╚████║██╔══╝ ██╔██╗ ██╔══██║ ██╔══╝ ██║ ██║ ██║██║███╗██║\n ██║ ╚███║███████╗██╔╝ ██╗██║ ██║ ██║ ███████╗╚██████╔╝╚███╔███╔╝\n ╚═╝ ╚══╝╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝ ╚══════╝ ╚═════╝ ╚══╝╚══╝\n \"\"\"\n Show.console.print(logo)\n\n @staticmethod\n def minor_logo():\n logo = \"\"\"[bold #D0D0D0]\n ███████╗██████╗ █████╗ ███╗ ███╗██╗██╗ ██╗\n ██╔════╝██╔══██╗██╔══██╗ ████╗ ████║██║╚██╗██╔╝\n █████╗ ██████╔╝███████║ ██╔████╔██║██║ ╚███╔╝\n ██╔══╝ ██╔══██╗██╔══██║ ██║╚██╔╝██║██║ ██╔██╗\n ██║ ██║ ██║██║ ██║ ██║ ╚═╝ ██║██║██╔╝ ██╗\n ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝ ╚═╝╚═╝╚═╝ ╚═╝\n \"\"\"\n Show.console.print(logo)\n\n @staticmethod\n def help_document():\n table_major = Table(\n title=\"[bold #FF851B]NexaFlow Framix Main Command Line\",\n header_style=\"bold #FF851B\", title_justify=\"center\",\n show_header=True, show_lines=True\n )\n table_major.add_column(\"主要命令\", justify=\"center\", width=12)\n table_major.add_column(\"参数类型\", justify=\"center\", width=12)\n table_major.add_column(\"传递次数\", justify=\"center\", width=8)\n table_major.add_column(\"附加命令\", justify=\"center\", width=8)\n table_major.add_column(\"功能说明\", justify=\"center\", width=22)\n\n table_major.add_row(\n \"[bold #FFDC00]--flick\", \"[bold #7FDBFF]布尔\", \"[bold #8A8A8A]一次\", \"[bold #D7FF00]支持\", \"[bold #39CCCC]录制分析视频帧\"\n )\n table_major.add_row(\n \"[bold #FFDC00]--alone\", \"[bold #7FDBFF]布尔\", \"[bold #8A8A8A]一次\", \"\", \"[bold #39CCCC]录制视频\"\n )\n table_major.add_row(\n \"[bold #FFDC00]--paint\", \"[bold #7FDBFF]布尔\", \"[bold #8A8A8A]一次\", \"[bold #D7FF00]支持\", \"[bold #39CCCC]绘制分割线条\"\n )\n table_major.add_row(\n \"[bold #FFDC00]--input\", \"[bold #7FDBFF]视频文件\", \"[bold #FFAFAF]多次\", \"[bold #D7FF00]支持\", \"[bold #39CCCC]分析单个视频\"\n )\n table_major.add_row(\n \"[bold #FFDC00]--whole\", \"[bold #7FDBFF]视频集合\", \"[bold #FFAFAF]多次\", \"[bold #D7FF00]支持\", \"[bold #39CCCC]分析全部视频\"\n )\n table_major.add_row(\n \"[bold #FFDC00]--merge\", \"[bold #7FDBFF]报告集合\", \"[bold #FFAFAF]多次\", \"\", \"[bold #39CCCC]聚合报告\"\n )\n table_major.add_row(\n \"[bold #FFDC00]--train\", \"[bold #7FDBFF]视频文件\", \"[bold #FFAFAF]多次\", \"\", \"[bold #39CCCC]归类图片文件\"\n )\n table_major.add_row(\n \"[bold #FFDC00]--build\", \"[bold #7FDBFF]图片集合\", \"[bold #FFAFAF]多次\", \"\", \"[bold #39CCCC]训练模型文件\"\n )\n\n table_minor = Table(\n title=\"[bold #FF851B]NexaFlow Framix Extra Command Line\",\n header_style=\"bold #FF851B\", title_justify=\"center\",\n show_header=True, show_lines=True\n )\n table_minor.add_column(\"附加命令\", justify=\"center\", width=12)\n table_minor.add_column(\"参数类型\", justify=\"center\", width=12)\n table_minor.add_column(\"传递次数\", 
justify=\"center\", width=8)\n table_minor.add_column(\"默认状态\", justify=\"center\", width=8)\n table_minor.add_column(\"功能说明\", justify=\"center\", width=22)\n\n table_minor.add_row(\n \"[bold #FFDC00]--boost\", \"[bold #7FDBFF]布尔\", \"[bold #8A8A8A]一次\", \"[bold #AFAFD7]关闭\", \"[bold #39CCCC]快速模式\"\n )\n table_minor.add_row(\n \"[bold #FFDC00]--color\", \"[bold #7FDBFF]布尔\", \"[bold #8A8A8A]一次\", \"[bold #AFAFD7]关闭\", \"[bold #39CCCC]彩色模式\"\n )\n table_minor.add_row(\n \"[bold #FFDC00]--focus\", \"[bold #7FDBFF]布尔\", \"[bold #8A8A8A]一次\", \"[bold #AFAFD7]关闭\", \"[bold #39CCCC]转换视频\"\n )\n table_minor.add_row(\n \"[bold #FFDC00]--shape\", \"[bold #7FDBFF]数值\", \"[bold #8A8A8A]一次\", \"[bold #AFAFD7]自动\", \"[bold #39CCCC]图片尺寸\"\n )\n table_minor.add_row(\n \"[bold #FFDC00]--scale\", \"[bold #7FDBFF]数值\", \"[bold #8A8A8A]一次\", \"[bold #AFAFD7]自动\", \"[bold #39CCCC]缩放比例\"\n )\n table_minor.add_row(\n \"[bold #FFDC00]--crops\", \"[bold #7FDBFF]坐标\", \"[bold #FFAFAF]多次\", \"[bold #AFAFD7]自动\", \"[bold #39CCCC]获取区域\"\n )\n table_minor.add_row(\n \"[bold #FFDC00]--omits\", \"[bold #7FDBFF]坐标\", \"[bold #FFAFAF]多次\", \"[bold #AFAFD7]自动\", \"[bold #39CCCC]忽略区域\"\n )\n Show.major_logo()\n Show.console.print(table_major)\n Show.minor_logo()\n Show.console.print(table_minor)\n with Progress() as progress:\n task = progress.add_task(\"[bold #FFFFD7]Framix Terminal Command.\", total=100)\n while not progress.finished:\n progress.update(task, advance=1)\n time.sleep(0.1)\n\n @staticmethod\n def tips_document():\n table = Table(show_header=True, header_style=\"bold #D7FF00\", show_lines=True)\n table.add_column(\"选项\", justify=\"center\", width=12)\n table.add_column(\"参数\", justify=\"center\", width=12)\n table.add_column(\"说明\", justify=\"center\", width=44)\n table.add_row(\"[bold #FFAFAF]header\", \"[bold #AFD7FF]标题名\", \"[bold #DADADA]生成一个新标题文件夹\")\n table.add_row(\"[bold #FFAFAF]serial\", \"\", \"[bold #DADADA]重新选择已连接的设备\")\n table.add_row(\"[bold #FFAFAF]deploy\", \"\", \"[bold #DADADA]重新部署视频分析配置\")\n table.add_row(\"[bold #FFAFAF]******\", \"\", \"[bold #DADADA]任意数字代表录制时长\")\n Show.console.print(table)" }, { "identifier": "Manage", "path": "frameflow/manage.py", "snippet": "class Manage(object):\n\n def __init__(self, adb: str):\n self.__adb = adb\n\n async def current_device(self) -> dict[str, \"Device\"]:\n\n async def check(serial: str) -> \"Device\":\n brand, version = await asyncio.gather(\n Terminal.cmd_line(self.__adb, \"-s\", serial, \"wait-for-usb-device\", \"shell\", \"getprop\", \"ro.product.brand\"),\n Terminal.cmd_line(self.__adb, \"-s\", serial, \"wait-for-usb-device\", \"shell\", \"getprop\", \"ro.build.version.release\")\n )\n return Device(self.__adb, serial, brand, version)\n\n device_dict = {}\n devices = await Terminal.cmd_line(self.__adb, \"devices\")\n serial_list = [i.split()[0] for i in devices.split(\"\\n\")[1:]]\n if len(serial_list) > 0:\n tasks = [check(serial) for serial in serial_list]\n result = await asyncio.gather(*tasks)\n device_dict = {str(idx + 1): c for idx, c in enumerate(result)}\n return device_dict\n\n async def operate_device(self) -> list[\"Device\"]:\n final = []\n while True:\n device_dict: dict[str, \"Device\"] = await self.current_device()\n if len(device_dict) > 0:\n for k, v in device_dict.items():\n final.append(v)\n Show.console.print(f\"[bold][bold yellow]已连接设备[/bold yellow] [{k}] {v}\")\n\n if len(device_dict) > 1:\n try:\n action = Prompt.ask(\"[bold #5FD7FF]请输入编号选择一台设备\")\n final = final if action == \"000\" else [device_dict[action]]\n except 
KeyError:\n final.clear()\n Show.console.print(f\"[bold red]没有该序号,请重新选择 ...[/bold red]\\n\")\n continue\n\n if len(final) == 1:\n Show.console.print(f\"[bold]<Link> <单设备模式>\")\n else:\n Show.console.print(f\"[bold]<Link> <多设备模式>\")\n\n return final\n\n else:\n Show.console.print(f\"[bold yellow]设备未连接,等待设备连接 ...\")\n await asyncio.sleep(5)" }, { "identifier": "Deploy", "path": "frameflow/parameters.py", "snippet": "class Deploy(object):\n\n _deploys = {\n \"boost\": False,\n \"color\": False,\n \"focus\": False,\n \"target_size\": (350, 700),\n \"fps\": 60,\n \"compress_rate\": 0.5,\n \"threshold\": 0.97,\n \"offset\": 3,\n \"window_size\": 1,\n \"step\": 1,\n \"block\": 6,\n \"window_coefficient\": 2,\n \"crops\": [],\n \"omits\": []\n }\n\n def __init__(\n self,\n boost: bool = None,\n color: bool = None,\n focus: bool = None,\n target_size: tuple = None,\n fps: int = None,\n compress_rate: int | float = None,\n threshold: int | float = None,\n offset: int = None,\n window_size: int = None,\n step: int = None,\n block: int = None,\n window_coefficient: int = None,\n crops: list = None,\n omits: list = None\n ):\n\n self._deploys[\"boost\"] = boost or False\n self._deploys[\"color\"] = color or False\n self._deploys[\"focus\"] = focus or False\n self._deploys[\"target_size\"] = target_size or (350, 700)\n self._deploys[\"fps\"] = fps or 60\n self._deploys[\"compress_rate\"] = compress_rate or 0.5\n self._deploys[\"threshold\"] = threshold or 0.97\n self._deploys[\"offset\"] = offset or 3\n self._deploys[\"window_size\"] = window_size or 1\n self._deploys[\"step\"] = step or 1\n self._deploys[\"block\"] = block or 6\n self._deploys[\"window_coefficient\"] = window_coefficient or 2\n self._deploys[\"crops\"] = crops or []\n self._deploys[\"omits\"] = omits or []\n\n @property\n def boost(self):\n return self._deploys[\"boost\"]\n\n @property\n def color(self):\n return self._deploys[\"color\"]\n\n @property\n def focus(self):\n return self._deploys[\"focus\"]\n\n @property\n def target_size(self):\n return self._deploys[\"target_size\"]\n\n @property\n def fps(self):\n return self._deploys[\"fps\"]\n\n @property\n def compress_rate(self):\n return self._deploys[\"compress_rate\"]\n\n @property\n def threshold(self):\n return self._deploys[\"threshold\"]\n\n @property\n def offset(self):\n return self._deploys[\"offset\"]\n\n @property\n def window_size(self):\n return self._deploys[\"window_size\"]\n\n @property\n def step(self):\n return self._deploys[\"step\"]\n\n @property\n def block(self):\n return self._deploys[\"block\"]\n\n @property\n def window_coefficient(self):\n return self._deploys[\"window_coefficient\"]\n\n @property\n def crops(self):\n return self._deploys[\"crops\"]\n\n @property\n def omits(self):\n return self._deploys[\"omits\"]\n\n def load_deploy(self, deploy_file: str) -> bool:\n is_load: bool = False\n try:\n with open(file=deploy_file, mode=\"r\", encoding=\"utf-8\") as f:\n data = json.loads(f.read())\n boost_mode = boost_data.lower() if isinstance(boost_data := data.get(\"boost\", \"false\"), str) else \"false\"\n color_mode = color_data.lower() if isinstance(color_data := data.get(\"color\", \"false\"), str) else \"false\"\n focus_mode = focus_data.lower() if isinstance(focus_data := data.get(\"focus\", \"false\"), str) else \"false\"\n self._deploys[\"boost\"] = True if boost_mode == \"true\" else False\n self._deploys[\"color\"] = True if color_mode == \"true\" else False\n self._deploys[\"focus\"] = True if focus_mode == \"true\" else False\n size = 
data.get(\"target_size\", (350, 700))\n self._deploys[\"target_size\"] = tuple(\n max(100, min(3000, int(i))) for i in re.findall(r\"-?\\d*\\.?\\d+\", size)\n ) if isinstance(size, str) else size\n self._deploys[\"fps\"] = max(15, min(60, data.get(\"fps\", 60)))\n self._deploys[\"compress_rate\"] = max(0, min(1, data.get(\"compress_rate\", 0.5)))\n self._deploys[\"threshold\"] = max(0, min(1, data.get(\"threshold\", 0.97)))\n self._deploys[\"offset\"] = max(1, data.get(\"offset\", 3))\n self._deploys[\"window_size\"] = max(1, data.get(\"window_size\", 1))\n self._deploys[\"step\"] = max(1, data.get(\"step\", 1))\n self._deploys[\"block\"] = max(1, min(int(min(self.target_size[0], self.target_size[1]) / 10), data.get(\"block\", 6)))\n self._deploys[\"window_coefficient\"] = max(2, data.get(\"window_coefficient\", 2))\n\n # Crops Hook\n crops_list = data.get(\"crops\", [])\n for hook_dict in crops_list:\n if len(\n data_list := [\n value for value in hook_dict.values() if isinstance(value, int | float)\n ]\n ) == 4 and sum(data_list) > 0:\n self._deploys[\"crops\"].append(\n (hook_dict[\"x\"], hook_dict[\"y\"], hook_dict[\"x_size\"], hook_dict[\"y_size\"])\n )\n if len(self.crops) >= 2:\n self._deploys[\"crops\"] = list(set(self.crops))\n\n # Omits Hook\n omits_list = data.get(\"omits\", [])\n for hook_dict in omits_list:\n if len(\n data_list := [\n value for value in hook_dict.values() if isinstance(value, int | float)\n ]\n ) == 4 and sum(data_list) > 0:\n self._deploys[\"omits\"].append(\n (hook_dict[\"x\"], hook_dict[\"y\"], hook_dict[\"x_size\"], hook_dict[\"y_size\"])\n )\n if len(self.omits) >= 2:\n self._deploys[\"omits\"] = list(set(self.omits))\n except FileNotFoundError:\n logger.debug(\"未找到部署文件,使用默认参数 ...\")\n except json.decoder.JSONDecodeError:\n logger.debug(\"部署文件解析错误,文件格式不正确,使用默认参数 ...\")\n else:\n logger.debug(\"读取部署文件,使用部署参数 ...\")\n is_load = True\n finally:\n return is_load\n\n def dump_deploy(self, deploy_file: str) -> None:\n os.makedirs(os.path.dirname(deploy_file), exist_ok=True)\n\n with open(file=deploy_file, mode=\"w\", encoding=\"utf-8\") as f:\n f.writelines('{')\n for k, v in self._deploys.items():\n f.writelines('\\n')\n if isinstance(v, bool):\n f.writelines(f' \"{k}\": \"{v}\",')\n elif k == \"target_size\":\n f.writelines(f' \"{k}\": \"{v}\",')\n elif k == \"crops\" or k == \"omits\":\n if len(v) == 0:\n default = '{\"x\": 0, \"y\": 0, \"x_size\": 0, \"y_size\": 0}'\n f.writelines(f' \"{k}\": [\\n')\n f.writelines(f' {default}\\n')\n f.writelines(' ],') if k == \"crops\" else f.writelines(' ]')\n else:\n f.writelines(f' \"{k}\": [\\n')\n for index, i in enumerate(v):\n x, y, x_size, y_size = i\n new_size = f'{{\"x\": {x}, \"y\": {y}, \"x_size\": {x_size}, \"y_size\": {y_size}}}'\n if (index + 1) == len(v):\n f.writelines(f' {new_size}\\n')\n else:\n f.writelines(f' {new_size},\\n')\n f.writelines(' ],') if k == \"crops\" else f.writelines(' ]')\n else:\n f.writelines(f' \"{k}\": {v},')\n f.writelines('\\n}')\n\n def view_deploy(self) -> None:\n\n title_color = \"#af5fd7\"\n col_1_color = \"#d75f87\"\n col_2_color = \"#87afd7\"\n col_3_color = \"#00af5f\"\n\n table = Table(\n title=f\"[bold {title_color}]Framix Analyzer Deploy\",\n header_style=f\"bold {title_color}\", title_justify=\"center\",\n show_header=True\n )\n table.add_column(\"配置\", no_wrap=True)\n table.add_column(\"参数\", no_wrap=True, max_width=12)\n table.add_column(\"范围\", no_wrap=True)\n table.add_column(\"效果\", no_wrap=True)\n\n table.add_row(\n f\"[bold {col_1_color}]快速模式\",\n f\"[bold 
{col_2_color}]{self.boost}\",\n f\"[bold][[bold {col_3_color}]T | F[/bold {col_3_color}] ]\",\n f\"[bold green]开启[/bold green]\" if self.boost else \"[bold red]关闭[/bold red]\",\n )\n table.add_row(\n f\"[bold {col_1_color}]彩色模式\",\n f\"[bold {col_2_color}]{self.color}\",\n f\"[bold][[bold {col_3_color}]T | F[/bold {col_3_color}] ]\",\n f\"[bold green]开启[/bold green]\" if self.color else \"[bold red]关闭[/bold red]\",\n )\n table.add_row(\n f\"[bold {col_1_color}]视频转换\",\n f\"[bold {col_2_color}]{self.focus}\",\n f\"[bold][[bold {col_3_color}]T | F[/bold {col_3_color}] ]\",\n f\"[bold green]开启[/bold green]\" if self.focus else \"[bold red]关闭[/bold red]\",\n )\n table.add_row(\n f\"[bold {col_1_color}]图像尺寸\",\n f\"[bold {col_2_color}]{self.target_size}\",\n f\"[bold][[bold {col_3_color}]? , ?[/bold {col_3_color}] ]\",\n f\"[bold]宽 [bold red]{self.target_size[0]}[/bold red] 高 [bold red]{self.target_size[1]}[/bold red]\",\n )\n table.add_row(\n f\"[bold {col_1_color}]视频帧率\",\n f\"[bold {col_2_color}]{self.fps}\",\n f\"[bold][[bold {col_3_color}]15, 60[/bold {col_3_color}]]\",\n f\"[bold]转换视频为 [bold red]{self.fps}[/bold red] 帧每秒\",\n )\n table.add_row(\n f\"[bold {col_1_color}]压缩率\",\n f\"[bold {col_2_color}]{self.compress_rate}\",\n f\"[bold][[bold {col_3_color}]0 , 1[/bold {col_3_color}] ]\",\n f\"[bold]压缩视频大小为原来的 [bold red]{int(self.compress_rate * 100)}%[/bold red]\",\n )\n table.add_row(\n f\"[bold {col_1_color}]相似度\",\n f\"[bold {col_2_color}]{self.threshold}\",\n f\"[bold][[bold {col_3_color}]0 , 1[/bold {col_3_color}] ]\",\n f\"[bold]阈值超过 [bold red]{self.threshold}[/bold red] 的帧为稳定帧\",\n )\n table.add_row(\n f\"[bold {col_1_color}]补偿值\",\n f\"[bold {col_2_color}]{self.offset}\",\n f\"[bold][[bold {col_3_color}]0 , ?[/bold {col_3_color}] ]\",\n f\"[bold]合并 [bold red]{self.offset}[/bold red] 个变化不大的稳定区间\",\n )\n table.add_row(\n f\"[bold {col_1_color}]片段数量\",\n f\"[bold {col_2_color}]{self.window_size}\",\n f\"[bold][[bold {col_3_color}]1 , ?[/bold {col_3_color}] ]\",\n f\"[bold]每次处理 [bold red]{self.window_size}[/bold red] 个帧片段\",\n )\n table.add_row(\n f\"[bold {col_1_color}]处理数量\",\n f\"[bold {col_2_color}]{self.step}\",\n f\"[bold][[bold {col_3_color}]1 , ?[/bold {col_3_color}] ]\",\n f\"[bold]每个片段处理 [bold red]{self.step}[/bold red] 个帧图像\",\n )\n table.add_row(\n f\"[bold {col_1_color}]切分程度\",\n f\"[bold {col_2_color}]{self.block}\",\n f\"[bold][[bold {col_3_color}]1 , {int(min(self.target_size[0], self.target_size[1]) / 10)}[/bold {col_3_color}]]\",\n f\"[bold]每个帧图像切分为 [bold red]{self.block}[/bold red] 块\",\n )\n table.add_row(\n f\"[bold {col_1_color}]权重分布\",\n f\"[bold {col_2_color}]{self.window_coefficient}\",\n f\"[bold][[bold {col_3_color}]2 , ?[/bold {col_3_color}] ]\",\n f\"[bold]加权计算 [bold red]{self.window_coefficient}[/bold red]\",\n )\n table.add_row(\n f\"[bold {col_1_color}]获取区域\",\n f\"[bold {col_2_color}]{['!' for _ in range(len(self.crops))]}\",\n f\"[bold][[bold {col_3_color}]0 , 1[/bold {col_3_color}] ]\",\n f\"[bold]共 [bold red]{len(self.crops)}[/bold red] 个区域的图像参与计算\",\n )\n table.add_row(\n f\"[bold {col_1_color}]忽略区域\",\n f\"[bold {col_2_color}]{['!' 
for _ in range(len(self.omits))]}\",\n f\"[bold][[bold {col_3_color}]0 , 1[/bold {col_3_color}] ]\",\n f\"[bold]共 [bold red]{len(self.omits)}[/bold red] 个区域的图像不参与计算\",\n )\n Show.console.print(table)" }, { "identifier": "Option", "path": "frameflow/parameters.py", "snippet": "class Option(object):\n\n _options = {\n \"Total Path\": \"\"\n }\n\n @property\n def total_path(self):\n return self._options[\"Total Path\"]\n\n @total_path.setter\n def total_path(self, value: str):\n self._options[\"Total Path\"] = value\n\n def load_option(self, option_file: str) -> bool:\n is_load: bool = False\n try:\n with open(file=option_file, mode=\"r\", encoding=\"utf-8\") as f:\n data = json.loads(f.read())\n data_path = data.get(\"Total Path\", \"\")\n if data_path and os.path.isdir(data_path):\n if not os.path.exists(data_path):\n os.makedirs(data_path, exist_ok=True)\n self.total_path = data_path\n except FileNotFoundError:\n logger.debug(\"未找到配置文件,使用默认路径 ...\")\n except json.decoder.JSONDecodeError:\n logger.debug(\"配置文件解析错误,文件格式不正确,使用默认路径 ...\")\n else:\n logger.debug(\"读取配置文件,使用配置参数 ...\")\n is_load = True\n finally:\n return is_load\n\n def dump_option(self, option_file: str) -> None:\n os.makedirs(os.path.dirname(option_file), exist_ok=True)\n\n with open(file=option_file, mode=\"w\", encoding=\"utf-8\") as f:\n f.writelines('{')\n for k, v in self._options.items():\n f.writelines('\\n')\n f.writelines(f' \"{k}\": \"{v}\"')\n f.writelines('\\n}')" } ]
import os
import re
import sys
import cv2
import time
import shutil
import random
import asyncio
import aiofiles
import tempfile
from loguru import logger
from rich.prompt import Prompt
from frameflow.show import Show
from frameflow.manage import Manage
from frameflow.parameters import Deploy, Option
from nexaflow import toolbox
from nexaflow.terminal import Terminal
from nexaflow.skills.report import Report
from nexaflow.video import VideoObject, VideoFrame
from nexaflow.cutter.cutter import VideoCutter
from nexaflow.hook import CropHook, OmitHook, FrameSaveHook, PaintCropHook, PaintOmitHook
from nexaflow.classifier.keras_classifier import KerasClassifier
from nexaflow.classifier.framix_classifier import FramixClassifier
from PIL import Image, ImageDraw, ImageFont
from multiprocessing import Pool, freeze_support
from argparse import ArgumentParser
8996
_tools_path = os.path.join(_job_path, "archivix", "tools") _model_path = os.path.join(_job_path, "archivix", "molds", "model.h5") _total_path = os.path.join(_job_path, "archivix", "pages") _major_path = os.path.join(_job_path, "archivix", "pages") _proto_path = os.path.join(_job_path, "archivix", "pages", "template_extra.html") _initial_report = os.path.join(_universal, "framix.report") _initial_deploy = os.path.join(_universal, "framix.source") _initial_option = os.path.join(_universal, "framix.source") if operation_system == "win32": _adb = os.path.join(_tools_path, "win", "platform-tools", "adb.exe") _ffmpeg = os.path.join(_tools_path, "win", "ffmpeg", "bin", "ffmpeg.exe") _scrcpy = os.path.join(_tools_path, "win", "scrcpy", "scrcpy.exe") elif operation_system == "darwin": _adb = os.path.join(_tools_path, "mac", "platform-tools", "adb") _ffmpeg = os.path.join(_tools_path, "mac", "ffmpeg", "bin", "ffmpeg") _scrcpy = os.path.join(_tools_path, "mac", "scrcpy", "bin", "scrcpy") else: Show.console.print("[bold]Only compatible with [bold red]Windows[/bold red] and [bold red]macOS[/bold red] platforms ...[bold]") time.sleep(5) sys.exit(1) os.environ["PATH"] = os.path.dirname(_adb) + os.path.pathsep + os.environ.get("PATH", "") os.environ["PATH"] = os.path.dirname(_ffmpeg) + os.path.pathsep + os.environ.get("PATH", "") os.environ["PATH"] = os.path.dirname(_scrcpy) + os.path.pathsep + os.environ.get("PATH", "") try: except (RuntimeError, ModuleNotFoundError) as err: Show.console.print(f"[bold red]Error: {err}") time.sleep(5) sys.exit(1) class Parser(object): @staticmethod def parse_cmd(): def parse_shape(dim_str): if dim_str: shape = [int(i) for i in re.split(r'[\s,;]+', dim_str)] return tuple(shape) if len(shape) == 2 else (shape[0], shape[0]) return None def parse_scale(dim_str): try: return int(dim_str) except ValueError: try: return float(dim_str) except ValueError: return None parser = ArgumentParser(description="Command Line Arguments Framix") parser.add_argument('--flick', action='store_true', help='录制分析视频帧') parser.add_argument('--alone', action='store_true', help='录制视频') parser.add_argument('--paint', action='store_true', help='绘制分割线条') parser.add_argument('--input', action='append', help='分析单个视频') parser.add_argument('--whole', action='append', help='分析全部视频') parser.add_argument('--merge', action='append', help='聚合报告') parser.add_argument('--train', action='append', help='归类图片文件') parser.add_argument('--build', action='append', help='训练模型文件') parser.add_argument('--boost', action='store_true', help='快速模式') parser.add_argument('--color', action='store_true', help='彩色模式') parser.add_argument('--focus', action='store_true', help='转换视频') parser.add_argument('--shape', nargs='?', const=None, type=parse_shape, help='图片尺寸') parser.add_argument('--scale', nargs='?', const=None, type=parse_scale, help='缩放比例') parser.add_argument('--crops', action='append', help='获取区域') parser.add_argument('--omits', action='append', help='忽略区域') parser.add_argument('--debug', action='store_true', help='调试模式') return parser.parse_args() class Missions(object): def __init__(self, *args, **kwargs): self.boost, self.color, self.focus, self.crops, self.omits, self.shape, self.scale = args self.model_path = kwargs["model_path"] self.total_path = kwargs["total_path"] self.major_path = kwargs["major_path"] self.proto_path = kwargs["proto_path"] self.initial_report = kwargs["initial_report"] self.initial_deploy = kwargs["initial_deploy"] self.initial_option = kwargs["initial_option"] self.adb = kwargs["adb"] self.ffmpeg 
= kwargs["ffmpeg"] self.scrcpy = kwargs["scrcpy"] @staticmethod def only_video(folder: str): class Entry(object): def __init__(self, title: str, place: str, sheet: list): self.title = title self.place = place self.sheet = sheet return [ Entry( os.path.basename(root), root, [os.path.join(root, f) for f in sorted(file) if "log" not in f] ) for root, _, file in os.walk(folder) if file ] def video_task(self, input_video): reporter = Report(total_path=self.initial_report) reporter.title = f"Framix_{time.strftime('%Y%m%d_%H%M%S')}_{os.getpid()}" reporter.query = f"{random.randint(10, 99)}" new_video_path = os.path.join(reporter.video_path, os.path.basename(input_video)) shutil.copy(input_video, new_video_path)
operation_system = sys.platform.strip().lower() work_platform = os.path.basename(os.path.abspath(sys.argv[0])).lower() exec_platform = ["framix.exe", "framix.bin", "framix", "framix.py"] if work_platform == "framix.exe": _job_path = os.path.dirname(os.path.abspath(sys.argv[0])) _universal = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))) elif work_platform == "framix.bin": _job_path = os.path.dirname(sys.executable) _universal = os.path.dirname(os.path.dirname(sys.executable)) elif work_platform == "framix": _job_path = os.path.dirname(sys.executable) _universal = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(sys.executable)))) elif work_platform == "framix.py": _job_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) _universal = os.path.dirname(os.path.abspath(__file__)) else: Show.console.print("[bold red]Only compatible with Windows and macOS platforms ...") time.sleep(5) sys.exit(1) _tools_path = os.path.join(_job_path, "archivix", "tools") _model_path = os.path.join(_job_path, "archivix", "molds", "model.h5") _total_path = os.path.join(_job_path, "archivix", "pages") _major_path = os.path.join(_job_path, "archivix", "pages") _proto_path = os.path.join(_job_path, "archivix", "pages", "template_extra.html") _initial_report = os.path.join(_universal, "framix.report") _initial_deploy = os.path.join(_universal, "framix.source") _initial_option = os.path.join(_universal, "framix.source") if operation_system == "win32": _adb = os.path.join(_tools_path, "win", "platform-tools", "adb.exe") _ffmpeg = os.path.join(_tools_path, "win", "ffmpeg", "bin", "ffmpeg.exe") _scrcpy = os.path.join(_tools_path, "win", "scrcpy", "scrcpy.exe") elif operation_system == "darwin": _adb = os.path.join(_tools_path, "mac", "platform-tools", "adb") _ffmpeg = os.path.join(_tools_path, "mac", "ffmpeg", "bin", "ffmpeg") _scrcpy = os.path.join(_tools_path, "mac", "scrcpy", "bin", "scrcpy") else: Show.console.print("[bold]Only compatible with [bold red]Windows[/bold red] and [bold red]macOS[/bold red] platforms ...[bold]") time.sleep(5) sys.exit(1) os.environ["PATH"] = os.path.dirname(_adb) + os.path.pathsep + os.environ.get("PATH", "") os.environ["PATH"] = os.path.dirname(_ffmpeg) + os.path.pathsep + os.environ.get("PATH", "") os.environ["PATH"] = os.path.dirname(_scrcpy) + os.path.pathsep + os.environ.get("PATH", "") try: except (RuntimeError, ModuleNotFoundError) as err: Show.console.print(f"[bold red]Error: {err}") time.sleep(5) sys.exit(1) class Parser(object): @staticmethod def parse_cmd(): def parse_shape(dim_str): if dim_str: shape = [int(i) for i in re.split(r'[\s,;]+', dim_str)] return tuple(shape) if len(shape) == 2 else (shape[0], shape[0]) return None def parse_scale(dim_str): try: return int(dim_str) except ValueError: try: return float(dim_str) except ValueError: return None parser = ArgumentParser(description="Command Line Arguments Framix") parser.add_argument('--flick', action='store_true', help='录制分析视频帧') parser.add_argument('--alone', action='store_true', help='录制视频') parser.add_argument('--paint', action='store_true', help='绘制分割线条') parser.add_argument('--input', action='append', help='分析单个视频') parser.add_argument('--whole', action='append', help='分析全部视频') parser.add_argument('--merge', action='append', help='聚合报告') parser.add_argument('--train', action='append', help='归类图片文件') parser.add_argument('--build', action='append', help='训练模型文件') parser.add_argument('--boost', action='store_true', help='快速模式') parser.add_argument('--color', 
action='store_true', help='彩色模式') parser.add_argument('--focus', action='store_true', help='转换视频') parser.add_argument('--shape', nargs='?', const=None, type=parse_shape, help='图片尺寸') parser.add_argument('--scale', nargs='?', const=None, type=parse_scale, help='缩放比例') parser.add_argument('--crops', action='append', help='获取区域') parser.add_argument('--omits', action='append', help='忽略区域') parser.add_argument('--debug', action='store_true', help='调试模式') return parser.parse_args() class Missions(object): def __init__(self, *args, **kwargs): self.boost, self.color, self.focus, self.crops, self.omits, self.shape, self.scale = args self.model_path = kwargs["model_path"] self.total_path = kwargs["total_path"] self.major_path = kwargs["major_path"] self.proto_path = kwargs["proto_path"] self.initial_report = kwargs["initial_report"] self.initial_deploy = kwargs["initial_deploy"] self.initial_option = kwargs["initial_option"] self.adb = kwargs["adb"] self.ffmpeg = kwargs["ffmpeg"] self.scrcpy = kwargs["scrcpy"] @staticmethod def only_video(folder: str): class Entry(object): def __init__(self, title: str, place: str, sheet: list): self.title = title self.place = place self.sheet = sheet return [ Entry( os.path.basename(root), root, [os.path.join(root, f) for f in sorted(file) if "log" not in f] ) for root, _, file in os.walk(folder) if file ] def video_task(self, input_video): reporter = Report(total_path=self.initial_report) reporter.title = f"Framix_{time.strftime('%Y%m%d_%H%M%S')}_{os.getpid()}" reporter.query = f"{random.randint(10, 99)}" new_video_path = os.path.join(reporter.video_path, os.path.basename(input_video)) shutil.copy(input_video, new_video_path)
deploy = Deploy(
2
2023-11-13 05:27:34+00:00
12k
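The Show/Deploy snippet in the context of the record above builds a parameter overview with `rich` tables (`table.add_row(...)` followed by `Show.console.print(table)`). Below is a minimal, self-contained sketch of that table-rendering pattern; the parameter names, ranges, and values are illustrative placeholders, not the project's actual defaults.

```python
# Minimal sketch of the rich-table parameter overview pattern used above.
# Parameter names and values are placeholders for illustration only.
from rich.console import Console
from rich.table import Table

console = Console()

table = Table(title="Deploy parameters (illustrative)")
table.add_column("Parameter", style="bold")
table.add_column("Value")
table.add_column("Range")
table.add_column("Description")

boost, fps = True, 60  # placeholder values, not real defaults
table.add_row("boost", str(boost), "T / F", "on" if boost else "off")
table.add_row("fps", str(fps), "15 - 60", f"convert video at {fps} fps")

console.print(table)
```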
OpenBMB/XAgent
XAgent/inner_loop_search_algorithms/ReACT.py
[ { "identifier": "CONFIG", "path": "XAgent/config.py", "snippet": "CONFIG = XAgentConfig.get_default_config()" }, { "identifier": "BaseAgent", "path": "XAgent/agent/base_agent.py", "snippet": "class BaseAgent(metaclass=abc.ABCMeta):\n \"\"\"\n The BaseAgent class abstracts the essential attributes and methods for classes,\n which inherit it. It is a metaclass of the Abstract Base Class (abc module).\n\n Attributes:\n abilities (set): A set of RequiredAbilities, which are necessary skills for BaseAgent.\n \"\"\"\n\n abilities = set([\n RequiredAbilities.plan_generation,\n RequiredAbilities.plan_refinement,\n RequiredAbilities.task_evaluator,\n RequiredAbilities.tool_tree_search,\n RequiredAbilities.reflection,\n RequiredAbilities.summarization,\n ])\n\n def __init__(self, config, prompt_messages: List[Message] = None):\n \"\"\"\n Constructs an agent object with set abilities, configuration settings,\n and initial set of prompt messages.\n\n Args:\n config (obj): Configuration settings for agent.\n prompt_messages (List): Initial set of messages user gives to interact with the agent.\n \"\"\"\n logger.typewriter_log(\n f\"Constructing an Agent:\",\n Fore.YELLOW,\n self.__class__.__name__,\n )\n self.config = config\n self.prompt_messages = prompt_messages\n self.usage = { }\n\n @abc.abstractmethod\n def parse(self,**args) -> (LLMStatusCode, Message, dict):\n \"\"\"\n Abstract method that needs to be implemented by the subclasses.\n Required for parsing the given arguments.\n \"\"\"\n pass \n\n def fill_in_placeholders(self, placeholders: dict):\n \"\"\"\n Fills in placeholders defined in the input with the corresponding values.\n \n Args:\n placeholders (dict): A dictionary containing keys as placeholders and values as their replacements.\n\n Returns:\n filled_messages: A copy of the initial prompt_messages with placeholders replaced with their corresponding values.\n \"\"\"\n filled_messages = deepcopy(self.prompt_messages)\n for message in filled_messages:\n role = message.role\n if role in placeholders:\n for key, value in placeholders[role].items():\n message.content = message.content.replace(\"{{\" + str(key) + \"}}\", str(value))\n return filled_messages\n\n def generate(self,\n messages:list[dict]|list[Message],\n arguments:dict=None,\n functions:list[dict]=None,\n function_call:dict=None,\n stop:dict=None,\n *args,**kwargs):\n \"\"\"\n Generates a response from the AI model, using the given messages, arguments, functions,\n and a function call.\n\n Args:\n messages (list[dict]|list[Message]): A list of messages with which to interact with the AI model.\n arguments (dict, optional): A dictionary containing arguments to use for AI model responses.\n functions (list[dict], optional): A list of dictionaries representing functions to use for AI model responses.\n function_call (dict, optional): A dictionary representing a function call to use for AI model responses.\n stop (dict, optional): A dictionary that signifies when to stop the conversation with the AI model.\n *args: Variable list of arguments. 
\n **kwargs: Arbitrary keyword arguments.\n\n Returns:\n message (dict): A message generated by the AI model.\n tokens (int): Number of tokens used in generating the AI model's response.\n \"\"\"\n if isinstance(messages[0],Message):\n messages = [message.raw() for message in messages]\n if functions is not None and len(functions) == 1 and function_call is None:\n function_call = {'name':functions[0]['name']} # must call at least one function\n match CONFIG.default_request_type:\n case 'openai':\n if arguments is not None:\n if functions is None or len(functions) == 0:\n functions = [{\n 'name':'reasoning',\n 'parameters':arguments\n }]\n function_call = {'name':'reasoning'}\n elif len(functions) == 1:\n for k,v in arguments['properties'].items():\n functions[0]['parameters']['properties'][k] = v\n if k in arguments['required']:\n functions[0]['parameters']['required'].append(k)\n else:\n raise NotImplementedError(\"Not implemented for multiple functions with arguments\")\n \n response = objgenerator.chatcompletion(\n messages=messages,\n functions=functions,\n function_call=function_call,\n stop=stop,\n *args,**kwargs)\n \n message = {}\n function_call_args:dict = json5.loads(response[\"choices\"][0][\"message\"][\"function_call\"]['arguments'])\n \n if arguments is not None:\n message['arguments'] = {\n k: function_call_args.pop(k)\n for k in arguments['properties'].keys() if k in function_call_args\n }\n if len(function_call_args) > 0:\n message['function_call'] = {\n 'name': response['choices'][0]['message']['function_call']['name'],\n 'arguments': function_call_args\n }\n\n case 'xagent':\n response = objgenerator.chatcompletion(\n messages=messages,\n arguments=arguments,\n functions=functions,\n function_call=function_call,\n stop=stop,\n *args,**kwargs)\n message = json5.loads(response[\"choices\"][0][\"message\"]['content'])\n case _:\n raise NotImplementedError(f\"Request type {CONFIG.default_request_type} not implemented\")\n \n tokens = response[\"usage\"]\n return message, tokens" }, { "identifier": "summarize_action", "path": "XAgent/agent/summarize.py", "snippet": "SINGLE_ACTION_MAX_LENGTH = CONFIG.summary['single_action_max_length']\nMAX_RETURN_LENGTH = CONFIG.summary['max_return_length']\nMAX_PLAN_LENGTH = CONFIG.max_plan_length\ndef summarize_action(action_process:list[dict], task:str,)->(list[str],str):\n def generate_func_args(args:dict,black_list=[])->str:\ndef summarize_plan(plans:dict)->str:\n def recursive_summary(plan:dict,):" }, { "identifier": "XAgentCoreComponents", "path": "XAgent/core.py", "snippet": "class XAgentCoreComponents(metaclass=abc.ABCMeta):\n \"\"\"\n XAgent 核心组件集 / XAgent Core Components\n Components:\n logger: 日志 / logger\n recorder: 运行记录 / running recorder\n toolserver_interface: 工具服务接口 / tool server interface\n function_handler: 功能处理器 / function handler\n working_memory_function: 工作记忆 / working memory\n agent_dispatcher: 代理调度器 / agent dispatcher\n vector_db_interface: 向量数据库接口 / vector db interface\n interaction: 交互 / interaction\n\n\n 组件集中的所有组件全局唯一 / all components in the component set are globally unique\n\n \"\"\"\n\n global_recorder = None\n\n def __init__(self) -> None:\n self.interaction = None\n self.logger = None\n self.recorder = None\n self.toolserver_interface = None\n self.function_handler = None\n self.tool_functions_description_list = []\n self.function_list = []\n self.working_memory_function = None\n self.agent_dispatcher = None\n self.vector_db_interface = None\n self.base_dir = \"\"\n self.extract_dir = \"\"\n 
self.available_agents = [\n PlanGenerateAgent,\n PlanRefineAgent,\n ToolAgent,\n ReflectAgent,\n ]\n\n def register_interaction(self,\n interaction: XAgentInteraction):\n \"\"\"\n register an interaction to the core components\n \"\"\"\n self.interaction = interaction\n\n def register_logger(self):\n \"\"\"\n register a logger to the core components\n \"\"\"\n self.base_dir = os.path.join(\n os.path.join(XAgentServerEnv.base_dir,\n \"localstorage\",\n \"interact_records\"),\n datetime.now().strftime(\"%Y-%m-%d\"),\n self.interaction.base.interaction_id)\n if not os.path.exists(self.base_dir):\n os.makedirs(self.base_dir, exist_ok=True)\n\n self.extract_dir = os.path.join(self.base_dir, \"workspace\")\n if not os.path.exists(self.extract_dir):\n os.makedirs(self.extract_dir, exist_ok=True)\n self.logger = self.interaction.logger\n\n def resister_recorder(self, param: XAgentParam):\n \"\"\"\n register a recorder to the core components\n \"\"\"\n self.recorder = RunningRecoder(\n record_id=self.interaction.base.interaction_id,\n newly_start=param.newly_created,\n root_dir=self.base_dir,\n logger=self.logger\n )\n if param.newly_created:\n self.recorder.regist_query(param.query)\n self.recorder.regist_config(param.config)\n else:\n self.recorder.load_from_db(self.interaction.base.recorder_root_dir)\n self.recorder.regist_query(param.query)\n self.recorder.regist_config(param.config)\n\n XAgentCoreComponents.global_recorder = self.recorder\n\n def register_toolserver_interface(self, param: XAgentParam):\n \"\"\"\n register a tool server interface to the core components\n \"\"\"\n self.logger.info(\"register tool server interface\")\n self.toolserver_interface = ToolServerInterface(\n self.recorder, logger=self.logger)\n self.logger.info(\"lazy init tool server interface\")\n self.toolserver_interface.lazy_init(config=param.config)\n # to download all files\n self.interaction.register_toolserver_interface(\n self.toolserver_interface)\n\n def register_function_handler(self, config):\n \"\"\"\n register a function handler to the core components\n \"\"\"\n self.logger.info(\"register function handler\")\n self.function_handler = FunctionHandler(\n toolserver_interface=self.toolserver_interface,\n config=config,\n interaction=self.interaction,\n recorder=self.recorder,\n logger=self.logger)\n\n def register_working_memory_function(self):\n \"\"\"\n register a working memory agent to the core components\n \"\"\"\n # working memory function is used for\n # communication between different agents that handle different subtasks\n self.logger.info(\"register working memory function\")\n self.working_memory_agent = WorkingMemoryAgent(logger=self.logger)\n self.working_memory_function = WorkingMemoryAgent.get_working_memory_function()\n\n def register_agent_dispatcher(self, param: XAgentParam):\n \"\"\"\n register a agent dispatcher to the core components\n \"\"\"\n self.logger.info(\"register agent dispatcher\")\n self.agent_dispatcher = XAgentDispatcher(param.config,\n enable=False,\n logger=self.logger)\n for agent in self.available_agents:\n self.agent_dispatcher.regist_agent(agent)\n\n def register_vector_db_interface(self):\n \"\"\"\n register a vector db interface to the core components\n \"\"\"\n # self.vector_db_interface = VectorDBInterface()\n pass\n\n def register_all(self, param: XAgentParam, interaction: XAgentInteraction):\n \"\"\"\n register all components to the core components\n \"\"\"\n self.register_interaction(interaction)\n self.register_logger()\n self.resister_recorder(param)\n 
self.register_toolserver_interface(param)\n self.register_function_handler(param.config)\n self.register_working_memory_function()\n self.register_agent_dispatcher(param=param)\n self.register_vector_db_interface()\n\n def build(self, param: XAgentParam, interaction: XAgentInteraction):\n \"\"\"\n start all components\n \"\"\"\n self.register_all(param, interaction)\n self.logger.info(\"build all components, done!\")\n\n subtask_functions, self.tool_functions_description_list = self.function_handler.get_functions(\n param.config)\n self.function_list = subtask_functions + self.working_memory_function\n\n def start(self):\n \"\"\"\n start all components\n \"\"\"\n self.logger.info(\"start all components\")\n\n def close(self):\n \"\"\"\n close all components\n \"\"\"\n self.toolserver_interface.download_all_files()\n self.toolserver_interface.close()\n\n def print_task_save_items(self,\n item: TaskSaveItem,\n ) -> None:\n\n self.logger.typewriter_log(\n f\"Task Name:\", Fore.YELLOW, f\"{item.name}\"\n )\n self.logger.typewriter_log(\n f\"Task Goal:\", Fore.YELLOW, f\"{item.goal}\"\n )\n self.logger.typewriter_log(\n f\"Task Prior-Criticism:\", Fore.YELLOW, f\"{item.prior_plan_criticism}\"\n )\n if len(item.posterior_plan_reflection) > 0:\n self.logger.typewriter_log(\n f\"Task Posterior-Criticism:\", Fore.YELLOW\n )\n for line in item.posterior_plan_reflection:\n line = line.lstrip(\"- \")\n self.logger.typewriter_log(\"- \", Fore.GREEN, line.strip())\n if len(item.milestones) > 0:\n self.logger.typewriter_log(\n f\"Task Milestones:\", Fore.YELLOW,\n )\n for line in item.milestones:\n line = line.lstrip(\"- \")\n self.logger.typewriter_log(\"- \", Fore.GREEN, line.strip())\n # if len(item.expected_tools) > 0:\n # logger.typewriter_log(\n # f\"Expected Tools:\", Fore.YELLOW,\n # )\n # for line in item.expected_tools:\n # line = f\"{line['tool_name']}: {line['reason']}\".lstrip(\"- \")\n # logger.typewriter_log(\"- \", Fore.GREEN, line.strip())\n if len(item.tool_reflection) > 0:\n self.logger.typewriter_log(\n f\"Posterior Tool Reflections:\", Fore.YELLOW,\n )\n for line in item.tool_reflection:\n line = f\"{line['target_tool_name']}: {line['reflection']}\".lstrip(\n \"- \")\n self.logger.typewriter_log(\"- \", Fore.GREEN, line.strip())\n\n self.logger.typewriter_log(\n f\"Task Status:\", Fore.YELLOW, f\"{item.status.name}\"\n )\n if item.action_list_summary != \"\":\n self.logger.typewriter_log(\n f\"Action Summary:\", Fore.YELLOW, f\"{item.action_list_summary}\"\n )\n\n def print_assistant_thoughts(\n self,\n # ai_name: object,\n assistant_reply_json_valid: object,\n speak_mode: bool = False,\n ) -> None:\n assistant_thoughts_reasoning = None\n assistant_thoughts_plan = None\n assistant_thoughts_speak = None\n assistant_thoughts_criticism = None\n\n assistant_thoughts = assistant_reply_json_valid.get(\"thoughts\", {})\n assistant_thoughts = assistant_thoughts.get(\"properties\", {})\n assistant_thoughts_text = assistant_thoughts.get(\"thought\")\n if assistant_thoughts:\n assistant_thoughts_reasoning = assistant_thoughts.get(\"reasoning\")\n assistant_thoughts_plan = assistant_thoughts.get(\"plan\")\n assistant_thoughts_criticism = assistant_thoughts.get(\"criticism\")\n if assistant_thoughts_text is not None and assistant_thoughts_text != \"\":\n self.logger.typewriter_log(\n f\"THOUGHTS:\", Fore.YELLOW, f\"{assistant_thoughts_text}\"\n )\n if assistant_thoughts_reasoning is not None and assistant_thoughts_reasoning != \"\":\n self.logger.typewriter_log(\n \"REASONING:\", Fore.YELLOW, 
f\"{assistant_thoughts_reasoning}\")\n\n if assistant_thoughts_plan is not None and len(assistant_thoughts_plan) > 0:\n self.logger.typewriter_log(\"PLAN:\", Fore.YELLOW, \"\")\n # If it's a list, join it into a string\n if isinstance(assistant_thoughts_plan, list):\n assistant_thoughts_plan = \"\\n\".join(assistant_thoughts_plan)\n elif isinstance(assistant_thoughts_plan, dict):\n assistant_thoughts_plan = str(assistant_thoughts_plan)\n\n # Split the input_string using the newline character and dashes\n lines = assistant_thoughts_plan.split(\"\\n\")\n for line in lines:\n line = line.lstrip(\"- \")\n self.logger.typewriter_log(\"- \", Fore.GREEN, line.strip())\n\n if assistant_thoughts_criticism is not None and assistant_thoughts_criticism != \"\":\n self.logger.typewriter_log(\n \"CRITICISM:\", Fore.YELLOW, f\"{assistant_thoughts_criticism}\")\n return {\n \"thoughts\": assistant_thoughts_text,\n \"reasoning\": assistant_thoughts_reasoning,\n \"plan\": assistant_thoughts_plan,\n \"criticism\": assistant_thoughts_criticism,\n \"node_id\": uuid.uuid4().hex\n }" }, { "identifier": "ToolNode", "path": "XAgent/data_structure/node.py", "snippet": "class ToolNode(Node):\n \"\"\"\n Class representing a tool node in the XAgent's data structure.\n \n A tool node has a “father” that represents its parent node, \"children\" that represents its child nodes, \n and “data” containing metadata about node's status, command, tool's output, and thoughts properties.\n It also carries a message history and a workspace hash id.\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize a new tool node.\n\n Setup father, children, expand_num, data, history, workspace_hash_id attributes for the instance.\n \"\"\"\n\n self.father: ToolNode = None\n self.children: list[ToolNode] = []\n self.expand_num = 0\n self.data = {\n \"content\": \"\",\n \"thoughts\": {\n \"properties\": {\n \"thought\": \"\",\n \"reasoning\": \"\",\n \"plan\": \"\",\n \"criticism\": \"\",\n },\n },\n \"command\": {\n \"properties\": {\n \"name\": \"\",\n \"args\": \"\",\n },\n },\n \"tool_output\": \"\",\n \"tool_status_code\": ToolCallStatusCode.TOOL_CALL_SUCCESS,\n }\n self.history: MessageHistory = MessageHistory()\n self.workspace_hash_id = \"\"\n\n @property\n def process(self):\n \"\"\"\n Generate a list of data from current node up to root node.\n\n Returns:\n data (List): A list of data from current node up to root node.\n \"\"\"\n\n data = []\n now_node = self\n while now_node.father != None:\n data = [now_node.data] + data\n now_node = now_node.father\n return data\n\n def to_json(self):\n \"\"\"\n Convert the data attribute of the instance to a JSON-compatible format.\n\n Returns:\n data (Dict): The data attribute of the instance in a JSON-compatible format.\n \"\"\"\n\n data = deepcopy(self.data)\n data[\"tool_status_code\"] = data[\"tool_status_code\"].name\n return data\n\n def get_depth(self):\n \"\"\"\n Calculate the depth of current node in the tree.\n\n Returns:\n depth (int): The depth of the node. 
Return 0 if the node is a root node.\n \"\"\"\n \n if self.father == None:\n return 0\n return self.father.get_depth() + 1\n \n def get_subtree_size(self):\n \"\"\"\n Calculate the size of the subtree rooted at current node.\n\n Returns:\n size (int): The size of the subtree rooted at current node.\n \"\"\"\n \n if self.children == []:\n return 1\n now_size = 1\n for child in self.children:\n now_size += child.get_subtree_size()\n return now_size" }, { "identifier": "TaskSearchTree", "path": "XAgent/data_structure/tree.py", "snippet": "class TaskSearchTree:\n \"\"\"\n TaskSearchTree represents a tree data structure with specific task searching behavior. \n\n Attributes:\n root (ToolNode): Root node of the tree.\n now_expand_num (int): Maintains current expanding number for nodes during traversal.\n \"\"\"\n \n def __init__(self):\n \"\"\"Initializes TaskSearchTree with a root ToolNode and default expanding number.\"\"\"\n self.root: ToolNode = ToolNode()\n self.root.expand_num = 0\n self.now_expand_num = 1\n\n def get_depth(self):\n \"\"\"\n Gets the depth of the tree from the current root node.\n\n Returns:\n int: The depth of the tree\n \"\"\"\n return self.root.get_depth()\n \n def get_subtree_size(self):\n \"\"\"\n Gets the number of nodes (or size) of the subtree from the current root node.\n\n Returns:\n int: The number of nodes in the subtree\n \"\"\"\n return self.root.get_subtree_size()\n \n def make_father_relation(self, father, child):\n \"\"\"\n Establishes a parent-child relationship between two given nodes.\n\n Args:\n father (ToolNode): The parent node in the relation.\n child (ToolNode): The child node in the relation.\n\n Raises:\n TypeError: If the father or child is not a ToolNode instance.\n \"\"\"\n if not (isinstance(father, ToolNode) and isinstance(child, ToolNode)):\n raise TypeError(\"Father and child both need to be instances of ToolNode.\")\n\n child.expand_num = self.now_expand_num\n self.now_expand_num += 1\n\n child.father = father\n father.children.append(child)" }, { "identifier": "BaseSearchMethod", "path": "XAgent/inner_loop_search_algorithms/base_search.py", "snippet": "class BaseSearchMethod:\n \"\"\"The base class for all search methods. It defines the common elements and actions that all search \n methods have.\n \n Attributes:\n status (SearchMethodStatusCode): The status of the search method. It can be 'DOING', 'SUCCESS' or 'FAILED'.\n need_for_plan_refine (bool): A flag that indicates if the plan needs to be refined. It starts as False.\n \"\"\"\n def __init__(self):\n \"\"\"Initializes the search method instance and logs its creation.\"\"\"\n logger.typewriter_log(\n f\"Constructing a searching method:\",\n Fore.YELLOW,\n self.__class__.__name__,\n )\n self.status: SearchMethodStatusCode = SearchMethodStatusCode.DOING\n self.need_for_plan_refine: bool = False\n\n def run(self):\n \"\"\"A Placeholder function for running the search method. \n This should be implemented by all search method subclasses.\n \"\"\"\n pass\n\n def to_json(self):\n \"\"\"A Placeholder function for creating a json representation of the search method. 
\n This should be implemented by all search method subclasses.\n \"\"\"\n pass\n \n def get_finish_node(self):\n \"\"\"A Placeholder function for getting the final node of the search method run.\n This should be implemented by all search method subclasses.\n \"\"\"\n pass\n\n def status(self):\n \"\"\"Gets the current status of the search method.\n\n Returns:\n SearchMethodStatusCode: The current status of the search method.\n \"\"\"\n return self.status" }, { "identifier": "Message", "path": "XAgent/message_history.py", "snippet": "class Message:\n \"\"\"OpenAI Message class.\n\n A class representing a message from an agent, a user, or a system function.\n\n Attributes:\n role (MessageRole): Source of the message, can be either 'system', 'user', 'assistant', or 'function'.\n content (str): The actual content of the message.\n type (MessageType): The type of message, either 'ai_response' for AI dialogue messages or 'action_result' for results of API calls.\n function_call (dict): A dictionary representing the method invocation in programmable API calls.\n \"\"\"\n\n role: MessageRole\n content: str\n type: MessageType | None = None\n function_call: dict | None = None\n\n def raw(self) -> MessageDict:\n \"\"\"Extracts raw content of the message, stripping away other metadata.\n\n Returns:\n MessageDict: Dictionary containing 'role' and 'content'.\n \"\"\"\n data = {\"role\": self.role, \"content\": self.content}\n if self.function_call != None:\n data[\"function_call\"] = self.function_call\n return data\n \n def to_json(self):\n \"\"\"Convert the message into JSON format.\n\n Returns:\n MessageDict: JSON representation of the message.\n \"\"\"\n return self.raw()\n\n @classmethod\n def equal(cls, a: Message, b: Message):\n \"\"\"Checks if two messages are equal by comparing all their attributes.\n\n Args:\n a (Message): first message to be compared.\n b (Message): second message to be compared.\n\n Returns:\n bool: Returns True if both messages are equal in all their attributes; False otherwise.\n \"\"\"\n if a.role != b.role:\n return False\n if a.content != b.content:\n return False\n if a.type != b.type:\n return False\n if a.function_call != b.function_call:\n return False\n return True" }, { "identifier": "SearchMethodStatusCode", "path": "XAgent/utils.py", "snippet": "class SearchMethodStatusCode(Enum):\n \"\"\"\n Enumeration descsribing different status codes for search methods.\n \"\"\"\n DOING = 0\n SUCCESS = 1\n FAIL = 2\n HAVE_AT_LEAST_ONE_ANSWER = 3 " }, { "identifier": "ToolCallStatusCode", "path": "XAgent/utils.py", "snippet": "class ToolCallStatusCode(Enum):\n \"\"\"\n Enumeration descsribing different status codes for tool calls.\n \n The status codes are:\n - TOOL_CALL_FAILED\n - TOOL_CALL_SUCCESS\n - FORMAT_ERROR\n - HALLUCINATE_NAME\n - OTHER_ERROR\n - TIMEOUT_ERROR\n - TIME_LIMIT_EXCEEDED\n - SERVER_ERROR\n - SUBMIT_AS_SUCCESS\n - SUBMIT_AS_FAILED\n \"\"\"\n TOOL_CALL_FAILED = -1\n TOOL_CALL_SUCCESS = 0\n FORMAT_ERROR = 1\n HALLUCINATE_NAME = 2 \n OTHER_ERROR = 3 \n TIMEOUT_ERROR = 4\n TIME_LIMIT_EXCEEDED = 5\n SERVER_ERROR = 6\n \n SUBMIT_AS_SUCCESS = 7\n SUBMIT_AS_FAILED = 8\n def __str__(self):\n return self.__class__.__name__ + \": \" + self.name" } ]
import json
from colorama import Fore
from XAgent.config import CONFIG
from XAgent.agent.base_agent import BaseAgent
from XAgent.agent.summarize import summarize_action, summarize_plan, clip_text
from XAgent.core import XAgentCoreComponents
from XAgent.data_structure.node import ToolNode
from XAgent.data_structure.tree import TaskSearchTree
from XAgent.inner_loop_search_algorithms.base_search import BaseSearchMethod
from XAgent.message_history import Message
from XAgent.utils import SearchMethodStatusCode, ToolCallStatusCode
8552
if "reasoning" in args.keys() and "reasoning" in assistant_thoughts.keys(): old["thoughts"]["properties"]["reasoning"] = args.get( "reasoning", assistant_thoughts_reasoning) if "plan" in args.keys() and "plan" in assistant_thoughts.keys(): old["thoughts"]["properties"]["plan"] = args.get( "plan", assistant_thoughts_plan) if "criticism" in args.keys() and "criticism" in assistant_thoughts.keys(): old["thoughts"]["properties"]["criticism"] = args.get( "criticism", assistant_thoughts_criticism) return old, True def generate_chain(self, config, agent: BaseAgent, arguments, functions, task_id, now_dealing_task, plan_agent): """ Run the chain search task. Args: config: Configuration for the search. agent: Base agent responsible for chain search. arguments: Arguments for the current task to be handled. functions: The available functions for use by agent. task_id: ID of the current task. Returns: None. Raises: None. """ self.tree_list.append(TaskSearchTree()) now_attempt_tree = self.tree_list[-1] now_node = now_attempt_tree.root while now_node.get_depth() < config.max_subtask_chain_length: self.xagent_core_components.logger.typewriter_log( "-=-=-=-=-=-=-= THOUGHTS, REASONING, PLAN AND CRITICISM WILL NOW BE VERIFIED BY AGENT -=-=-=-=-=-=-=", Fore.GREEN, "", ) if now_node.father != None: if self.xagent_core_components.interaction.interrupt: can_modify = self.get_origin_data(now_node.data) receive_data = self.xagent_core_components.interaction.receive( can_modify) data, rewrite_flag = self.rewrite_input_func( now_node.data, receive_data) now_node.data = data if rewrite_flag: self.xagent_core_components.logger.typewriter_log( "-=-=-=-=-=-=-= USER INPUT -=-=-=-=-=-=-=", Fore.GREEN, "", ) self.xagent_core_components.print_assistant_thoughts(now_node.data, False) self.xagent_core_components.logger.typewriter_log( "-=-=-=-=-=-=-= USER INPUT -=-=-=-=-=-=-=", Fore.GREEN, "", ) message_sequence = make_message(now_node=now_node, max_length=config.max_subtask_chain_length, config=config, now_dealing_task=now_dealing_task) function_call = None if now_node.get_depth() == config.max_subtask_chain_length - 1: function_call = {"name": "subtask_submit"} file_archi, _, = self.xagent_core_components.toolserver_interface.execute_command_client( "FileSystemEnv_print_filesys_struture", {"return_root": True}) file_archi, length = clip_text(file_archi, 1000, clip_end=True) human_prompt = "" if config.enable_ask_human_for_help: human_prompt = "- Use 'ask_human_for_help' when you need help, remember to be specific to your requirement to help user to understand your problem." else: human_prompt = "- Human is not available for help. You are not allowed to ask human for help in any form or channel. Solve the problem by yourself. If information is not enough, try your best to use default value." 
all_plan = plan_agent.latest_plan.to_json() if config.enable_summary: all_plan = summarize_plan(all_plan) else: all_plan = json.dumps(all_plan, indent=2, ensure_ascii=False) new_message, tokens = agent.parse( placeholders={ "system": { "all_plan": all_plan }, "user": { "workspace_files": file_archi, "subtask_id": now_dealing_task.get_subtask_id(to_str=True), "max_length": config.max_subtask_chain_length, "step_num": str(now_node.get_depth()+1), "human_help_prompt": human_prompt, } }, arguments=arguments, functions=functions, function_call=function_call, additional_messages=message_sequence, additional_insert_index=-1 ) new_tree_node = agent.message_to_tool_node(new_message) print_data = self.xagent_core_components.print_assistant_thoughts( new_tree_node.data, False ) tool_output, tool_output_status_code, need_for_plan_refine, using_tools = self.xagent_core_components.function_handler.handle_tool_call( new_tree_node) self.need_for_plan_refine = need_for_plan_refine now_attempt_tree.make_father_relation(now_node, new_tree_node) self.xagent_core_components.interaction.insert_data( data={**print_data, "using_tools": using_tools}, status="inner", current=task_id, is_include_pictures=self.is_include_pictures(using_tools)) now_node = new_tree_node
NOW_SUBTASK_PROMPT = ''' ''' def make_message(now_node: ToolNode, max_length, config, now_dealing_task): """ Function to generate messages for each node. Args: now_node: The current ToolNode instance. task_handler: Handler of the tasks. max_length: Maximum length of the subtask chain. config: The configuration settings. Returns: The sequence of messages for the current node. """ if CONFIG.enable_summary: terminal_task_info = summarize_plan( now_dealing_task.to_json()) else: terminal_task_info = json.dumps( now_dealing_task.to_json(), indent=2, ensure_ascii=False) message_sequence = [] now_subtask_prompt = f'''Now you will perform the following subtask:\n"""\n{terminal_task_info}\n"""\n''' message_sequence.append(Message("user", now_subtask_prompt)) action_process = now_node.process if config.enable_summary: action_process = summarize_action( action_process, terminal_task_info) user_prompt = f"""The following steps have been performed (you have already done the following and the current file contents are shown below):\n {action_process} """ message_sequence.append(Message("user", user_prompt)) return message_sequence class ReACTChainSearch(BaseSearchMethod): """ Class for ReACT chain search. It performs chain based searches for tasks. """ def __init__(self, xagent_core_components: XAgentCoreComponents): """ xagent_core_components: XAgentCoreComponents object, used to initialize ReACTChainSearch object Initializes ReACTChainSearch object. It maintains a list of trees to represent the processed tasks. """ super().__init__() self.tree_list = [] self.finish_node = None self.xagent_core_components = xagent_core_components def run(self, config, agent: BaseAgent, arguments, functions, task_id, now_dealing_task, plan_agent, max_try=1, max_answer=1): """ Runs the chain search task. Args: config: Configuration for the search. agent: Base agent responsible for chain search. arguments: Arguments for the current task to be handled. functions: The available functions for use by agent. task_id: ID of the current task. max_try: Maximum number of attempts. max_answer: Maximum number of answers to be received Returns: None Raises: None """ for _attempt_id in range(max_try): self.generate_chain(config, agent, arguments, functions, task_id, now_dealing_task, plan_agent) if self.status == SearchMethodStatusCode.HAVE_AT_LEAST_ONE_ANSWER: self.status = SearchMethodStatusCode.SUCCESS else: self.status = SearchMethodStatusCode.FAIL def get_finish_node(self): """ Function to retrieve the finished node in the task tree. Returns: The finished node. """ return self.finish_node def get_origin_data(self, data): """ Retrieves the initially entered data. Args: data: The initially entered data list. 
Returns: The initially entered data as a dictionary.: """ assistant_thoughts_reasoning = None assistant_thoughts_plan = None assistant_thoughts_speak = None assistant_thoughts_criticism = None assistant_thoughts = data.get("thoughts", {}) assistant_thoughts = assistant_thoughts.get("properties", {}) assistant_thoughts_text = assistant_thoughts.get("thought") if assistant_thoughts: assistant_thoughts_reasoning = assistant_thoughts.get("reasoning") assistant_thoughts_plan = assistant_thoughts.get("plan") assistant_thoughts_criticism = assistant_thoughts.get("criticism") return {"args": { "thoughts": assistant_thoughts_text, "reasoning": assistant_thoughts_reasoning, "plan": assistant_thoughts_plan, "criticism": assistant_thoughts_criticism }} def rewrite_input_func(self, old, new): """ Checks whether the new inputs are valid and if so updates the old input with the new one. Args: old: The old input entry. new: The new input entry to replace the old one. Returns: The updated input list and the rewrite status. """ if not isinstance(new, dict): pass if new is None: return old, False else: args = new.get("args", {}) assistant_thoughts_reasoning = None assistant_thoughts_plan = None assistant_thoughts_speak = None assistant_thoughts_criticism = None assistant_thoughts = old.get("thoughts", {}) assistant_thoughts = assistant_thoughts.get("properties", {}) assistant_thoughts_text = assistant_thoughts.get("thought") if assistant_thoughts: assistant_thoughts_reasoning = assistant_thoughts.get( "reasoning") assistant_thoughts_plan = assistant_thoughts.get("plan") assistant_thoughts_criticism = assistant_thoughts.get( "criticism") if "thoughts" in args.keys() and "thought" in assistant_thoughts.keys(): old["thoughts"]["properties"]["thought"] = args.get( "thoughts", assistant_thoughts_text) if "reasoning" in args.keys() and "reasoning" in assistant_thoughts.keys(): old["thoughts"]["properties"]["reasoning"] = args.get( "reasoning", assistant_thoughts_reasoning) if "plan" in args.keys() and "plan" in assistant_thoughts.keys(): old["thoughts"]["properties"]["plan"] = args.get( "plan", assistant_thoughts_plan) if "criticism" in args.keys() and "criticism" in assistant_thoughts.keys(): old["thoughts"]["properties"]["criticism"] = args.get( "criticism", assistant_thoughts_criticism) return old, True def generate_chain(self, config, agent: BaseAgent, arguments, functions, task_id, now_dealing_task, plan_agent): """ Run the chain search task. Args: config: Configuration for the search. agent: Base agent responsible for chain search. arguments: Arguments for the current task to be handled. functions: The available functions for use by agent. task_id: ID of the current task. Returns: None. Raises: None. 
""" self.tree_list.append(TaskSearchTree()) now_attempt_tree = self.tree_list[-1] now_node = now_attempt_tree.root while now_node.get_depth() < config.max_subtask_chain_length: self.xagent_core_components.logger.typewriter_log( "-=-=-=-=-=-=-= THOUGHTS, REASONING, PLAN AND CRITICISM WILL NOW BE VERIFIED BY AGENT -=-=-=-=-=-=-=", Fore.GREEN, "", ) if now_node.father != None: if self.xagent_core_components.interaction.interrupt: can_modify = self.get_origin_data(now_node.data) receive_data = self.xagent_core_components.interaction.receive( can_modify) data, rewrite_flag = self.rewrite_input_func( now_node.data, receive_data) now_node.data = data if rewrite_flag: self.xagent_core_components.logger.typewriter_log( "-=-=-=-=-=-=-= USER INPUT -=-=-=-=-=-=-=", Fore.GREEN, "", ) self.xagent_core_components.print_assistant_thoughts(now_node.data, False) self.xagent_core_components.logger.typewriter_log( "-=-=-=-=-=-=-= USER INPUT -=-=-=-=-=-=-=", Fore.GREEN, "", ) message_sequence = make_message(now_node=now_node, max_length=config.max_subtask_chain_length, config=config, now_dealing_task=now_dealing_task) function_call = None if now_node.get_depth() == config.max_subtask_chain_length - 1: function_call = {"name": "subtask_submit"} file_archi, _, = self.xagent_core_components.toolserver_interface.execute_command_client( "FileSystemEnv_print_filesys_struture", {"return_root": True}) file_archi, length = clip_text(file_archi, 1000, clip_end=True) human_prompt = "" if config.enable_ask_human_for_help: human_prompt = "- Use 'ask_human_for_help' when you need help, remember to be specific to your requirement to help user to understand your problem." else: human_prompt = "- Human is not available for help. You are not allowed to ask human for help in any form or channel. Solve the problem by yourself. If information is not enough, try your best to use default value." all_plan = plan_agent.latest_plan.to_json() if config.enable_summary: all_plan = summarize_plan(all_plan) else: all_plan = json.dumps(all_plan, indent=2, ensure_ascii=False) new_message, tokens = agent.parse( placeholders={ "system": { "all_plan": all_plan }, "user": { "workspace_files": file_archi, "subtask_id": now_dealing_task.get_subtask_id(to_str=True), "max_length": config.max_subtask_chain_length, "step_num": str(now_node.get_depth()+1), "human_help_prompt": human_prompt, } }, arguments=arguments, functions=functions, function_call=function_call, additional_messages=message_sequence, additional_insert_index=-1 ) new_tree_node = agent.message_to_tool_node(new_message) print_data = self.xagent_core_components.print_assistant_thoughts( new_tree_node.data, False ) tool_output, tool_output_status_code, need_for_plan_refine, using_tools = self.xagent_core_components.function_handler.handle_tool_call( new_tree_node) self.need_for_plan_refine = need_for_plan_refine now_attempt_tree.make_father_relation(now_node, new_tree_node) self.xagent_core_components.interaction.insert_data( data={**print_data, "using_tools": using_tools}, status="inner", current=task_id, is_include_pictures=self.is_include_pictures(using_tools)) now_node = new_tree_node
if tool_output_status_code == ToolCallStatusCode.SUBMIT_AS_SUCCESS:
9
2023-10-16 03:44:57+00:00
12k
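The ToolNode and TaskSearchTree snippets in the context of the record above describe the tree bookkeeping that ReACTChainSearch relies on: each node tracks its father, its children, and the order in which it was expanded, while depth and subtree size are computed recursively from the parent/child links. A condensed, runnable sketch of that bookkeeping is shown below; the class names here are simplified stand-ins, not the actual XAgent classes.

```python
# Condensed sketch of the ToolNode / TaskSearchTree bookkeeping described above.
class Node:
    def __init__(self):
        self.father = None      # parent link (None for the root)
        self.children = []      # child nodes
        self.expand_num = 0     # order in which this node was expanded

    def get_depth(self):
        # Root has depth 0; each parent link adds one.
        return 0 if self.father is None else self.father.get_depth() + 1

    def get_subtree_size(self):
        # Count this node plus all of its descendants.
        return 1 + sum(child.get_subtree_size() for child in self.children)


class SearchTree:
    def __init__(self):
        self.root = Node()
        self.now_expand_num = 1

    def make_father_relation(self, father, child):
        # Newly attached children record the global expansion order.
        child.expand_num = self.now_expand_num
        self.now_expand_num += 1
        child.father = father
        father.children.append(child)


tree = SearchTree()
child = Node()
tree.make_father_relation(tree.root, child)
print(child.get_depth(), tree.root.get_subtree_size())  # prints: 1 2
```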
PKU-YuanGroup/Video-LLaVA
llava/model/language_model/mpt/modeling_mpt.py
[ { "identifier": "attn_bias_shape", "path": "llava/model/language_model/mpt/attention.py", "snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n if (prefix_lm or not causal) or use_sequence_id:\n return (1, n_heads, seq_len, seq_len)\n return (1, n_heads, 1, seq_len)\n elif prefix_lm or use_sequence_id:\n return (1, 1, seq_len, seq_len)\n return None\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')" }, { "identifier": "build_attn_bias", "path": "llava/model/language_model/mpt/attention.py", "snippet": "def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n (device, dtype) = (attn_bias.device, attn_bias.dtype)\n attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))\n return attn_bias\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')" }, { "identifier": "MPTBlock", "path": "llava/model/language_model/mpt/blocks.py", "snippet": "class MPTBlock(nn.Module):\n\n def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):\n del kwargs\n super().__init__()\n norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]\n attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]\n self.norm_1 = norm_class(d_model, device=device)\n self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)\n self.norm_2 = norm_class(d_model, device=device)\n self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)\n self.resid_attn_dropout = nn.Dropout(resid_pdrop)\n self.resid_ffn_dropout = nn.Dropout(resid_pdrop)\n\n def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:\n a = self.norm_1(x)\n (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)\n x = x + self.resid_attn_dropout(b)\n m = self.norm_2(x)\n n = self.ffn(m)\n x = x + self.resid_ffn_dropout(n)\n return (x, attn_weights, past_key_value)" }, { "identifier": "SharedEmbedding", "path": "llava/model/language_model/mpt/custom_embedding.py", "snippet": "class SharedEmbedding(nn.Embedding):\n\n def forward(self, input: Tensor, unembed: bool=False) -> Tensor:\n if unembed:\n return F.linear(input, self.weight)\n return super().forward(input)" }, { "identifier": "NORM_CLASS_REGISTRY", "path": "llava/model/language_model/mpt/norm.py", "snippet": "NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': 
RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}" }, { "identifier": "MPTConfig", "path": "llava/model/language_model/mpt/configuration_mpt.py", "snippet": "class MPTConfig(PretrainedConfig):\n model_type = 'mpt'\n\n def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):\n \"\"\"The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the MLP.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 
0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n multiquery_attention (bool): Whether to use multiquery attention implementation.\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n \"\"\"\n self.d_model = d_model\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.expansion_ratio = expansion_ratio\n self.max_seq_len = max_seq_len\n self.vocab_size = vocab_size\n self.resid_pdrop = resid_pdrop\n self.emb_pdrop = emb_pdrop\n self.learned_pos_emb = learned_pos_emb\n self.attn_config = attn_config\n self.init_device = init_device\n self.logit_scale = logit_scale\n self.no_bias = no_bias\n self.verbose = verbose\n self.embedding_fraction = embedding_fraction\n self.norm_type = norm_type\n self.use_cache = use_cache\n self.init_config = init_config\n if 'name' in kwargs:\n del kwargs['name']\n if 'loss_fn' in kwargs:\n del kwargs['loss_fn']\n super().__init__(**kwargs)\n self._validate_config()\n\n def _set_config_defaults(self, config, config_defaults):\n for (k, v) in config_defaults.items():\n if k not in config:\n config[k] = v\n return config\n\n def _validate_config(self):\n self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)\n self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)\n if self.d_model % self.n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads')\n if any((prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):\n raise ValueError(\"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1\")\n if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:\n raise ValueError(f\"Unknown attn_impl={self.attn_config['attn_impl']}\")\n if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')\n if self.attn_config['alibi'] and self.attn_config['attn_impl'] 
not in ['torch', 'triton']:\n raise NotImplementedError('alibi only implemented with torch and triton attention.')\n if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')\n if self.embedding_fraction > 1 or self.embedding_fraction <= 0:\n raise ValueError('model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!')\n if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':\n raise ValueError(f\"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.\")\n if self.init_config.get('name', None) is None:\n raise ValueError(f\"self.init_config={self.init_config!r} 'name' needs to be set.\")\n if not self.learned_pos_emb and (not self.attn_config['alibi']):\n raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')" }, { "identifier": "AutoTokenizerForMOD", "path": "llava/model/language_model/mpt/adapt_tokenizer.py", "snippet": "class AutoTokenizerForMOD(AutoTokenizer):\n \"\"\"AutoTokenizer + Adaptation for MOD.\n\n A simple wrapper around AutoTokenizer to make instantiating\n an MOD-adapted tokenizer a bit easier.\n\n MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),\n a padding token, and a property to get the token ids of the\n sentinel tokens.\n \"\"\"\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n \"\"\"See `AutoTokenizer.from_pretrained` docstring.\"\"\"\n tokenizer = super().from_pretrained(*args, **kwargs)\n adapt_tokenizer_for_denoising(tokenizer)\n return tokenizer" }, { "identifier": "adapt_tokenizer_for_denoising", "path": "llava/model/language_model/mpt/adapt_tokenizer.py", "snippet": "def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):\n \"\"\"Adds sentinel tokens and padding token (if missing).\n\n Expands the tokenizer vocabulary to include sentinel tokens\n used in mixture-of-denoiser tasks as well as a padding token.\n\n All added tokens are added as special tokens. 
No tokens are\n added if sentinel tokens and padding token already exist.\n \"\"\"\n sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]\n tokenizer.add_tokens(sentinels_to_add, special_tokens=True)\n if tokenizer.pad_token is None:\n tokenizer.add_tokens('<pad>', special_tokens=True)\n tokenizer.pad_token = '<pad>'\n assert tokenizer.pad_token_id is not None\n sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])\n _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids\n tokenizer.sentinel_token_ids = _sentinel_token_ids" }, { "identifier": "add_bidirectional_mask_if_missing", "path": "llava/model/language_model/mpt/hf_prefixlm_converter.py", "snippet": "def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):\n \"\"\"Attempts to add bidirectional_mask to batch if missing.\n\n Raises:\n KeyError if bidirectional_mask is missing and can't be inferred\n \"\"\"\n if 'bidirectional_mask' not in batch:\n if batch.get('mode', None) == 'icl_task':\n batch['bidirectional_mask'] = batch['attention_mask'].clone()\n for (i, continuation_indices) in enumerate(batch['continuation_indices']):\n batch['bidirectional_mask'][i, continuation_indices] = 0\n elif 'labels' in batch and 'attention_mask' in batch:\n batch['bidirectional_mask'] = torch.logical_and(torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100)).type_as(batch['attention_mask'])\n else:\n raise KeyError('No bidirectional_mask in batch and not sure how to construct one.')" }, { "identifier": "convert_hf_causal_lm_to_prefix_lm", "path": "llava/model/language_model/mpt/hf_prefixlm_converter.py", "snippet": "def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:\n \"\"\"Converts a HuggingFace Causal LM to a Prefix LM.\n\n Supported HuggingFace model classes:\n - `GPT2LMHeadModel`\n - `GPTNeoForCausalLM`\n - `GPTNeoXForCausalLM`\n - `GPTJForCausalLM`\n - `BloomForCausalLM`\n - `OPTForCausalLM`\n\n Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the\n `generate` method and/or select underlying methods depending on the model class.\n\n These changes preserve the model API, but add a new input to `forward`: \"bidirectional_mask\".\n\n Notes on training:\n To actually train the converted model as a Prefix LM, training batches will need to indicate\n the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.\n\n **This is not a standard input and requires custom layers either within or after your dataloader.**\n\n In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`\n such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.\n That is, the prefix portion of the sequence should not generate any loss. Loss should only be\n generated by the target portion of the sequence.\n\n Notes on `GPTNeoForCausalLM`:\n To simplify the implementation, \"global\" and \"local\" attention layers are handled differently.\n For \"global\" layers, we handle conversion as described above. 
For \"local\" layers, which use a\n causal attention mask within a restricted local window, we do not alter the masking.\n\n Notes on `forward` method conversion:\n After conversion, the `forward` method will handle a new input, `bidirectional_mask`,\n which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions\n belonging to the prefix (prefix tokens can attend to one another bidirectionally), and\n 0 indicates token positions belonging to the target.\n\n The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing\n causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset\n the causal masks before returning the result.\n\n Notes on `generate` method conversion:\n After conversion, the `generate` method will have the same signature but will internally\n convert all causal masks to be purely bidirectional, call the original `generate` method, and\n (where appropriate) reset the causal masks before returning the result.\n\n This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token\n \"prompt\" passed to `generate` (which is treated as the prefix) and then sequentially generates\n each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one\n another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and\n previously-generated tokens (also as expected in a Prefix LM).\n\n To preserve the API, the original methods are renamed to `_original_forward` and\n `_original_generate`, and replaced with new `forward` and `generate` methods that wrap\n them, respectively. Although implementation details vary by model class.\n \"\"\"\n if isinstance(model, _SUPPORTED_GPT_MODELS):\n return _convert_gpt_causal_lm_to_prefix_lm(model)\n elif isinstance(model, BloomForCausalLM):\n return _convert_bloom_causal_lm_to_prefix_lm(model)\n elif isinstance(model, OPTForCausalLM):\n return _convert_opt_causal_lm_to_prefix_lm(model)\n else:\n raise TypeError(f'Cannot convert model to Prefix LM. ' + f'Model does not belong to set of supported HF models:' + f'\\n{_SUPPORTED_HF_MODELS}')" }, { "identifier": "init_empty_weights", "path": "llava/model/language_model/mpt/meta_init_context.py", "snippet": "@contextmanager\ndef init_empty_weights(include_buffers: bool=False):\n \"\"\"Meta initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the meta device, therefore creating an empty model. Useful when just\n initializing the model would blow the available RAM.\n\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n # Initialize a model with 100 billions parameters in no time and without using any RAM.\n with init_empty_weights():\n tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n ```\n\n <Tip warning={true}>\n\n Any model created under this context manager has no weights. As such you can't do something like\n `model.to(some_device)` with it. 
To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n\n </Tip>\n \"\"\"\n with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:\n yield f" }, { "identifier": "MODEL_INIT_REGISTRY", "path": "llava/model/language_model/mpt/param_init_fns.py", "snippet": "MODEL_INIT_REGISTRY = {'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_}" }, { "identifier": "generic_param_init_fn_", "path": "llava/model/language_model/mpt/param_init_fns.py", "snippet": "def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):\n del kwargs\n if verbose > 1:\n warnings.warn(f'If model has bias parameters they are initialized to 0.')\n init_div_is_residual = init_div_is_residual\n if init_div_is_residual is False:\n div_is_residual = 1.0\n elif init_div_is_residual is True:\n div_is_residual = math.sqrt(2 * n_layers)\n elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):\n div_is_residual = init_div_is_residual\n elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():\n div_is_residual = float(init_div_is_residual)\n else:\n div_is_residual = 1.0\n raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')\n if init_div_is_residual is not False:\n if verbose > 1:\n warnings.warn(f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.')\n if isinstance(module, nn.Linear):\n if hasattr(module, '_fused'):\n fused_init_helper_(module, init_fn_)\n else:\n init_fn_(module.weight)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n if init_div_is_residual is not False and getattr(module, '_is_residual', False):\n with torch.no_grad():\n module.weight.div_(div_is_residual)\n elif isinstance(module, nn.Embedding):\n if emb_init_std is not None:\n std = emb_init_std\n if std == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')\n elif emb_init_uniform_lim is not None:\n lim = emb_init_uniform_lim\n if isinstance(lim, Sequence):\n if len(lim) > 2:\n raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')\n if lim[0] == lim[1]:\n warnings.warn(f'Embedding layer initialized to {lim[0]}.')\n else:\n if lim == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n lim = [-lim, lim]\n (a, b) = lim\n emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')\n else:\n emb_init_fn_ = init_fn_\n emb_init_fn_(module.weight)\n elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):\n if verbose > 1:\n warnings.warn(f'Norm weights are set to 1. 
If norm layer has a bias it is initialized to 0.')\n if hasattr(module, 'weight') and module.weight is not None:\n torch.nn.init.ones_(module.weight)\n if hasattr(module, 'bias') and module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.MultiheadAttention):\n if module._qkv_same_embed_dim:\n assert module.in_proj_weight is not None\n assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)\n assert d_model is not None\n _d = d_model\n splits = (0, _d, 2 * _d, 3 * _d)\n for (s, e) in zip(splits[:-1], splits[1:]):\n init_fn_(module.in_proj_weight[s:e])\n else:\n assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)\n assert module.in_proj_weight is None\n init_fn_(module.q_proj_weight)\n init_fn_(module.k_proj_weight)\n init_fn_(module.v_proj_weight)\n if module.in_proj_bias is not None:\n torch.nn.init.zeros_(module.in_proj_bias)\n if module.bias_k is not None:\n torch.nn.init.zeros_(module.bias_k)\n if module.bias_v is not None:\n torch.nn.init.zeros_(module.bias_v)\n init_fn_(module.out_proj.weight)\n if init_div_is_residual is not False and getattr(module.out_proj, '_is_residual', False):\n with torch.no_grad():\n module.out_proj.weight.div_(div_is_residual)\n if module.out_proj.bias is not None:\n torch.nn.init.zeros_(module.out_proj.bias)\n else:\n for _ in module.parameters(recurse=False):\n raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')" } ]
import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func
9,446
assert isinstance(attn_bias, torch.Tensor) attn_bias = self._apply_sequence_id(attn_bias, sequence_id) if attention_mask is not None: s_k = attention_mask.shape[-1] if attn_bias is None: attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype) else: _s_k = max(0, attn_bias.size(-1) - s_k) attn_bias = attn_bias[:, :, :, _s_k:] if prefix_mask is not None and attention_mask.shape != prefix_mask.shape: raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.') min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val) return (attn_bias, None) def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor): (s_k, s_q) = attn_bias.shape[-2:] if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len: raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_length} ' + f'but are {s_k} and {s_q}.') seq_len = prefix_mask.shape[-1] if seq_len > self.config.max_seq_len: raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}') attn_bias = attn_bias[..., :seq_len, :seq_len] causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len) prefix = prefix_mask.view(-1, 1, 1, seq_len) cannot_attend = ~torch.logical_or(causal, prefix.bool()) min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(cannot_attend, min_val) return attn_bias def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor): seq_len = sequence_id.shape[-1] if seq_len > self.config.max_seq_len: raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}') attn_bias = attn_bias[..., :seq_len, :seq_len] cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1) min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(cannot_attend, min_val) return attn_bias def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None): return_dict = return_dict if return_dict is not None else self.config.return_dict use_cache = use_cache if use_cache is not None else self.config.use_cache if attention_mask is not None: attention_mask = attention_mask.bool() if prefix_mask is not None: prefix_mask = prefix_mask.bool() if not return_dict: raise NotImplementedError('return_dict False is not implemented yet for MPT') if output_attentions: if self.attn_impl != 'torch': raise NotImplementedError('output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.') if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training: raise NotImplementedError('MPT does not support training with left padding.') if self.prefix_lm and prefix_mask is None: raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.') if self.training: if self.attn_uses_sequence_id and sequence_id is None: 
raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.') elif self.attn_uses_sequence_id is False and sequence_id is not None: warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.') if input_ids is not None: S = input_ids.size(1) assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}' tok_emb = self.wte(input_ids) else: assert inputs_embeds is not None assert self.alibi, 'inputs_embeds is not implemented for MPT unless for alibi.' S = inputs_embeds.size(1) tok_emb = inputs_embeds if self.alibi: x = tok_emb else: past_position = 0 if past_key_values is not None: if len(past_key_values) != self.config.n_layers: raise ValueError(f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).') past_position = past_key_values[0][0].size(1) if self.attn_impl == 'torch': past_position = past_key_values[0][0].size(3) if S + past_position > self.config.max_seq_len: raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.') pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0) if attention_mask is not None: pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0) pos_emb = self.wpe(pos) x = tok_emb + pos_emb if self.embedding_fraction == 1: x = self.emb_drop(x) else: x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction) assert isinstance(self.emb_drop, nn.Module) x = self.emb_drop(x_shrunk) (attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id) if use_cache and past_key_values is None: past_key_values = [() for _ in range(self.config.n_layers)] all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for (b_idx, block) in enumerate(self.blocks): if output_hidden_states: assert all_hidden_states is not None all_hidden_states = all_hidden_states + (x,) past_key_value = past_key_values[b_idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: (x, attn_weights, past_key_value) = torch.utils.checkpoint.checkpoint(block, x, past_key_value, attn_bias, attention_mask, self.is_causal) else: (x, attn_weights, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal) if past_key_values is not None: past_key_values[b_idx] = past_key_value if output_attentions: assert all_self_attns is not None all_self_attns = all_self_attns + (attn_weights,) x = self.norm_f(x) if output_hidden_states: assert all_hidden_states is not None all_hidden_states = all_hidden_states + (x,) return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns) def param_init_fn(self, module): init_fn_name = self.config.init_config['name']
"""A simple, flexible implementation of a GPT model. Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py """ try: except: pass Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast] class MPTPreTrainedModel(PreTrainedModel): config_class = MPTConfig base_model_prefix = 'model' _no_split_modules = ['MPTBlock'] class MPTModel(MPTPreTrainedModel): def __init__(self, config: MPTConfig): config._validate_config() super().__init__(config) self.attn_impl = config.attn_config['attn_impl'] self.prefix_lm = config.attn_config['prefix_lm'] self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id'] self.alibi = config.attn_config['alibi'] self.alibi_bias_max = config.attn_config['alibi_bias_max'] if config.init_device == 'mixed': if dist.get_local_rank() == 0: config.init_device = 'cpu' else: config.init_device = 'meta' if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys(): norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys()) raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).') norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()] self.embedding_fraction = config.embedding_fraction self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device) if not self.alibi: self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device) self.emb_drop = nn.Dropout(config.emb_pdrop) self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)]) self.norm_f = norm_class(config.d_model, device=config.init_device) if config.init_device != 'meta': print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.') self.apply(self.param_init_fn) self.is_causal = not self.prefix_lm self._attn_bias_initialized = False self.attn_bias = None self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id) if config.no_bias: for module in self.modules(): if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter): if config.verbose: warnings.warn(f'Removing bias ({module.bias}) from {module}.') module.register_parameter('bias', None) if config.verbose and config.verbose > 2: print(self) if 'verbose' not in self.config.init_config: self.config.init_config['verbose'] = self.config.verbose if self.config.init_config['verbose'] > 1: init_fn_name = self.config.init_config['name'] warnings.warn(f'Using {init_fn_name} initialization.') self.gradient_checkpointing = False def get_input_embeddings(self): return self.wte def set_input_embeddings(self, value): self.wte = value @torch.no_grad() def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None): if not self._attn_bias_initialized: if self.attn_bias_shape: self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype) self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max) self._attn_bias_initialized = True if self.attn_impl == 'flash': return (self.attn_bias, attention_mask) if self.attn_bias is not None: self.attn_bias = 
self.attn_bias.to(dtype=dtype, device=device) attn_bias = self.attn_bias if self.prefix_lm: assert isinstance(attn_bias, torch.Tensor) assert isinstance(prefix_mask, torch.Tensor) attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask) if self.attn_uses_sequence_id and sequence_id is not None: assert isinstance(attn_bias, torch.Tensor) attn_bias = self._apply_sequence_id(attn_bias, sequence_id) if attention_mask is not None: s_k = attention_mask.shape[-1] if attn_bias is None: attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype) else: _s_k = max(0, attn_bias.size(-1) - s_k) attn_bias = attn_bias[:, :, :, _s_k:] if prefix_mask is not None and attention_mask.shape != prefix_mask.shape: raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.') min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val) return (attn_bias, None) def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor): (s_k, s_q) = attn_bias.shape[-2:] if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len: raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_length} ' + f'but are {s_k} and {s_q}.') seq_len = prefix_mask.shape[-1] if seq_len > self.config.max_seq_len: raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}') attn_bias = attn_bias[..., :seq_len, :seq_len] causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len) prefix = prefix_mask.view(-1, 1, 1, seq_len) cannot_attend = ~torch.logical_or(causal, prefix.bool()) min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(cannot_attend, min_val) return attn_bias def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor): seq_len = sequence_id.shape[-1] if seq_len > self.config.max_seq_len: raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}') attn_bias = attn_bias[..., :seq_len, :seq_len] cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1) min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(cannot_attend, min_val) return attn_bias def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None): return_dict = return_dict if return_dict is not None else self.config.return_dict use_cache = use_cache if use_cache is not None else self.config.use_cache if attention_mask is not None: attention_mask = attention_mask.bool() if prefix_mask is not None: prefix_mask = prefix_mask.bool() if not return_dict: raise NotImplementedError('return_dict False is not implemented yet for MPT') if output_attentions: if self.attn_impl != 'torch': raise NotImplementedError('output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.') if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and 
self.training: raise NotImplementedError('MPT does not support training with left padding.') if self.prefix_lm and prefix_mask is None: raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.') if self.training: if self.attn_uses_sequence_id and sequence_id is None: raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.') elif self.attn_uses_sequence_id is False and sequence_id is not None: warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.') if input_ids is not None: S = input_ids.size(1) assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}' tok_emb = self.wte(input_ids) else: assert inputs_embeds is not None assert self.alibi, 'inputs_embeds is not implemented for MPT unless for alibi.' S = inputs_embeds.size(1) tok_emb = inputs_embeds if self.alibi: x = tok_emb else: past_position = 0 if past_key_values is not None: if len(past_key_values) != self.config.n_layers: raise ValueError(f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).') past_position = past_key_values[0][0].size(1) if self.attn_impl == 'torch': past_position = past_key_values[0][0].size(3) if S + past_position > self.config.max_seq_len: raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.') pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0) if attention_mask is not None: pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0) pos_emb = self.wpe(pos) x = tok_emb + pos_emb if self.embedding_fraction == 1: x = self.emb_drop(x) else: x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction) assert isinstance(self.emb_drop, nn.Module) x = self.emb_drop(x_shrunk) (attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id) if use_cache and past_key_values is None: past_key_values = [() for _ in range(self.config.n_layers)] all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for (b_idx, block) in enumerate(self.blocks): if output_hidden_states: assert all_hidden_states is not None all_hidden_states = all_hidden_states + (x,) past_key_value = past_key_values[b_idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: (x, attn_weights, past_key_value) = torch.utils.checkpoint.checkpoint(block, x, past_key_value, attn_bias, attention_mask, self.is_causal) else: (x, attn_weights, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal) if past_key_values is not None: past_key_values[b_idx] = past_key_value if output_attentions: assert all_self_attns is not None all_self_attns = all_self_attns + (attn_weights,) x = self.norm_f(x) if output_hidden_states: assert 
all_hidden_states is not None all_hidden_states = all_hidden_states + (x,) return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns) def param_init_fn(self, module): init_fn_name = self.config.init_config['name']
MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config)
11
2023-10-23 05:43:54+00:00
12k
deepseek-ai/DreamCraft3D
threestudio/systems/base.py
[ { "identifier": "Exporter", "path": "threestudio/models/exporters/base.py", "snippet": "class Exporter(BaseObject):\n @dataclass\n class Config(BaseObject.Config):\n save_video: bool = False\n\n cfg: Config\n\n def configure(\n self,\n geometry: BaseImplicitGeometry,\n material: BaseMaterial,\n background: BaseBackground,\n ) -> None:\n @dataclass\n class SubModules:\n geometry: BaseImplicitGeometry\n material: BaseMaterial\n background: BaseBackground\n\n self.sub_modules = SubModules(geometry, material, background)\n\n @property\n def geometry(self) -> BaseImplicitGeometry:\n return self.sub_modules.geometry\n\n @property\n def material(self) -> BaseMaterial:\n return self.sub_modules.material\n\n @property\n def background(self) -> BaseBackground:\n return self.sub_modules.background\n\n def __call__(self, *args, **kwargs) -> List[ExporterOutput]:\n raise NotImplementedError" }, { "identifier": "ExporterOutput", "path": "threestudio/models/exporters/base.py", "snippet": "class ExporterOutput:\n save_name: str\n save_type: str\n params: Dict[str, Any]" }, { "identifier": "parse_optimizer", "path": "threestudio/systems/utils.py", "snippet": "def parse_optimizer(config, model):\n if hasattr(config, \"params\"):\n params = [\n {\"params\": get_parameters(model, name), \"name\": name, **args}\n for name, args in config.params.items()\n ]\n threestudio.debug(f\"Specify optimizer params: {config.params}\")\n else:\n params = model.parameters()\n if config.name in [\"FusedAdam\"]:\n import apex\n\n optim = getattr(apex.optimizers, config.name)(params, **config.args)\n elif config.name in [\"Adan\"]:\n from threestudio.systems import optimizers\n\n optim = getattr(optimizers, config.name)(params, **config.args)\n else:\n optim = getattr(torch.optim, config.name)(params, **config.args)\n return optim" }, { "identifier": "parse_scheduler", "path": "threestudio/systems/utils.py", "snippet": "def parse_scheduler(config, optimizer):\n interval = config.get(\"interval\", \"epoch\")\n assert interval in [\"epoch\", \"step\"]\n if config.name == \"SequentialLR\":\n scheduler = {\n \"scheduler\": lr_scheduler.SequentialLR(\n optimizer,\n [\n parse_scheduler(conf, optimizer)[\"scheduler\"]\n for conf in config.schedulers\n ],\n milestones=config.milestones,\n ),\n \"interval\": interval,\n }\n elif config.name == \"ChainedScheduler\":\n scheduler = {\n \"scheduler\": lr_scheduler.ChainedScheduler(\n [\n parse_scheduler(conf, optimizer)[\"scheduler\"]\n for conf in config.schedulers\n ]\n ),\n \"interval\": interval,\n }\n else:\n scheduler = {\n \"scheduler\": get_scheduler(config.name)(optimizer, **config.args),\n \"interval\": interval,\n }\n return scheduler" }, { "identifier": "Updateable", "path": "threestudio/utils/base.py", "snippet": "class Updateable:\n def do_update_step(\n self, epoch: int, global_step: int, on_load_weights: bool = False\n ):\n for attr in self.__dir__():\n if attr.startswith(\"_\"):\n continue\n try:\n module = getattr(self, attr)\n except:\n continue # ignore attributes like property, which can't be retrived using getattr?\n if isinstance(module, Updateable):\n module.do_update_step(\n epoch, global_step, on_load_weights=on_load_weights\n )\n self.update_step(epoch, global_step, on_load_weights=on_load_weights)\n\n def do_update_step_end(self, epoch: int, global_step: int):\n for attr in self.__dir__():\n if attr.startswith(\"_\"):\n continue\n try:\n module = getattr(self, attr)\n except:\n continue # ignore attributes like property, which can't be retrived using 
getattr?\n if isinstance(module, Updateable):\n module.do_update_step_end(epoch, global_step)\n self.update_step_end(epoch, global_step)\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n # override this method to implement custom update logic\n # if on_load_weights is True, you should be careful doing things related to model evaluations,\n # as the models and tensors are not guarenteed to be on the same device\n pass\n\n def update_step_end(self, epoch: int, global_step: int):\n pass" }, { "identifier": "update_end_if_possible", "path": "threestudio/utils/base.py", "snippet": "def update_end_if_possible(module: Any, epoch: int, global_step: int) -> None:\n if isinstance(module, Updateable):\n module.do_update_step_end(epoch, global_step)" }, { "identifier": "update_if_possible", "path": "threestudio/utils/base.py", "snippet": "def update_if_possible(module: Any, epoch: int, global_step: int) -> None:\n if isinstance(module, Updateable):\n module.do_update_step(epoch, global_step)" }, { "identifier": "parse_structured", "path": "threestudio/utils/config.py", "snippet": "def parse_structured(fields: Any, cfg: Optional[Union[dict, DictConfig]] = None) -> Any:\n scfg = OmegaConf.structured(fields(**cfg))\n return scfg" }, { "identifier": "C", "path": "threestudio/utils/misc.py", "snippet": "def C(value: Any, epoch: int, global_step: int) -> float:\n if isinstance(value, int) or isinstance(value, float):\n pass\n else:\n value = config_to_primitive(value)\n if not isinstance(value, list):\n raise TypeError(\"Scalar specification only supports list, got\", type(value))\n if len(value) == 3:\n value = [0] + value\n if len(value) >= 6:\n select_i = 3\n for i in range(3, len(value) - 2, 2):\n if global_step >= value[i]:\n select_i = i + 2\n if select_i != 3:\n start_value, start_step = value[select_i - 3], value[select_i - 2]\n else:\n start_step, start_value = value[:2]\n end_value, end_step = value[select_i - 1], value[select_i]\n value = [start_step, start_value, end_value, end_step]\n assert len(value) == 4\n start_step, start_value, end_value, end_step = value\n if isinstance(end_step, int):\n current_step = global_step\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n elif isinstance(end_step, float):\n current_step = epoch\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n return value" }, { "identifier": "cleanup", "path": "threestudio/utils/misc.py", "snippet": "def cleanup():\n gc.collect()\n torch.cuda.empty_cache()\n tcnn.free_temporary_memory()" }, { "identifier": "get_device", "path": "threestudio/utils/misc.py", "snippet": "def get_device():\n return torch.device(f\"cuda:{get_rank()}\")" }, { "identifier": "load_module_weights", "path": "threestudio/utils/misc.py", "snippet": "def load_module_weights(\n path, module_name=None, ignore_modules=None, map_location=None\n) -> Tuple[dict, int, int]:\n if module_name is not None and ignore_modules is not None:\n raise ValueError(\"module_name and ignore_modules cannot be both set\")\n if map_location is None:\n map_location = get_device()\n\n ckpt = torch.load(path, map_location=map_location)\n state_dict = ckpt[\"state_dict\"]\n state_dict_to_load = state_dict\n\n if ignore_modules is not None:\n state_dict_to_load = {}\n for k, v in state_dict.items():\n ignore = any(\n [k.startswith(ignore_module + \".\") for ignore_module in 
ignore_modules]\n )\n if ignore:\n continue\n state_dict_to_load[k] = v\n\n if module_name is not None:\n state_dict_to_load = {}\n for k, v in state_dict.items():\n m = re.match(rf\"^{module_name}\\.(.*)$\", k)\n if m is None:\n continue\n state_dict_to_load[m.group(1)] = v\n\n return state_dict_to_load, ckpt[\"epoch\"], ckpt[\"global_step\"]" }, { "identifier": "find_last_path", "path": "threestudio/utils/misc.py", "snippet": "def find_last_path(path: str):\n if (path is not None) and (\"LAST\" in path):\n path = path.replace(\" \", \"_\")\n base_dir_prefix, suffix = path.split(\"LAST\", 1)\n base_dir = os.path.dirname(base_dir_prefix)\n prefix = os.path.split(base_dir_prefix)[-1]\n base_dir_prefix = os.path.join(base_dir, prefix)\n all_path = os.listdir(base_dir)\n all_path = [os.path.join(base_dir, dir) for dir in all_path]\n filtered_path = [dir for dir in all_path if dir.startswith(base_dir_prefix)]\n filtered_path.sort(reverse=True)\n last_path = filtered_path[0]\n new_path = last_path + suffix\n if os.path.exists(new_path):\n return new_path\n else:\n raise FileNotFoundError(new_path)\n else:\n return path" }, { "identifier": "SaverMixin", "path": "threestudio/utils/saving.py", "snippet": "class SaverMixin:\n _save_dir: Optional[str] = None\n _wandb_logger: Optional[WandbLogger] = None\n\n def set_save_dir(self, save_dir: str):\n self._save_dir = save_dir\n\n def get_save_dir(self):\n if self._save_dir is None:\n raise ValueError(\"Save dir is not set\")\n return self._save_dir\n\n def convert_data(self, data):\n if data is None:\n return None\n elif isinstance(data, np.ndarray):\n return data\n elif isinstance(data, torch.Tensor):\n return data.detach().cpu().numpy()\n elif isinstance(data, list):\n return [self.convert_data(d) for d in data]\n elif isinstance(data, dict):\n return {k: self.convert_data(v) for k, v in data.items()}\n else:\n raise TypeError(\n \"Data must be in type numpy.ndarray, torch.Tensor, list or dict, getting\",\n type(data),\n )\n\n def get_save_path(self, filename):\n save_path = os.path.join(self.get_save_dir(), filename)\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n return save_path\n\n def create_loggers(self, cfg_loggers: DictConfig) -> None:\n if \"wandb\" in cfg_loggers.keys() and cfg_loggers.wandb.enable:\n self._wandb_logger = WandbLogger(\n project=cfg_loggers.wandb.project, name=cfg_loggers.wandb.name\n )\n\n def get_loggers(self) -> List:\n if self._wandb_logger:\n return [self._wandb_logger]\n else:\n return []\n\n DEFAULT_RGB_KWARGS = {\"data_format\": \"HWC\", \"data_range\": (0, 1)}\n DEFAULT_UV_KWARGS = {\n \"data_format\": \"HWC\",\n \"data_range\": (0, 1),\n \"cmap\": \"checkerboard\",\n }\n DEFAULT_GRAYSCALE_KWARGS = {\"data_range\": None, \"cmap\": \"jet\"}\n DEFAULT_GRID_KWARGS = {\"align\": \"max\"}\n\n def get_rgb_image_(self, img, data_format, data_range, rgba=False):\n img = self.convert_data(img)\n assert data_format in [\"CHW\", \"HWC\"]\n if data_format == \"CHW\":\n img = img.transpose(1, 2, 0)\n if img.dtype != np.uint8:\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (\n (img - data_range[0]) / (data_range[1] - data_range[0]) * 255.0\n ).astype(np.uint8)\n nc = 4 if rgba else 3\n imgs = [img[..., start : start + nc] for start in range(0, img.shape[-1], nc)]\n imgs = [\n img_\n if img_.shape[-1] == nc\n else np.concatenate(\n [\n img_,\n np.zeros(\n (img_.shape[0], img_.shape[1], nc - img_.shape[2]),\n dtype=img_.dtype,\n ),\n ],\n axis=-1,\n )\n for img_ in imgs\n ]\n img = np.concatenate(imgs, 
axis=1)\n if rgba:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n else:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n def _save_rgb_image(\n self,\n filename,\n img,\n data_format,\n data_range,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n img = self.get_rgb_image_(img, data_format, data_range)\n cv2.imwrite(filename, img)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Image(self.get_save_path(filename)),\n \"trainer/global_step\": step,\n }\n )\n\n def save_rgb_image(\n self,\n filename,\n img,\n data_format=DEFAULT_RGB_KWARGS[\"data_format\"],\n data_range=DEFAULT_RGB_KWARGS[\"data_range\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n save_path = self.get_save_path(filename)\n self._save_rgb_image(save_path, img, data_format, data_range, name, step)\n return save_path\n\n def get_uv_image_(self, img, data_format, data_range, cmap):\n img = self.convert_data(img)\n assert data_format in [\"CHW\", \"HWC\"]\n if data_format == \"CHW\":\n img = img.transpose(1, 2, 0)\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in [\"checkerboard\", \"color\"]\n if cmap == \"checkerboard\":\n n_grid = 64\n mask = (img * n_grid).astype(int)\n mask = (mask[..., 0] + mask[..., 1]) % 2 == 0\n img = np.ones((img.shape[0], img.shape[1], 3), dtype=np.uint8) * 255\n img[mask] = np.array([255, 0, 255], dtype=np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif cmap == \"color\":\n img_ = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n img_[..., 0] = (img[..., 0] * 255).astype(np.uint8)\n img_[..., 1] = (img[..., 1] * 255).astype(np.uint8)\n img_ = cv2.cvtColor(img_, cv2.COLOR_RGB2BGR)\n img = img_\n return img\n\n def save_uv_image(\n self,\n filename,\n img,\n data_format=DEFAULT_UV_KWARGS[\"data_format\"],\n data_range=DEFAULT_UV_KWARGS[\"data_range\"],\n cmap=DEFAULT_UV_KWARGS[\"cmap\"],\n ) -> str:\n save_path = self.get_save_path(filename)\n img = self.get_uv_image_(img, data_format, data_range, cmap)\n cv2.imwrite(save_path, img)\n return save_path\n\n def get_grayscale_image_(self, img, data_range, cmap):\n img = self.convert_data(img)\n img = np.nan_to_num(img)\n if data_range is None:\n img = (img - img.min()) / (img.max() - img.min())\n else:\n img = img.clip(data_range[0], data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in [None, \"jet\", \"magma\", \"spectral\"]\n if cmap == None:\n img = (img * 255.0).astype(np.uint8)\n img = np.repeat(img[..., None], 3, axis=2)\n elif cmap == \"jet\":\n img = (img * 255.0).astype(np.uint8)\n img = cv2.applyColorMap(img, cv2.COLORMAP_JET)\n elif cmap == \"magma\":\n img = 1.0 - img\n base = cm.get_cmap(\"magma\")\n num_bins = 256\n colormap = LinearSegmentedColormap.from_list(\n f\"{base.name}{num_bins}\", base(np.linspace(0, 1, num_bins)), num_bins\n )(np.linspace(0, 1, num_bins))[:, :3]\n a = np.floor(img * 255.0)\n b = (a + 1).clip(max=255.0)\n f = img * 255.0 - a\n a = a.astype(np.uint16).clip(0, 255)\n b = b.astype(np.uint16).clip(0, 255)\n img = colormap[a] + (colormap[b] - colormap[a]) * f[..., None]\n img = (img * 255.0).astype(np.uint8)\n elif cmap == \"spectral\":\n colormap = plt.get_cmap(\"Spectral\")\n\n def blend_rgba(image):\n image = image[..., :3] * image[..., -1:] + (\n 1.0 - image[..., -1:]\n ) # blend A to RGB\n return image\n\n img = colormap(img)\n img = blend_rgba(img)\n img = (img * 255).astype(np.uint8)\n img 
= cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n def _save_grayscale_image(\n self,\n filename,\n img,\n data_range,\n cmap,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n img = self.get_grayscale_image_(img, data_range, cmap)\n cv2.imwrite(filename, img)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Image(self.get_save_path(filename)),\n \"trainer/global_step\": step,\n }\n )\n\n def save_grayscale_image(\n self,\n filename,\n img,\n data_range=DEFAULT_GRAYSCALE_KWARGS[\"data_range\"],\n cmap=DEFAULT_GRAYSCALE_KWARGS[\"cmap\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n save_path = self.get_save_path(filename)\n self._save_grayscale_image(save_path, img, data_range, cmap, name, step)\n return save_path\n\n def get_image_grid_(self, imgs, align):\n if isinstance(imgs[0], list):\n return np.concatenate(\n [self.get_image_grid_(row, align) for row in imgs], axis=0\n )\n cols = []\n for col in imgs:\n assert col[\"type\"] in [\"rgb\", \"uv\", \"grayscale\"]\n if col[\"type\"] == \"rgb\":\n rgb_kwargs = self.DEFAULT_RGB_KWARGS.copy()\n rgb_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_rgb_image_(col[\"img\"], **rgb_kwargs))\n elif col[\"type\"] == \"uv\":\n uv_kwargs = self.DEFAULT_UV_KWARGS.copy()\n uv_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_uv_image_(col[\"img\"], **uv_kwargs))\n elif col[\"type\"] == \"grayscale\":\n grayscale_kwargs = self.DEFAULT_GRAYSCALE_KWARGS.copy()\n grayscale_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_grayscale_image_(col[\"img\"], **grayscale_kwargs))\n\n if align == \"max\":\n h = max([col.shape[0] for col in cols])\n w = max([col.shape[1] for col in cols])\n elif align == \"min\":\n h = min([col.shape[0] for col in cols])\n w = min([col.shape[1] for col in cols])\n elif isinstance(align, int):\n h = align\n w = align\n elif (\n isinstance(align, tuple)\n and isinstance(align[0], int)\n and isinstance(align[1], int)\n ):\n h, w = align\n else:\n raise ValueError(\n f\"Unsupported image grid align: {align}, should be min, max, int or (int, int)\"\n )\n\n for i in range(len(cols)):\n if cols[i].shape[0] != h or cols[i].shape[1] != w:\n cols[i] = cv2.resize(cols[i], (w, h), interpolation=cv2.INTER_LINEAR)\n return np.concatenate(cols, axis=1)\n\n def save_image_grid(\n self,\n filename,\n imgs,\n align=DEFAULT_GRID_KWARGS[\"align\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n texts: Optional[List[float]] = None,\n ):\n save_path = self.get_save_path(filename)\n img = self.get_image_grid_(imgs, align=align)\n\n if texts is not None:\n img = Image.fromarray(img)\n draw = ImageDraw.Draw(img)\n black, white = (0, 0, 0), (255, 255, 255)\n for i, text in enumerate(texts):\n draw.text((2, (img.size[1] // len(texts)) * i + 1), f\"{text}\", white)\n draw.text((0, (img.size[1] // len(texts)) * i + 1), f\"{text}\", white)\n draw.text((2, (img.size[1] // len(texts)) * i - 1), f\"{text}\", white)\n draw.text((0, (img.size[1] // len(texts)) * i - 1), f\"{text}\", white)\n draw.text((1, (img.size[1] // len(texts)) * i), f\"{text}\", black)\n img = np.asarray(img)\n\n cv2.imwrite(save_path, img)\n if name and self._wandb_logger:\n wandb.log({name: wandb.Image(save_path), \"trainer/global_step\": step})\n return save_path\n\n def save_image(self, filename, img) -> str:\n save_path = self.get_save_path(filename)\n img = self.convert_data(img)\n assert img.dtype == np.uint8 or img.dtype == np.uint16\n if img.ndim == 3 and img.shape[-1] == 3:\n img = 
cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif img.ndim == 3 and img.shape[-1] == 4:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n cv2.imwrite(save_path, img)\n return save_path\n\n def save_cubemap(self, filename, img, data_range=(0, 1), rgba=False) -> str:\n save_path = self.get_save_path(filename)\n img = self.convert_data(img)\n assert img.ndim == 4 and img.shape[0] == 6 and img.shape[1] == img.shape[2]\n\n imgs_full = []\n for start in range(0, img.shape[-1], 3):\n img_ = img[..., start : start + 3]\n img_ = np.stack(\n [\n self.get_rgb_image_(img_[i], \"HWC\", data_range, rgba=rgba)\n for i in range(img_.shape[0])\n ],\n axis=0,\n )\n size = img_.shape[1]\n placeholder = np.zeros((size, size, 3), dtype=np.float32)\n img_full = np.concatenate(\n [\n np.concatenate(\n [placeholder, img_[2], placeholder, placeholder], axis=1\n ),\n np.concatenate([img_[1], img_[4], img_[0], img_[5]], axis=1),\n np.concatenate(\n [placeholder, img_[3], placeholder, placeholder], axis=1\n ),\n ],\n axis=0,\n )\n imgs_full.append(img_full)\n\n imgs_full = np.concatenate(imgs_full, axis=1)\n cv2.imwrite(save_path, imgs_full)\n return save_path\n\n def save_data(self, filename, data) -> str:\n data = self.convert_data(data)\n if isinstance(data, dict):\n if not filename.endswith(\".npz\"):\n filename += \".npz\"\n save_path = self.get_save_path(filename)\n np.savez(save_path, **data)\n else:\n if not filename.endswith(\".npy\"):\n filename += \".npy\"\n save_path = self.get_save_path(filename)\n np.save(save_path, data)\n return save_path\n\n def save_state_dict(self, filename, data) -> str:\n save_path = self.get_save_path(filename)\n torch.save(data, save_path)\n return save_path\n\n def save_img_sequence(\n self,\n filename,\n img_dir,\n matcher,\n save_format=\"mp4\",\n fps=30,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n assert save_format in [\"gif\", \"mp4\"]\n if not filename.endswith(save_format):\n filename += f\".{save_format}\"\n save_path = self.get_save_path(filename)\n matcher = re.compile(matcher)\n img_dir = os.path.join(self.get_save_dir(), img_dir)\n imgs = []\n for f in os.listdir(img_dir):\n if matcher.search(f):\n imgs.append(f)\n imgs = sorted(imgs, key=lambda f: int(matcher.search(f).groups()[0]))\n imgs = [cv2.imread(os.path.join(img_dir, f)) for f in imgs]\n\n if save_format == \"gif\":\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(save_path, imgs, fps=fps, palettesize=256)\n elif save_format == \"mp4\":\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(save_path, imgs, fps=fps)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Video(save_path, format=\"mp4\"),\n \"trainer/global_step\": step,\n }\n )\n return save_path\n\n def save_mesh(self, filename, v_pos, t_pos_idx, v_tex=None, t_tex_idx=None) -> str:\n save_path = self.get_save_path(filename)\n v_pos = self.convert_data(v_pos)\n t_pos_idx = self.convert_data(t_pos_idx)\n mesh = trimesh.Trimesh(vertices=v_pos, faces=t_pos_idx)\n mesh.export(save_path)\n return save_path\n\n def save_obj(\n self,\n filename: str,\n mesh: Mesh,\n save_mat: bool = False,\n save_normal: bool = False,\n save_uv: bool = False,\n save_vertex_color: bool = False,\n map_Kd: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Ks: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Bump: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Pm: Optional[Float[Tensor, \"H W 1\"]] = None,\n map_Pr: Optional[Float[Tensor, \"H W 1\"]] = None,\n map_format: str = 
\"jpg\",\n ) -> List[str]:\n save_paths: List[str] = []\n if not filename.endswith(\".obj\"):\n filename += \".obj\"\n v_pos, t_pos_idx = self.convert_data(mesh.v_pos), self.convert_data(\n mesh.t_pos_idx\n )\n v_nrm, v_tex, t_tex_idx, v_rgb = None, None, None, None\n if save_normal:\n v_nrm = self.convert_data(mesh.v_nrm)\n if save_uv:\n v_tex, t_tex_idx = self.convert_data(mesh.v_tex), self.convert_data(\n mesh.t_tex_idx\n )\n if save_vertex_color:\n v_rgb = self.convert_data(mesh.v_rgb)\n matname, mtllib = None, None\n if save_mat:\n matname = \"default\"\n mtl_filename = filename.replace(\".obj\", \".mtl\")\n mtllib = os.path.basename(mtl_filename)\n mtl_save_paths = self._save_mtl(\n mtl_filename,\n matname,\n map_Kd=self.convert_data(map_Kd),\n map_Ks=self.convert_data(map_Ks),\n map_Bump=self.convert_data(map_Bump),\n map_Pm=self.convert_data(map_Pm),\n map_Pr=self.convert_data(map_Pr),\n map_format=map_format,\n )\n save_paths += mtl_save_paths\n obj_save_path = self._save_obj(\n filename,\n v_pos,\n t_pos_idx,\n v_nrm=v_nrm,\n v_tex=v_tex,\n t_tex_idx=t_tex_idx,\n v_rgb=v_rgb,\n matname=matname,\n mtllib=mtllib,\n )\n save_paths.append(obj_save_path)\n return save_paths\n\n def _save_obj(\n self,\n filename,\n v_pos,\n t_pos_idx,\n v_nrm=None,\n v_tex=None,\n t_tex_idx=None,\n v_rgb=None,\n matname=None,\n mtllib=None,\n ) -> str:\n obj_str = \"\"\n if matname is not None:\n obj_str += f\"mtllib {mtllib}\\n\"\n obj_str += f\"g object\\n\"\n obj_str += f\"usemtl {matname}\\n\"\n for i in range(len(v_pos)):\n obj_str += f\"v {v_pos[i][0]} {v_pos[i][1]} {v_pos[i][2]}\"\n if v_rgb is not None:\n obj_str += f\" {v_rgb[i][0]} {v_rgb[i][1]} {v_rgb[i][2]}\"\n obj_str += \"\\n\"\n if v_nrm is not None:\n for v in v_nrm:\n obj_str += f\"vn {v[0]} {v[1]} {v[2]}\\n\"\n if v_tex is not None:\n for v in v_tex:\n obj_str += f\"vt {v[0]} {1.0 - v[1]}\\n\"\n\n for i in range(len(t_pos_idx)):\n obj_str += \"f\"\n for j in range(3):\n obj_str += f\" {t_pos_idx[i][j] + 1}/\"\n if v_tex is not None:\n obj_str += f\"{t_tex_idx[i][j] + 1}\"\n obj_str += \"/\"\n if v_nrm is not None:\n obj_str += f\"{t_pos_idx[i][j] + 1}\"\n obj_str += \"\\n\"\n\n save_path = self.get_save_path(filename)\n with open(save_path, \"w\") as f:\n f.write(obj_str)\n return save_path\n\n def _save_mtl(\n self,\n filename,\n matname,\n Ka=(0.0, 0.0, 0.0),\n Kd=(1.0, 1.0, 1.0),\n Ks=(0.0, 0.0, 0.0),\n map_Kd=None,\n map_Ks=None,\n map_Bump=None,\n map_Pm=None,\n map_Pr=None,\n map_format=\"jpg\",\n step: Optional[int] = None,\n ) -> List[str]:\n mtl_save_path = self.get_save_path(filename)\n save_paths = [mtl_save_path]\n mtl_str = f\"newmtl {matname}\\n\"\n mtl_str += f\"Ka {Ka[0]} {Ka[1]} {Ka[2]}\\n\"\n if map_Kd is not None:\n map_Kd_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_kd.{map_format}\"\n )\n mtl_str += f\"map_Kd texture_kd.{map_format}\\n\"\n self._save_rgb_image(\n map_Kd_save_path,\n map_Kd,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Kd\",\n step=step,\n )\n save_paths.append(map_Kd_save_path)\n else:\n mtl_str += f\"Kd {Kd[0]} {Kd[1]} {Kd[2]}\\n\"\n if map_Ks is not None:\n map_Ks_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_ks.{map_format}\"\n )\n mtl_str += f\"map_Ks texture_ks.{map_format}\\n\"\n self._save_rgb_image(\n map_Ks_save_path,\n map_Ks,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Ks\",\n step=step,\n )\n save_paths.append(map_Ks_save_path)\n else:\n mtl_str += f\"Ks {Ks[0]} {Ks[1]} {Ks[2]}\\n\"\n if map_Bump 
is not None:\n map_Bump_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_nrm.{map_format}\"\n )\n mtl_str += f\"map_Bump texture_nrm.{map_format}\\n\"\n self._save_rgb_image(\n map_Bump_save_path,\n map_Bump,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Bump\",\n step=step,\n )\n save_paths.append(map_Bump_save_path)\n if map_Pm is not None:\n map_Pm_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_metallic.{map_format}\"\n )\n mtl_str += f\"map_Pm texture_metallic.{map_format}\\n\"\n self._save_grayscale_image(\n map_Pm_save_path,\n map_Pm,\n data_range=(0, 1),\n cmap=None,\n name=f\"{matname}_refl\",\n step=step,\n )\n save_paths.append(map_Pm_save_path)\n if map_Pr is not None:\n map_Pr_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_roughness.{map_format}\"\n )\n mtl_str += f\"map_Pr texture_roughness.{map_format}\\n\"\n self._save_grayscale_image(\n map_Pr_save_path,\n map_Pr,\n data_range=(0, 1),\n cmap=None,\n name=f\"{matname}_Ns\",\n step=step,\n )\n save_paths.append(map_Pr_save_path)\n with open(self.get_save_path(filename), \"w\") as f:\n f.write(mtl_str)\n return save_paths\n\n def save_file(self, filename, src_path) -> str:\n save_path = self.get_save_path(filename)\n shutil.copyfile(src_path, save_path)\n return save_path\n\n def save_json(self, filename, payload) -> str:\n save_path = self.get_save_path(filename)\n with open(save_path, \"w\") as f:\n f.write(json.dumps(payload))\n return save_path" } ]
import os
import pytorch_lightning as pl
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass, field
from threestudio.models.exporters.base import Exporter, ExporterOutput
from threestudio.systems.utils import parse_optimizer, parse_scheduler
from threestudio.utils.base import (
    Updateable,
    update_end_if_possible,
    update_if_possible,
)
from threestudio.utils.config import parse_structured
from threestudio.utils.misc import C, cleanup, get_device, load_module_weights, find_last_path
from threestudio.utils.saving import SaverMixin
from threestudio.utils.typing import *
from threestudio.utils.config import load_config, parse_structured
token_num: 9,660
class BaseSystem(pl.LightningModule, Updateable, SaverMixin): @dataclass class Config: loggers: dict = field(default_factory=dict) loss: dict = field(default_factory=dict) optimizer: dict = field(default_factory=dict) scheduler: Optional[dict] = None weights: Optional[str] = None weights_ignore_modules: Optional[List[str]] = None cleanup_after_validation_step: bool = False cleanup_after_test_step: bool = False cfg: Config def __init__(self, cfg, resumed=False) -> None: super().__init__() self.cfg = parse_structured(self.Config, cfg) self._save_dir: Optional[str] = None self._resumed: bool = resumed self._resumed_eval: bool = False self._resumed_eval_status: dict = {"global_step": 0, "current_epoch": 0} if "loggers" in cfg: self.create_loggers(cfg.loggers) self.configure() if self.cfg.weights is not None: self.load_weights(self.cfg.weights, self.cfg.weights_ignore_modules) self.post_configure() def load_weights(self, weights: str, ignore_modules: Optional[List[str]] = None): state_dict, epoch, global_step = load_module_weights( weights, ignore_modules=ignore_modules, map_location="cpu" ) self.load_state_dict(state_dict, strict=False) # restore step-dependent states self.do_update_step(epoch, global_step, on_load_weights=True) def set_resume_status(self, current_epoch: int, global_step: int): # restore correct epoch and global step in eval self._resumed_eval = True self._resumed_eval_status["current_epoch"] = current_epoch self._resumed_eval_status["global_step"] = global_step @property def resumed(self): # whether from resumed checkpoint return self._resumed @property def true_global_step(self): if self._resumed_eval: return self._resumed_eval_status["global_step"] else: return self.global_step @property def true_current_epoch(self): if self._resumed_eval: return self._resumed_eval_status["current_epoch"] else: return self.current_epoch def configure(self) -> None: pass def post_configure(self) -> None: """ executed after weights are loaded """ pass def C(self, value: Any) -> float: return C(value, self.true_current_epoch, self.true_global_step) def configure_optimizers(self): optim = parse_optimizer(self.cfg.optimizer, self) ret = { "optimizer": optim, } if self.cfg.scheduler is not None: ret.update( { "lr_scheduler": parse_scheduler(self.cfg.scheduler, optim), } ) return ret def training_step(self, batch, batch_idx): raise NotImplementedError def validation_step(self, batch, batch_idx): raise NotImplementedError def on_train_batch_end(self, outputs, batch, batch_idx): self.dataset = self.trainer.train_dataloader.dataset update_end_if_possible( self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) def on_validation_batch_end(self, outputs, batch, batch_idx): self.dataset = self.trainer.val_dataloaders.dataset update_end_if_possible( self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) if self.cfg.cleanup_after_validation_step: # cleanup to save vram
class BaseSystem(pl.LightningModule, Updateable, SaverMixin): @dataclass class Config: loggers: dict = field(default_factory=dict) loss: dict = field(default_factory=dict) optimizer: dict = field(default_factory=dict) scheduler: Optional[dict] = None weights: Optional[str] = None weights_ignore_modules: Optional[List[str]] = None cleanup_after_validation_step: bool = False cleanup_after_test_step: bool = False cfg: Config def __init__(self, cfg, resumed=False) -> None: super().__init__() self.cfg = parse_structured(self.Config, cfg) self._save_dir: Optional[str] = None self._resumed: bool = resumed self._resumed_eval: bool = False self._resumed_eval_status: dict = {"global_step": 0, "current_epoch": 0} if "loggers" in cfg: self.create_loggers(cfg.loggers) self.configure() if self.cfg.weights is not None: self.load_weights(self.cfg.weights, self.cfg.weights_ignore_modules) self.post_configure() def load_weights(self, weights: str, ignore_modules: Optional[List[str]] = None): state_dict, epoch, global_step = load_module_weights( weights, ignore_modules=ignore_modules, map_location="cpu" ) self.load_state_dict(state_dict, strict=False) # restore step-dependent states self.do_update_step(epoch, global_step, on_load_weights=True) def set_resume_status(self, current_epoch: int, global_step: int): # restore correct epoch and global step in eval self._resumed_eval = True self._resumed_eval_status["current_epoch"] = current_epoch self._resumed_eval_status["global_step"] = global_step @property def resumed(self): # whether from resumed checkpoint return self._resumed @property def true_global_step(self): if self._resumed_eval: return self._resumed_eval_status["global_step"] else: return self.global_step @property def true_current_epoch(self): if self._resumed_eval: return self._resumed_eval_status["current_epoch"] else: return self.current_epoch def configure(self) -> None: pass def post_configure(self) -> None: """ executed after weights are loaded """ pass def C(self, value: Any) -> float: return C(value, self.true_current_epoch, self.true_global_step) def configure_optimizers(self): optim = parse_optimizer(self.cfg.optimizer, self) ret = { "optimizer": optim, } if self.cfg.scheduler is not None: ret.update( { "lr_scheduler": parse_scheduler(self.cfg.scheduler, optim), } ) return ret def training_step(self, batch, batch_idx): raise NotImplementedError def validation_step(self, batch, batch_idx): raise NotImplementedError def on_train_batch_end(self, outputs, batch, batch_idx): self.dataset = self.trainer.train_dataloader.dataset update_end_if_possible( self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) def on_validation_batch_end(self, outputs, batch, batch_idx): self.dataset = self.trainer.val_dataloaders.dataset update_end_if_possible( self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) if self.cfg.cleanup_after_validation_step: # cleanup to save vram
next_line: cleanup()
gold_snippet_index: 9
created_at: 2023-10-23 07:40:20+00:00
level: 12k
repo_name: YORG-AI/Open-Assistant
file_path: package/src/yorgassistant/core/assistant/async_threads.py
[ { "identifier": "Assistants", "path": "package/src/yorgassistant/core/assistant/assistant.py", "snippet": "class Assistants():\n def __init__(self, config,yaml_path:Optional[str] = None):\n self.config = config\n YamlPathConfig.assistants_yaml_path = yaml_path if yaml_path else 'assistants.yaml'\n \n def set_assistants_yaml_path(yaml_path: str):\n # 检查 yaml_path 是否为绝对路径\n if not os.path.isabs(yaml_path):\n # 获取调用此方法的栈帧\n stack = inspect.stack()\n caller_frame = stack[1]\n # 获取调用者的文件路径\n caller_path = caller_frame.filename\n # 获取调用者的目录路径\n caller_dir = os.path.dirname(caller_path)\n # 构建 yaml 文件的绝对路径\n full_yaml_path = os.path.join(caller_dir, yaml_path)\n else:\n full_yaml_path = yaml_path\n\n # 获取 yaml 文件所在的目录\n yaml_dir = os.path.dirname(full_yaml_path)\n # 如果目录不存在,则创建它\n os.makedirs(yaml_dir, exist_ok=True)\n # 设置 assistants_yaml_path\n YamlPathConfig.assistants_yaml_path = full_yaml_path\n\n def save_to_yaml(self):\n # 构建 assistants.yaml 文件的绝对路径\n assistants_yaml_path = YamlPathConfig.assistants_yaml_path\n # 检查文件是否存在,如果不存在,则创建一个空的yaml文件\n if not os.path.exists(assistants_yaml_path):\n with open(assistants_yaml_path, 'w') as file:\n file.write('') # 创建一个空文件\n # 使用绝对路径打开 assistants.yaml 文件\n with open(assistants_yaml_path, 'r') as file:\n data = yaml.safe_load(file) or []\n # 查找具有相同 id 的 assistant\n for i, d in enumerate(data):\n if d['id'] == self.config.id:\n # 如果找到了,就更新它\n data[i] = self.config.__dict__\n break\n else:\n # 如果没有找到,就添加新的 assistant 到列表中\n data.append(self.config.__dict__)\n # 写回 YAML 文件\n with open(assistants_yaml_path, 'w') as file:\n yaml.dump(data, file)\n\n @property\n def id(self):\n return self.config.id\n\n @property\n def name(self):\n return self.config.name\n\n @name.setter\n def name(self, value):\n self.config.name = value\n self.save_to_yaml() # 更新 YAML 文件\n\n @property\n def instructions(self):\n return self.config.instructions\n\n @instructions.setter\n def instructions(self, value):\n self.config.instructions = value\n\n @property\n def description(self):\n return self.config.description\n\n @description.setter\n def description(self, value):\n self.config.description = value\n\n @property\n def tools(self):\n return self.config.tools\n\n @tools.setter\n def tools(self, value):\n self.config.tools = value\n self.save_to_yaml() # 更新 YAML 文件\n\n @property\n def model(self):\n return self.config.model\n\n @model.setter\n def model(self, value):\n self.config.model = value\n self.save_to_yaml() # 更新 YAML 文件\n\n def get_tools_type_list(self):\n return [tool['type'] for tool in self.config.tools]\n\n @staticmethod\n def create(name: str = None, instructions: str = None, tools: list[dict] = [{'type':''}], model: str = 'gpt-4', description: str = None, file_ids: list = None) -> 'Assistants':\n # 创建配置和 Assistants 对象\n config = AssistantConfig(\n id=str(uuid.uuid4()),\n created_at=int(time.time()),\n name=name,\n description=description,\n instructions=instructions,\n tools=tools,\n model=model,\n file_ids=file_ids if file_ids is not None else [],\n )\n assistant = Assistants(config,YamlPathConfig.assistants_yaml_path)\n assistant.save_to_yaml() # 保存到 YAML 文件\n return assistant\n \n @staticmethod\n def get_all_assistants() -> List[Dict[str, Any]]:\n \"\"\"\n 读取 YAML 文件并返回所有 assistants 的信息列表。\n \"\"\"\n # 确保 YAML 文件路径已经被设置\n if YamlPathConfig.assistants_yaml_path:\n if not os.path.isfile(YamlPathConfig.assistants_yaml_path):\n # 如果文件路径存在但文件不存在,则创建一个空文件\n with open(YamlPathConfig.assistants_yaml_path, 'w') as file:\n yaml.dump([], file)\n else:\n raise 
FileNotFoundError(\"The threads YAML file path is not set.\")\n\n # 读取 YAML 文件\n with open(YamlPathConfig.assistants_yaml_path, 'r') as file:\n assistants_data = yaml.safe_load(file) or []\n # 使用 from_dict 方法将每个字典转换为 AssistantConfig 实例\n assistants_list = []\n for item in assistants_data:\n config = AssistantConfig(**item)\n assistants_list.append(config)\n return assistants_list\n @classmethod\n def from_id(cls, id: str) -> 'Assistants':\n # 使用传入的 yaml_path 参数打开 YAML 文件\n with open(YamlPathConfig.assistants_yaml_path, 'r') as file:\n data = yaml.safe_load(file) or []\n # 查找具有相同 id 的配置\n for d in data:\n if d['id'] == id:\n # 如果找到了,就用这个配置创建一个新的 Assistants 对象\n config = AssistantConfig(**d)\n return cls(config, YamlPathConfig.assistants_yaml_path) # 使用传入的 yaml_path 创建 Assistants 实例\n # 如果没有找到,就抛出一个异常\n raise ValueError(f'No assistant with id {id} found in YAML file.')\n \n @classmethod\n def delete_by_id(cls, id: str):\n\n # 使用绝对路径打开 assistants.yaml 文件\n with open(YamlPathConfig.assistants_yaml_path, 'r') as file:\n data = yaml.safe_load(file) or []\n\n # 查找具有相同 id 的 assistant\n for i, d in enumerate(data):\n if d['id'] == id:\n # 如果找到了,就删除它\n del data[i]\n break\n else:\n # 如果没有找到,就抛出一个异常\n raise ValueError(f'No assistant with id {id} found in YAML file.')\n\n # 写回 YAML 文件\n with open(YamlPathConfig.assistants_yaml_path, 'w') as file:\n yaml.dump(data, file)" }, { "identifier": "OpenAINode", "path": "package/src/yorgassistant/core/nodes/openai/openai.py", "snippet": "class OpenAINode(BaseNode):\n config: NodeConfig = NodeConfig(**openai_node_config)\n\n history: list[dict[str, any]]\n functions: list[dict[str, any]]\n\n cur_role: Optional[str]\n cur_content: Optional[str]\n\n def __init__(self):\n super().__init__()\n\n self.history = []\n self.functions = []\n\n self.cur_role = None\n self.cur_content = None\n\n def complete(self, input: CompleteInput):\n \"\"\"\n Complete with only current history. 
No extra messages.\n \"\"\"\n return self._make_completion([], input)\n\n # TODO: generalize these chat functions\n def chat(self, input: ChatInput):\n \"\"\"\n Chat with OpenAI's model with simple text.\n \"\"\"\n return self._make_completion(\n [\n Message(\n role=\"user\",\n content=input.message_text,\n )\n ],\n input,\n )\n\n def chat_with_prompt_template(self, input: ChatWithPromptTemplateInput):\n \"\"\"\n Chat with OpenAI's model with a specific prompt template.\n \"\"\"\n return self._make_completion(\n [\n Message(\n role=\"user\",\n content=input.prompt_template.format(**input.params),\n )\n ],\n input,\n )\n\n def chat_with_message(self, input: ChatWithMessageInput):\n \"\"\"\n Chat with OpenAI's model with a specific message dict.\n \"\"\"\n return self._make_completion([input.message], input)\n\n def chat_with_messages(self, input: ChatWithMessagesInput):\n \"\"\"\n Chat with OpenAI's model with a specific message dict.\n \"\"\"\n return self._make_completion(input.messages, input)\n\n def use_old_openai_with_prompt(self, input: OldCompleteInput):\n return self._make_old_completion(input.prompt, input)\n\n def _make_old_completion(\n self, prompt: str, input: OldCompleteConfig\n ) -> OpenAIOldResp:\n \"\"\"\n Make a completion with the given messages.\n \"\"\"\n\n kwargs = {\"model\": input.model, \"max_tokens\": 1096}\n\n kwargs[\"prompt\"] = prompt\n # set streaming if needed\n if input.use_streaming:\n kwargs[\"stream\"] = True\n\n # TODO: add exception handling\n try:\n client = OpenAI(api_key=os.getenv(\"OPENAI_CHAT_API_KEY\"))\n response = client.completions.create(**kwargs)\n except Exception as e:\n logging.warn(f\"openai_node._make_completion: error occurred: {e}\")\n return OpenAIOldResp(\n text=f\"Error occurred: {e}\",\n finish_reason=\"error\",\n )\n\n if input.use_streaming:\n # TODO 目前不支持流式处理\n resp = OpenAIOldResp(text=\"\", finish_reason=\"\")\n for completion in response:\n resp.text += completion[\"choices\"][0][\"text\"]\n if choice.finish_reason:\n resp.finish_reason = completion[\"choices\"][0][\"finish_reason\"]\n break\n return resp\n\n resp = OpenAIOldResp(**response.choices[0].model_dump())\n return resp\n\n def _make_completion(\n self, messages: list[Message], input: ChatConfig\n ) -> OpenAIResp | OpenAIStreamingResp:\n \"\"\"\n Make a completion with the given messages.\n \"\"\"\n\n kwargs = {\n \"model\": input.model,\n }\n\n cur_messages = []\n\n # if history is empty, add a default system message\n if len(self.history) == 0:\n cur_messages.append(\n Message(\n role=\"system\",\n content=\"You are a helpful AI assistant. 
You should answer the user's questions and help them with their tasks.\",\n ).dict(exclude_none=True)\n )\n else:\n cur_messages += self.history\n\n # append history if needed\n if input.append_history:\n for message in messages:\n self.add_single_message(message)\n\n # add all input messages to argument `messages`\n for message in messages:\n cur_messages.append(message.dict(exclude_none=True))\n\n kwargs[\"messages\"] = tt.trim(cur_messages, input.model, max_tokens=9999)\n\n # add function definitions if exists\n if len(self.functions) > 0:\n kwargs[\"functions\"] = self.functions\n kwargs[\"function_call\"] = \"auto\"\n\n # set streaming if needed\n if input.use_streaming:\n kwargs[\"stream\"] = True\n\n # TODO: add exception handling\n try:\n client = OpenAI(api_key=os.getenv(\"OPENAI_CHAT_API_KEY\"))\n response = client.chat.completions.create(**kwargs)\n except Exception as e:\n logging.warn(f\"openai_node._make_completion: error occurred: {e}\")\n return OpenAIResp(\n message=Message(\n role=\"system\",\n content=f\"Error occurred: {e}\",\n ),\n finish_reason=\"error\",\n )\n\n if input.use_streaming:\n resp = OpenAIStreamingResp(**response.choices[0].dict())\n if input.append_history:\n self.history.append(resp.delta.dict(exclude_none=True))\n return resp\n\n resp = OpenAIResp(**response.choices[0].dict())\n if input.append_history:\n self.history.append(resp.message.dict(exclude_none=True))\n return resp\n\n def add_function(self, func_def: FunctionDefinition):\n self.functions.append(\n func_def.dict()\n ) # redefined dict() doesn't have exclude_none arg\n\n def add_single_message(self, msg: Message):\n if self.cur_role is not None and self.cur_content is not None:\n self.history.append(\n Message(\n role=self.cur_role,\n content=self.cur_content,\n ).dict(exclude_none=True)\n )\n self.cur_role = None\n self.cur_content = None\n\n self.history.append(msg.dict(exclude_none=True))\n\n def add_system_message(self, content: str):\n self.add_single_message(\n Message(\n role=\"system\",\n content=content,\n )\n )\n\n def add_role(self, role: str):\n if self.cur_role is not None and self.cur_content is not None:\n self.add_single_message(\n Message(\n role=self.cur_role,\n content=self.cur_content,\n )\n )\n\n self.cur_role = role\n\n def add_content(self, content: str):\n if self.cur_content is not None:\n self.cur_content += content\n else:\n self.cur_content = content" }, { "identifier": "AsyncOpenAINode", "path": "package/src/yorgassistant/core/nodes/openai/openai.py", "snippet": "class AsyncOpenAINode(BaseNode):\n config: NodeConfig = NodeConfig(**openai_node_config)\n\n history: list[dict[str, any]]\n functions: list[dict[str, any]]\n\n cur_role: Optional[str]\n cur_content: Optional[str]\n\n def __init__(self):\n super().__init__()\n\n self.history = []\n self.functions = []\n\n self.cur_role = None\n self.cur_content = None\n\n openai.api_key = os.getenv(\"OPENAI_CHAT_API_KEY\")\n openai.api_base = os.getenv(\"OPENAI_CHAT_API_BASE\")\n\n async def complete(self, input: CompleteInput):\n \"\"\"\n Complete with only current history. 
No extra messages.\n \"\"\"\n return await self._make_completion([], input)\n\n # TODO: generalize these chat functions\n async def chat(self, input: ChatInput):\n \"\"\"\n Chat with OpenAI's model with simple text.\n \"\"\"\n return await self._make_completion(\n [\n Message(\n role=\"user\",\n content=input.message_text,\n )\n ],\n input,\n )\n\n async def chat_with_prompt_template(self, input: ChatWithPromptTemplateInput):\n \"\"\"\n Chat with OpenAI's model with a specific prompt template.\n \"\"\"\n return await self._make_completion(\n [\n Message(\n role=\"user\",\n content=input.prompt_template.format(**input.params),\n )\n ],\n input,\n )\n\n async def chat_with_message(self, input: ChatWithMessageInput):\n \"\"\"\n Chat with OpenAI's model with a specific message dict.\n \"\"\"\n return await self._make_completion([input.message], input)\n\n async def chat_with_messages(self, input: ChatWithMessagesInput):\n \"\"\"\n Chat with OpenAI's model with a specific message dict.\n \"\"\"\n return await self._make_completion(input.messages, input)\n\n async def use_old_openai_with_prompt(self, input: OldCompleteInput):\n return await self._make_old_completion(input.prompt, input)\n\n async def _make_old_completion(\n self, prompt: str, input: OldCompleteConfig\n ) -> OpenAIOldResp:\n \"\"\"\n Make a completion with the given messages.\n \"\"\"\n\n kwargs = {\"model\": input.model, \"max_tokens\": 1096}\n\n kwargs[\"prompt\"] = prompt\n # set streaming if needed\n if input.use_streaming:\n kwargs[\"stream\"] = True\n\n # TODO: add exception handling\n try:\n client = OpenAI(api_key=os.getenv(\"OPENAI_CHAT_API_KEY\"))\n response = client.completions.create(**kwargs)\n except Exception as e:\n logging.warn(f\"openai_node._make_completion: error occurred: {e}\")\n return OpenAIOldResp(\n text=f\"Error occurred: {e}\",\n finish_reason=\"error\",\n )\n\n if input.use_streaming:\n # TODO 目前不支持流式处理\n resp = OpenAIOldResp(text=\"\", finish_reason=\"\")\n for completion in response:\n resp.text += completion[\"choices\"][0][\"text\"]\n if choice.finish_reason:\n resp.finish_reason = completion[\"choices\"][0][\"finish_reason\"]\n break\n return resp\n\n resp = OpenAIOldResp(**response.choices[0].model_dump())\n return resp\n\n async def _make_completion(\n self, messages: list[Message], input: ChatConfig\n ) -> OpenAIResp | OpenAIStreamingResp:\n \"\"\"\n Make a completion with the given messages.\n \"\"\"\n\n kwargs = {\n \"model\": input.model,\n }\n\n cur_messages = []\n\n # if history is empty, add a default system message\n if len(self.history) == 0:\n cur_messages.append(\n Message(\n role=\"system\",\n content=\"You are a helpful AI assistant. 
You should answer the user's questions and help them with their tasks.\",\n ).dict(exclude_none=True)\n )\n else:\n cur_messages += self.history\n\n # append history if needed\n if input.append_history:\n for message in messages:\n self.add_single_message(message)\n\n # add all input messages to argument `messages`\n for message in messages:\n cur_messages.append(message.dict(exclude_none=True))\n\n kwargs[\"messages\"] = tt.trim(cur_messages, input.model, max_tokens=9999)\n\n # add function definitions if exists\n if len(self.functions) > 0:\n kwargs[\"functions\"] = self.functions\n kwargs[\"function_call\"] = \"auto\"\n\n # set streaming if needed\n if input.use_streaming:\n kwargs[\"stream\"] = True\n\n # TODO: add exception handling\n try:\n client = AsyncOpenAI(api_key=os.getenv(\"OPENAI_CHAT_API_KEY\"))\n response = await client.chat.completions.create(**kwargs)\n except Exception as e:\n return OpenAIResp(\n message=Message(\n role=\"system\",\n content=f\"Error occurred: {e}\",\n ),\n finish_reason=\"error\",\n )\n\n if input.use_streaming:\n resp = OpenAIStreamingResp(**response.choices[0].dict())\n if input.append_history:\n self.history.append(resp.delta.dict(exclude_none=True))\n return resp\n\n resp = OpenAIResp(**response.choices[0].dict())\n if input.append_history:\n self.history.append(resp.message.dict(exclude_none=True))\n return resp\n\n def add_function(self, func_def: FunctionDefinition):\n self.functions.append(\n func_def.dict()\n ) # redefined dict() doesn't have exclude_none arg\n\n def add_single_message(self, msg: Message):\n if self.cur_role is not None and self.cur_content is not None:\n self.history.append(\n Message(\n role=self.cur_role,\n content=self.cur_content,\n ).dict(exclude_none=True)\n )\n self.cur_role = None\n self.cur_content = None\n\n self.history.append(msg.dict(exclude_none=True))\n\n def add_system_message(self, content: str):\n self.add_single_message(\n Message(\n role=\"system\",\n content=content,\n )\n )\n\n def add_role(self, role: str):\n if self.cur_role is not None and self.cur_content is not None:\n self.add_single_message(\n Message(\n role=self.cur_role,\n content=self.cur_content,\n )\n )\n\n self.cur_role = role\n\n def add_content(self, content: str):\n if self.cur_content is not None:\n self.cur_content += content\n else:\n self.cur_content = content" }, { "identifier": "Tools", "path": "package/src/yorgassistant/core/assistant/tools/tools.py", "snippet": "class Tools:\n tools: dict[str, Tool]\n\n def __init__(self):\n self.tools = {}\n # 获取调用此方法的栈帧\n stack = inspect.stack()\n caller_frame = stack[1]\n # 获取调用者的文件路径\n caller_path = caller_frame.filename\n # 获取调用者的目录路径\n caller_dir = os.path.dirname(caller_path)\n # 构建 openai.yaml 文件的绝对路径\n yaml_file_path = os.path.join(caller_dir, YamlPathConfig.tools_yaml_path)\n tools_yaml_path = yaml_file_path\n # 读取 tools.yaml 文件,初始化所有 tools\n with open(tools_yaml_path, \"r\") as f:\n config_obj = yaml.safe_load(f)\n for tool_name, tool_config in config_obj[\"tools\"].items():\n self.tools[tool_name] = Tool(config=ToolConfig(**tool_config))\n\n def set_tools_yaml_path(yaml_path:str):\n # 检查 yaml_path 是否为绝对路径\n if not os.path.isabs(yaml_path):\n # 获取调用此方法的栈帧\n stack = inspect.stack()\n caller_frame = stack[1]\n # 获取调用者的文件路径\n caller_path = caller_frame.filename\n # 获取调用者的目录路径\n caller_dir = os.path.dirname(caller_path)\n # 构建 yaml 文件的绝对路径\n full_yaml_path = os.path.join(caller_dir, yaml_path)\n else:\n full_yaml_path = yaml_path\n # 获取 yaml 文件所在的目录\n yaml_dir = 
os.path.dirname(full_yaml_path)\n # 如果目录不存在,则创建它\n os.makedirs(yaml_dir, exist_ok=True)\n # 设置 yaml_path\n YamlPathConfig.tools_yaml_path = full_yaml_path\n\n def get_tool(self, tool_name: str) -> Tool:\n # 找到对应的工具\n tool = self.tools.get(tool_name)\n if tool is None:\n raise ValueError(f\"No tool named {tool_name} found.\")\n\n return tool\n\n def get_tool_summary(self, tool_name: str) -> str:\n # 在 tools.yaml 文件中找到对应的工具\n tool = self.tools.get(tool_name)\n if tool is None:\n raise ValueError(f\"No tool named {tool_name} found.\")\n\n return tool.config.summary\n\n def get_tools_list_summary(self, tools_list: list[str]) -> dict[str, str]:\n tools_summary = {}\n for tool_name in tools_list:\n summary = self.get_tool_summary(tool_name)\n tools_summary[tool_name] = summary\n return tools_summary" }, { "identifier": "Tool", "path": "package/src/yorgassistant/core/assistant/tools/tools.py", "snippet": "class Tool:\n config: ToolConfig\n entity: BaseToolEntity\n _tool_type: str # 使用一个内部变量来存储 tool_type 的值\n\n def __init__(self, config: ToolConfig):\n self.config = config\n entity_name = config.entity_name\n\n if entity_name in FUNCTION_TOOL_ENTITIES:\n self.entity = FunctionToolEntity(FUNCTION_TOOL_ENTITIES[entity_name])\n self._tool_type = 'function'\n elif entity_name in STATEFUL_TOOL_ENTITIES:\n self.entity = STATEFUL_TOOL_ENTITIES[entity_name]()\n self._tool_type = 'stateful'\n else:\n raise Exception(f\"Tool entity {entity_name} not found.\")\n\n @property\n def tool_type(self):\n return self._tool_type\n\n @tool_type.setter\n def tool_type(self, value):\n self._tool_type = value\n # TODO: response check and type convert\n def call(self, **kwargs):\n return self.entity.call(**kwargs)\n\n def need_llm_generate_parameters(self) -> bool:\n return self.entity.need_llm_generate_parameters()\n\n def need_llm_generate_response(self) -> bool:\n return self.entity.need_llm_generate_response()\n\n def has_done(self) -> bool:\n return self.entity.current_state() == State.DONE" } ]
import_statement:
import uuid
import time
import yaml
import os
import re
import logging
import json
import inspect
from typing import Any, List, Optional, Dict
from .assistant import Assistants
from ..nodes.openai.openai import OpenAINode, AsyncOpenAINode
from ..nodes.openai.openai_model import *
from .tools.tools import Tools, Tool
from .config import *
from .prompt.few_shot_cot_tools_choose_prompt import *
from .prompt.parameters_generate_prompt import *
from .prompt.response_generate_prompt import *
token_num: 7,362
class AsyncThreads: current_tool: Tool chat_node: OpenAINode # Threads 全局的 OpenAI node,仅用于 chat 交互以及对 tool 执行结果的分析(选择 tool 以及生成参数不使用该 node) def __init__(self, config: ThreadsConfig,threads_yaml_path:Optional[str] = None): self._config = config self.current_tool = None YamlPathConfig.threads_yaml_path = threads_yaml_path if threads_yaml_path else "threads.yaml" @property def config(self): return self._config @property def id(self): return self._config.id def set_threads_yaml_path(yaml_path:str): # 检查 yaml_path 是否为绝对路径 if not os.path.isabs(yaml_path): # 获取调用此方法的栈帧 stack = inspect.stack() caller_frame = stack[1] # 获取调用者的文件路径 caller_path = caller_frame.filename # 获取调用者的目录路径 caller_dir = os.path.dirname(caller_path) # 构建 yaml 文件的绝对路径 full_yaml_path = os.path.join(caller_dir, yaml_path) else: full_yaml_path = yaml_path # 获取 yaml 文件所在的目录 yaml_dir = os.path.dirname(full_yaml_path) # 如果目录不存在,则创建它 os.makedirs(yaml_dir, exist_ok=True) # 设置 yaml_path YamlPathConfig.threads_yaml_path = full_yaml_path async def save_to_yaml(self): # 构建 threads.yaml 文件的绝对路径 threads_yaml_path = YamlPathConfig.threads_yaml_path # 检查文件是否存在,如果不存在,则创建一个空的yaml文件 if not os.path.exists(threads_yaml_path): with open(threads_yaml_path, 'w') as file: file.write('') # 创建一个空文件 # 使用绝对路径打开 threads.yaml 文件 with open(threads_yaml_path, "r") as file: data = yaml.safe_load(file) or [] # 查找具有相同 id 的 assistant for i, d in enumerate(data): if d["id"] == self.config.id: # 如果找到了,就更新它 data[i] = self.config.to_dict() break else: # 如果没有找到,就添加新的 assistant 到列表中 data.append(self.config.to_dict()) # 写回 YAML 文件 with open(threads_yaml_path, "w") as file: yaml.dump(data, file) @staticmethod def create(yaml_file_path:str) -> "AsyncThreads": # 创建 ThreadsConfig 对象 config = ThreadsConfig( id=str(uuid.uuid4()), object="AsyncThreads", created_at=int(time.time()), message_history=[], metadata={}, ) # 创建 Threads 对象 threads = AsyncThreads(config,YamlPathConfig.threads_yaml_path) # 保存到 YAML 文件 threads.save_to_yaml() return threads @classmethod def from_id(cls, id: str) -> 'AsyncThreads': # 使用传入的 yaml_path 参数打开 YAML 文件 with open(YamlPathConfig.threads_yaml_path, 'r') as file: data = yaml.safe_load(file) or [] # 查找具有相同 id 的配置 for d in data: if d['id'] == id: # 如果找到了,就用这个配置创建一个新的对象 config = ThreadsConfig.from_dict(d) return cls(config, YamlPathConfig.threads_yaml_path) # 使用传入的 yaml_path 创建 实例 # 如果没有找到,就抛出一个异常 raise ValueError(f'No threads with id {id} found in YAML file.') @staticmethod def get_all_threads() -> List[Dict[str, Any]]: """ 读取 YAML 文件并返回所有 threads 的信息列表。 """ # 确保 YAML 文件路径已经被设置 if YamlPathConfig.threads_yaml_path: if not os.path.isfile(YamlPathConfig.threads_yaml_path): # 如果文件路径存在但文件不存在,则创建一个空文件 with open(YamlPathConfig.threads_yaml_path, 'w') as file: yaml.dump([], file) else: raise FileNotFoundError("The threads YAML file path is not set.") # 读取 YAML 文件 with open(YamlPathConfig.threads_yaml_path, 'r') as file: data = yaml.safe_load(file) or [] # 使用 from_dict 方法将每个字典转换为 ThreadsConfig 实例 threads_list = [] for item in data: config = ThreadsConfig.from_dict(item) threads_list.append(config) return threads_list async def run(self, assistant_id: str, input_text: str, **kwargs): try: # 使用 from_id 方法获取助手 assistant = Assistants.from_id(assistant_id) tools_list = assistant.get_tools_type_list() # 初始化 Tools 对象
def extract_bracket_content(s: str) -> list: content = re.findall(r"\[(.*?)\]", s) content = [c.replace("'", "") for c in content] content = filter(lambda x: x != "", content) ret = [] for item in content: if "," in item: ret.extend(item.split(",")) else: ret.append(item) return ret class AsyncThreads: current_tool: Tool chat_node: OpenAINode # Threads 全局的 OpenAI node,仅用于 chat 交互以及对 tool 执行结果的分析(选择 tool 以及生成参数不使用该 node) def __init__(self, config: ThreadsConfig,threads_yaml_path:Optional[str] = None): self._config = config self.current_tool = None YamlPathConfig.threads_yaml_path = threads_yaml_path if threads_yaml_path else "threads.yaml" @property def config(self): return self._config @property def id(self): return self._config.id def set_threads_yaml_path(yaml_path:str): # 检查 yaml_path 是否为绝对路径 if not os.path.isabs(yaml_path): # 获取调用此方法的栈帧 stack = inspect.stack() caller_frame = stack[1] # 获取调用者的文件路径 caller_path = caller_frame.filename # 获取调用者的目录路径 caller_dir = os.path.dirname(caller_path) # 构建 yaml 文件的绝对路径 full_yaml_path = os.path.join(caller_dir, yaml_path) else: full_yaml_path = yaml_path # 获取 yaml 文件所在的目录 yaml_dir = os.path.dirname(full_yaml_path) # 如果目录不存在,则创建它 os.makedirs(yaml_dir, exist_ok=True) # 设置 yaml_path YamlPathConfig.threads_yaml_path = full_yaml_path async def save_to_yaml(self): # 构建 threads.yaml 文件的绝对路径 threads_yaml_path = YamlPathConfig.threads_yaml_path # 检查文件是否存在,如果不存在,则创建一个空的yaml文件 if not os.path.exists(threads_yaml_path): with open(threads_yaml_path, 'w') as file: file.write('') # 创建一个空文件 # 使用绝对路径打开 threads.yaml 文件 with open(threads_yaml_path, "r") as file: data = yaml.safe_load(file) or [] # 查找具有相同 id 的 assistant for i, d in enumerate(data): if d["id"] == self.config.id: # 如果找到了,就更新它 data[i] = self.config.to_dict() break else: # 如果没有找到,就添加新的 assistant 到列表中 data.append(self.config.to_dict()) # 写回 YAML 文件 with open(threads_yaml_path, "w") as file: yaml.dump(data, file) @staticmethod def create(yaml_file_path:str) -> "AsyncThreads": # 创建 ThreadsConfig 对象 config = ThreadsConfig( id=str(uuid.uuid4()), object="AsyncThreads", created_at=int(time.time()), message_history=[], metadata={}, ) # 创建 Threads 对象 threads = AsyncThreads(config,YamlPathConfig.threads_yaml_path) # 保存到 YAML 文件 threads.save_to_yaml() return threads @classmethod def from_id(cls, id: str) -> 'AsyncThreads': # 使用传入的 yaml_path 参数打开 YAML 文件 with open(YamlPathConfig.threads_yaml_path, 'r') as file: data = yaml.safe_load(file) or [] # 查找具有相同 id 的配置 for d in data: if d['id'] == id: # 如果找到了,就用这个配置创建一个新的对象 config = ThreadsConfig.from_dict(d) return cls(config, YamlPathConfig.threads_yaml_path) # 使用传入的 yaml_path 创建 实例 # 如果没有找到,就抛出一个异常 raise ValueError(f'No threads with id {id} found in YAML file.') @staticmethod def get_all_threads() -> List[Dict[str, Any]]: """ 读取 YAML 文件并返回所有 threads 的信息列表。 """ # 确保 YAML 文件路径已经被设置 if YamlPathConfig.threads_yaml_path: if not os.path.isfile(YamlPathConfig.threads_yaml_path): # 如果文件路径存在但文件不存在,则创建一个空文件 with open(YamlPathConfig.threads_yaml_path, 'w') as file: yaml.dump([], file) else: raise FileNotFoundError("The threads YAML file path is not set.") # 读取 YAML 文件 with open(YamlPathConfig.threads_yaml_path, 'r') as file: data = yaml.safe_load(file) or [] # 使用 from_dict 方法将每个字典转换为 ThreadsConfig 实例 threads_list = [] for item in data: config = ThreadsConfig.from_dict(item) threads_list.append(config) return threads_list async def run(self, assistant_id: str, input_text: str, **kwargs): try: # 使用 from_id 方法获取助手 assistant = Assistants.from_id(assistant_id) tools_list = 
assistant.get_tools_type_list() # 初始化 Tools 对象
next_line: tools = Tools()
gold_snippet_index: 3
created_at: 2023-10-24 15:15:48+00:00
level: 12k
repo_name: zju3dv/4K4D
file_path: scripts/renbody/warp_gaussian_with_smpl.py
[ { "identifier": "dotdict", "path": "easyvolcap/utils/base_utils.py", "snippet": "class dotdict(dict, Dict[KT, VT]):\n \"\"\"\n This is the default data passing object used throughout the codebase\n Main function: dot access for dict values & dict like merging and updates\n\n a dictionary that supports dot notation \n as well as dictionary access notation \n usage: d = make_dotdict() or d = make_dotdict{'val1':'first'})\n set attributes: d.val2 = 'second' or d['val2'] = 'second'\n get attributes: d.val2 or d['val2']\n \"\"\"\n\n def update(self, dct: Dict = None, **kwargs):\n dct = copy(dct) # avoid modifying the original dict, use super's copy to avoid recursion\n\n # Handle different arguments\n if dct is None:\n dct = kwargs\n elif isinstance(dct, Mapping):\n dct.update(kwargs)\n else:\n super().update(dct, **kwargs)\n return\n\n # Recursive updates\n for k, v in dct.items():\n if k in self:\n\n # Handle type conversions\n target_type = type(self[k])\n if not isinstance(v, target_type):\n # NOTE: bool('False') will be True\n if target_type == bool and isinstance(v, str):\n dct[k] = v == 'True'\n else:\n dct[k] = target_type(v)\n\n if isinstance(v, dict):\n self[k].update(v) # recursion from here\n else:\n self[k] = v\n else:\n if isinstance(v, dict):\n self[k] = dotdict(v) # recursion?\n else:\n self[k] = v\n return self\n\n def __init__(self, *args, **kwargs):\n self.update(*args, **kwargs)\n\n copy = return_dotdict(dict.copy)\n fromkeys = return_dotdict(dict.fromkeys)\n\n # def __hash__(self):\n # # return hash(''.join([str(self.values().__hash__())]))\n # return super(dotdict, self).__hash__()\n\n # def __init__(self, *args, **kwargs):\n # super(dotdict, self).__init__(*args, **kwargs)\n\n \"\"\"\n Uncomment following lines and \n comment out __getattr__ = dict.__getitem__ to get feature:\n \n returns empty numpy array for undefined keys, so that you can easily copy things around\n TODO: potential caveat, harder to trace where this is set to np.array([], dtype=np.float32)\n \"\"\"\n\n def __getitem__(self, key):\n try:\n return dict.__getitem__(self, key)\n except KeyError as e:\n raise AttributeError(e)\n # MARK: Might encounter exception in newer version of pytorch\n # Traceback (most recent call last):\n # File \"/home/xuzhen/miniconda3/envs/torch/lib/python3.9/multiprocessing/queues.py\", line 245, in _feed\n # obj = _ForkingPickler.dumps(obj)\n # File \"/home/xuzhen/miniconda3/envs/torch/lib/python3.9/multiprocessing/reduction.py\", line 51, in dumps\n # cls(buf, protocol).dump(obj)\n # KeyError: '__getstate__'\n # MARK: Because you allow your __getattr__() implementation to raise the wrong kind of exception.\n # FIXME: not working typing hinting code\n __getattr__: Callable[..., 'torch.Tensor'] = __getitem__ # type: ignore # overidden dict.__getitem__\n __getattribute__: Callable[..., 'torch.Tensor'] # type: ignore\n # __getattr__ = dict.__getitem__\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n # TODO: better ways to programmically define these special variables?\n\n @property\n def meta(self) -> dotdict:\n # Special variable used for storing cpu tensor in batch\n if 'meta' not in self:\n self.meta = dotdict()\n return self.__getitem__('meta')\n\n @meta.setter\n def meta(self, meta):\n self.__setitem__('meta', meta)\n\n @property\n def output(self) -> dotdict: # late annotation needed for this\n # Special entry for storing output tensor in batch\n if 'output' not in self:\n self.output = dotdict()\n return self.__getitem__('output')\n\n @output.setter\n 
def output(self, output):\n self.__setitem__('output', output)\n\n @property\n def persistent(self) -> dotdict: # late annotation needed for this\n # Special entry for storing persistent tensor in batch\n if 'persistent' not in self:\n self.persistent = dotdict()\n return self.__getitem__('persistent')\n\n @persistent.setter\n def persistent(self, persistent):\n self.__setitem__('persistent', persistent)\n\n @property\n def type(self) -> str: # late annotation needed for this\n # Special entry for type based construction system\n return self.__getitem__('type')\n\n @type.setter\n def type(self, type):\n self.__setitem__('type', type)\n\n def to_dict(self):\n out = dict()\n for k, v in self.items():\n if isinstance(v, dotdict):\n v = v.to_dict() # recursion point\n out[k] = v\n return out" }, { "identifier": "load_mesh", "path": "easyvolcap/utils/data_utils.py", "snippet": "def load_mesh(filename: str, device='cuda', load_uv=False, load_aux=False, backend='pytorch3d'):\n from pytorch3d.io import load_ply, load_obj\n if backend == 'trimesh':\n import trimesh\n mesh: trimesh.Trimesh = trimesh.load(filename)\n return mesh.vertices, mesh.faces\n\n vm, fm = None, None\n if filename.endswith('.npz'):\n mesh = np.load(filename)\n v = torch.from_numpy(mesh['verts'])\n f = torch.from_numpy(mesh['faces'])\n\n if load_uv:\n vm = torch.from_numpy(mesh['uvs'])\n fm = torch.from_numpy(mesh['uvfaces'])\n else:\n if filename.endswith('.ply'):\n v, f = load_ply(filename)\n elif filename.endswith('.obj'):\n v, faces_attr, aux = load_obj(filename)\n f = faces_attr.verts_idx\n\n if load_uv:\n vm = aux.verts_uvs\n fm = faces_attr.textures_idx\n else:\n raise NotImplementedError(f'Unrecognized input format for: {filename}')\n\n v = v.to(device, non_blocking=True).contiguous()\n f = f.to(device, non_blocking=True).contiguous()\n\n if load_uv:\n vm = vm.to(device, non_blocking=True).contiguous()\n fm = fm.to(device, non_blocking=True).contiguous()\n\n if load_uv:\n if load_aux:\n return v, f, vm, fm, aux\n else:\n return v, f, vm, fm\n else:\n return v, f" }, { "identifier": "load_dotdict", "path": "easyvolcap/utils/data_utils.py", "snippet": "def load_dotdict(path):\n f = np.load(path)\n f = dotdict({**f})\n return f" }, { "identifier": "to_tensor", "path": "easyvolcap/utils/data_utils.py", "snippet": "def to_tensor(batch, ignore_list: bool = False) -> Union[torch.Tensor, dotdict[str, torch.Tensor]]:\n if isinstance(batch, (tuple, list)) and not ignore_list:\n batch = [to_tensor(b, ignore_list) for b in batch]\n elif isinstance(batch, dict):\n batch = dotdict({k: to_tensor(v, ignore_list) for k, v in batch.items()})\n elif isinstance(batch, torch.Tensor):\n pass\n else: # numpy and others\n batch = torch.as_tensor(batch)\n return batch" }, { "identifier": "load_network", "path": "easyvolcap/utils/net_utils.py", "snippet": "def load_network(\n model: nn.Module,\n model_dir: str = '',\n resume: bool = True, # when resume is False, will try as a fresh restart\n epoch: int = -1,\n strict: bool = True, # report errors if something is wrong\n skips: List[str] = [],\n only: List[str] = [],\n prefix: str = '', # will match and remove these prefix\n allow_mismatch: List[str] = [],\n):\n pretrained, model_path = load_pretrained(model_dir, resume, epoch,\n remove_if_not_resuming=False,\n warn_if_not_exist=False)\n if pretrained is None:\n pretrained, model_path = load_pretrained(model_dir, resume, epoch, '.pth',\n remove_if_not_resuming=False,\n warn_if_not_exist=False)\n if pretrained is None:\n pretrained, model_path = 
load_pretrained(model_dir, resume, epoch, '.pt',\n remove_if_not_resuming=False,\n warn_if_not_exist=resume)\n if pretrained is None:\n return 0\n\n # log(f'Loading network: {blue(model_path)}')\n # ordered dict cannot be mutated while iterating\n # vanilla dict cannot change size while iterating\n pretrained_model = pretrained['model']\n\n if skips:\n keys = list(pretrained_model.keys())\n for k in keys:\n if root_of_any(k, skips):\n del pretrained_model[k]\n\n if only:\n keys = list(pretrained_model.keys()) # since the dict has been mutated, some keys might not exist\n for k in keys:\n if not root_of_any(k, only):\n del pretrained_model[k]\n\n if prefix:\n keys = list(pretrained_model.keys()) # since the dict has been mutated, some keys might not exist\n for k in keys:\n if k.startswith(prefix):\n pretrained_model[k[len(prefix):]] = pretrained_model[k]\n del pretrained_model[k]\n\n for key in allow_mismatch:\n if key in model.state_dict() and key in pretrained_model and not strict:\n model_parent = model\n pretrained_parent = pretrained_model\n chain = key.split('.')\n for k in chain[:-1]: # except last one\n model_parent = getattr(model_parent, k)\n pretrained_parent = pretrained_parent[k]\n last_name = chain[-1]\n setattr(model_parent, last_name, nn.Parameter(pretrained_parent[last_name], requires_grad=getattr(model_parent, last_name).requires_grad)) # just replace without copying\n\n (model if not isinstance(model, DDP) else model.module).load_state_dict(pretrained_model, strict=strict)\n log(f'Loaded network {blue(model_path)} at epoch {blue(pretrained[\"epoch\"])}')\n return pretrained[\"epoch\"] + 1" }, { "identifier": "GaussianModel", "path": "easyvolcap/utils/gaussian_utils.py", "snippet": "class GaussianModel(nn.Module):\n def __init__(self,\n xyz: torch.Tensor = None,\n colors: torch.Tensor = None,\n init_occ: float = 0.1,\n sh_deg: int = 3,\n scale_min: float = 1e-4,\n scale_max: float = 1e1,\n ):\n super().__init__()\n\n @torch.jit.script\n def scaling_activation(x, scale_min: float = scale_min, scale_max: float = scale_max):\n return torch.sigmoid(x) * (scale_max - scale_min) + scale_min\n\n @torch.jit.script\n def scaling_inverse_activation(x, scale_min: float = scale_min, scale_max: float = scale_max):\n return torch.logit(((x - scale_min) / (scale_max - scale_min)).clamp(1e-5, 1 - 1e-5))\n\n self.setup_functions(scaling_activation=scaling_activation, scaling_inverse_activation=scaling_inverse_activation)\n\n # SH realte configs\n self.active_sh_degree = make_buffer(torch.zeros(1))\n self.max_sh_degree = sh_deg\n\n # Initalize trainable parameters\n self.create_from_pcd(xyz, colors, init_occ)\n\n # Densification related parameters\n self.max_radii2D = make_buffer(torch.zeros(self.get_xyz.shape[0]))\n self.xyz_gradient_accum = make_buffer(torch.zeros((self.get_xyz.shape[0], 1)))\n self.denom = make_buffer(torch.zeros((self.get_xyz.shape[0], 1)))\n\n # Perform some model messaging before loading\n self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook)\n\n def setup_functions(self,\n scaling_activation=torch.exp,\n scaling_inverse_activation=torch.log,\n opacity_activation=torch.sigmoid,\n inverse_opacity_activation=inverse_sigmoid,\n rotation_activation=F.normalize,\n ):\n def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):\n L = build_scaling_rotation(scaling_modifier * scaling, rotation)\n actual_covariance = L @ L.transpose(1, 2)\n symm = strip_symmetric(actual_covariance)\n return symm\n\n self.scaling_activation = 
getattr(torch, scaling_activation) if isinstance(scaling_activation, str) else scaling_activation\n self.opacity_activation = getattr(torch, opacity_activation) if isinstance(opacity_activation, str) else opacity_activation\n self.rotation_activation = getattr(torch, rotation_activation) if isinstance(rotation_activation, str) else rotation_activation\n\n self.scaling_inverse_activation = getattr(torch, scaling_inverse_activation) if isinstance(scaling_inverse_activation, str) else scaling_inverse_activation\n self.inverse_opacity_activation = getattr(torch, inverse_opacity_activation) if isinstance(inverse_opacity_activation, str) else inverse_opacity_activation\n self.covariance_activation = build_covariance_from_scaling_rotation\n\n @property\n def device(self):\n return self.get_xyz.device\n\n @property\n def get_scaling(self):\n return self.scaling_activation(self._scaling)\n\n @property\n def get_rotation(self):\n return self.rotation_activation(self._rotation)\n\n @property\n def get_xyz(self):\n return self._xyz\n\n @property\n def get_features(self):\n features_dc = self._features_dc\n features_rest = self._features_rest\n return torch.cat((features_dc, features_rest), dim=1)\n\n @property\n def get_opacity(self):\n return self.opacity_activation(self._opacity)\n\n def get_covariance(self, scaling_modifier=1):\n return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation)\n\n def oneupSHdegree(self):\n if self.active_sh_degree < self.max_sh_degree:\n self.active_sh_degree += 1\n\n def create_from_pcd(self, xyz: torch.Tensor, colors: torch.Tensor, opacity: float = 0.1):\n from simple_knn._C import distCUDA2\n if xyz is None:\n xyz = torch.empty(0, 3, device='cuda') # by default, init empty gaussian model on CUDA\n\n features = torch.zeros((xyz.shape[0], 3, (self.max_sh_degree + 1) ** 2))\n if colors is not None:\n SH = RGB2SH(colors)\n features[:, :3, 0] = SH\n features[:, 3: 1:] = 0\n\n dist2 = torch.clamp_min(distCUDA2(xyz.float().cuda()), 0.0000001)\n scales = self.scaling_inverse_activation(torch.sqrt(dist2))[..., None].repeat(1, 3)\n rots = torch.rand((xyz.shape[0], 4))\n rots[:, 0] = 1\n\n opacities = self.inverse_opacity_activation(opacity * torch.ones((xyz.shape[0], 1), dtype=torch.float))\n\n self._xyz = make_params(xyz)\n self._features_dc = make_params(features[:, :, :1].transpose(1, 2).contiguous())\n self._features_rest = make_params(features[:, :, 1:].transpose(1, 2).contiguous())\n self._scaling = make_params(scales)\n self._rotation = make_params(rots)\n self._opacity = make_params(opacities)\n\n @torch.no_grad()\n def _load_state_dict_pre_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n # Supports loading points and features with different shapes\n if prefix is not '' and not prefix.endswith('.'): prefix = prefix + '.' 
# special care for when we're loading the model directly\n for name, params in self.named_parameters():\n params.data = params.data.new_empty(state_dict[f'{prefix}{name}'].shape)\n\n def reset_opacity(self, optimizer_state):\n for _, val in optimizer_state.items():\n if val.name == '_opacity':\n break\n opacities_new = inverse_sigmoid(torch.min(self.get_opacity, torch.ones_like(self.get_opacity) * 0.01))\n self._opacity.set_(opacities_new.detach())\n self._opacity.grad = None\n val.old_keep = torch.zeros_like(val.old_keep, dtype=torch.bool)\n val.new_keep = torch.zeros_like(val.new_keep, dtype=torch.bool)\n val.new_params = self._opacity\n # optimizable_tensors = self.replace_tensor_to_optimizer(opacities_new, \"opacity\")\n # self._opacity = optimizable_tensors[\"opacity\"]\n\n def replace_tensor_to_optimizer(self, tensor, name):\n optimizable_tensors = {}\n for group in self.optimizer.param_groups:\n if group[\"name\"] == name:\n stored_state = self.optimizer.state.get(group['params'][0], None)\n stored_state[\"exp_avg\"] = torch.zeros_like(tensor)\n stored_state[\"exp_avg_sq\"] = torch.zeros_like(tensor)\n\n del self.optimizer.state[group['params'][0]]\n group[\"params\"][0] = nn.Parameter(tensor.requires_grad_(True))\n self.optimizer.state[group['params'][0]] = stored_state\n\n optimizable_tensors[group[\"name\"]] = group[\"params\"][0]\n return optimizable_tensors\n\n def _prune_optimizer(self, mask: torch.Tensor):\n optimizable_tensors = {}\n for group in self.optimizer.param_groups:\n stored_state = self.optimizer.state.get(group['params'][0], None)\n if stored_state is not None:\n stored_state[\"exp_avg\"] = stored_state[\"exp_avg\"][mask]\n stored_state[\"exp_avg_sq\"] = stored_state[\"exp_avg_sq\"][mask]\n\n del self.optimizer.state[group['params'][0]]\n group[\"params\"][0] = nn.Parameter((group[\"params\"][0][mask].requires_grad_(True)))\n self.optimizer.state[group['params'][0]] = stored_state\n\n optimizable_tensors[group[\"name\"]] = group[\"params\"][0]\n else:\n group[\"params\"][0] = nn.Parameter(group[\"params\"][0][mask].requires_grad_(True))\n optimizable_tensors[group[\"name\"]] = group[\"params\"][0]\n return optimizable_tensors\n\n def prune_points(self, mask):\n valid_points_mask = ~mask\n # optimizable_tensors = self._prune_optimizer(valid_points_mask)\n\n # self._xyz = optimizable_tensors[\"xyz\"]\n # self._features_dc = optimizable_tensors[\"f_dc\"]\n # self._features_rest = optimizable_tensors[\"f_rest\"]\n # self._opacity = optimizable_tensors[\"opacity\"]\n # self._scaling = optimizable_tensors[\"scaling\"]\n # self._rotation = optimizable_tensors[\"rotation\"]\n\n self._xyz.set_(self._xyz[valid_points_mask].detach())\n self._xyz.grad = None\n self._features_dc.set_(self._features_dc[valid_points_mask].detach())\n self._features_dc.grad = None\n self._features_rest.set_(self._features_rest[valid_points_mask].detach())\n self._features_rest.grad = None\n self._opacity.set_(self._opacity[valid_points_mask].detach())\n self._opacity.grad = None\n self._scaling.set_(self._scaling[valid_points_mask].detach())\n self._scaling.grad = None\n self._rotation.set_(self._rotation[valid_points_mask].detach())\n self._rotation.grad = None\n\n self.xyz_gradient_accum.set_(self.xyz_gradient_accum[valid_points_mask])\n self.xyz_gradient_accum.grad = None\n self.denom.set_(self.denom[valid_points_mask])\n self.denom.grad = None\n self.max_radii2D.set_(self.max_radii2D[valid_points_mask])\n self.max_radii2D.grad = None\n\n def cat_tensors_to_optimizer(self, tensors_dict):\n 
optimizable_tensors = {}\n for group in self.optimizer.param_groups:\n assert len(group[\"params\"]) == 1\n extension_tensor = tensors_dict[group[\"name\"]]\n stored_state = self.optimizer.state.get(group['params'][0], None)\n if stored_state is not None:\n\n stored_state[\"exp_avg\"] = torch.cat((stored_state[\"exp_avg\"], torch.zeros_like(extension_tensor)), dim=0)\n stored_state[\"exp_avg_sq\"] = torch.cat((stored_state[\"exp_avg_sq\"], torch.zeros_like(extension_tensor)), dim=0)\n\n del self.optimizer.state[group['params'][0]]\n group[\"params\"][0] = nn.Parameter(torch.cat((group[\"params\"][0], extension_tensor), dim=0).requires_grad_(True))\n self.optimizer.state[group['params'][0]] = stored_state\n\n optimizable_tensors[group[\"name\"]] = group[\"params\"][0]\n else:\n group[\"params\"][0] = nn.Parameter(torch.cat((group[\"params\"][0], extension_tensor), dim=0).requires_grad_(True))\n optimizable_tensors[group[\"name\"]] = group[\"params\"][0]\n\n return optimizable_tensors\n\n def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation, optimizer_state):\n d = dotdict({\n \"_xyz\": new_xyz,\n \"_features_dc\": new_features_dc,\n \"_features_rest\": new_features_rest,\n \"_opacity\": new_opacities,\n \"_scaling\": new_scaling,\n \"_rotation\": new_rotation,\n })\n\n # optimizable_tensors = self.cat_tensors_to_optimizer(d)\n # self._xyz = optimizable_tensors[\"xyz\"]\n # self._features_dc = optimizable_tensors[\"f_dc\"]\n # self._features_rest = optimizable_tensors[\"f_rest\"]\n # self._opacity = optimizable_tensors[\"opacity\"]\n # self._scaling = optimizable_tensors[\"scaling\"]\n # self._rotation = optimizable_tensors[\"rotation\"]\n\n for name, new_params in d.items():\n params: nn.Parameter = getattr(self, name)\n params.set_(torch.cat((params.data, new_params), dim=0).detach())\n params.grad = None\n\n device = self.get_xyz.device\n self.xyz_gradient_accum.set_(torch.zeros((self.get_xyz.shape[0], 1), device=device))\n self.xyz_gradient_accum.grad = None\n self.denom.set_(torch.zeros((self.get_xyz.shape[0], 1), device=device))\n self.denom.grad = None\n self.max_radii2D.set_(torch.zeros((self.get_xyz.shape[0]), device=device))\n self.max_radii2D.grad = None\n\n for val in optimizer_state.values():\n name = val.name\n val.new_keep = torch.cat((val.new_keep, torch.zeros_like(d[name], dtype=torch.bool, requires_grad=False)), dim=0)\n val.new_params = getattr(self, name)\n assert val.new_keep.shape == val.new_params.shape\n\n def densify_and_split(self, grads, grad_threshold, scene_extent, percent_dense, min_opacity, max_screen_size, optimizer_state, N=2):\n n_init_points = self.get_xyz.shape[0]\n device = self.get_xyz.device\n # Extract points that satisfy the gradient condition\n padded_grad = torch.zeros((n_init_points), device=device)\n padded_grad[:grads.shape[0]] = grads.squeeze()\n selected_pts_mask = padded_grad >= grad_threshold\n selected_pts_mask = torch.logical_and(selected_pts_mask,\n torch.max(self.get_scaling, dim=1).values > percent_dense * scene_extent)\n\n stds = self.get_scaling[selected_pts_mask].repeat(N, 1)\n means = torch.zeros((stds.size(0), 3), device=device)\n samples = torch.normal(mean=means, std=stds)\n rots = build_rotation(self._rotation[selected_pts_mask]).repeat(N, 1, 1)\n new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self.get_xyz[selected_pts_mask].repeat(N, 1)\n new_scaling = self.scaling_inverse_activation(self.get_scaling[selected_pts_mask].repeat(N, 1) / (0.8 * N))\n 
new_rotation = self._rotation[selected_pts_mask].repeat(N, 1)\n new_features_dc = self._features_dc[selected_pts_mask].repeat(N, 1, 1)\n new_features_rest = self._features_rest[selected_pts_mask].repeat(N, 1, 1)\n new_opacity = self._opacity[selected_pts_mask].repeat(N, 1)\n\n self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_opacity, new_scaling, new_rotation, optimizer_state)\n\n prune_filter = torch.cat((selected_pts_mask, torch.zeros(N * selected_pts_mask.sum(), device=device, dtype=bool)))\n self.prune_points(prune_filter)\n old_keep_mask = ~prune_filter[:grads.shape[0]]\n for val in optimizer_state.values():\n name = val.name\n val.old_keep[~old_keep_mask] = False\n val.new_keep = val.new_keep[~prune_filter]\n val.params = getattr(self, name)\n assert val.old_keep.sum() == val.new_keep.sum()\n assert val.new_keep.shape == val.new_params.shape\n\n prune_mask = (self.get_opacity < min_opacity).squeeze()\n if max_screen_size:\n big_points_vs = self.max_radii2D > max_screen_size\n big_points_ws = self.get_scaling.max(dim=1).values > 0.1 * scene_extent\n prune_mask = torch.logical_or(torch.logical_or(prune_mask, big_points_vs), big_points_ws)\n self.prune_points(prune_mask)\n _old_keep_mask = old_keep_mask.clone()\n mask_mask = old_keep_mask[old_keep_mask]\n _mask = prune_mask[:mask_mask.shape[0]]\n mask_mask[_mask] = False\n old_keep_mask[_old_keep_mask] = mask_mask\n for val in optimizer_state.values():\n name = val.name\n val.old_keep[~old_keep_mask] = False\n val.new_keep = val.new_keep[~prune_mask]\n val.params = getattr(self, name)\n assert val.old_keep.sum() == val.new_keep.sum()\n assert val.new_keep.shape == val.new_params.shape\n\n def densify_and_clone(self, grads, grad_threshold, scene_extent, percent_dense, optimizer_state):\n # Extract points that satisfy the gradient condition\n selected_pts_mask = torch.norm(grads, dim=-1) >= grad_threshold\n selected_pts_mask = torch.logical_and(selected_pts_mask,\n torch.max(self.get_scaling, dim=1).values <= percent_dense * scene_extent)\n\n new_xyz = self._xyz[selected_pts_mask]\n new_features_dc = self._features_dc[selected_pts_mask]\n new_features_rest = self._features_rest[selected_pts_mask]\n new_opacities = self._opacity[selected_pts_mask]\n new_scaling = self._scaling[selected_pts_mask]\n new_rotation = self._rotation[selected_pts_mask]\n\n self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation, optimizer_state)\n\n def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size, percent_dense, optimizer_state):\n\n grads = self.xyz_gradient_accum / self.denom\n grads[grads.isnan()] = 0.0\n\n self.densify_and_clone(grads, max_grad, extent, percent_dense, optimizer_state)\n self.densify_and_split(grads, max_grad, extent, percent_dense, min_opacity, max_screen_size, optimizer_state)\n\n torch.cuda.empty_cache()\n\n def add_densification_stats(self, viewspace_point_tensor, update_filter):\n self.xyz_gradient_accum[update_filter] += torch.norm(viewspace_point_tensor.grad[update_filter, :2], dim=-1, keepdim=True)\n self.denom[update_filter] += 1\n\n def construct_list_of_attributes(self):\n l = ['x', 'y', 'z', 'nx', 'ny', 'nz']\n # All channels except the 3 DC\n for i in range(self._features_dc.shape[1] * self._features_dc.shape[2]):\n l.append('f_dc_{}'.format(i))\n for i in range(self._features_rest.shape[1] * self._features_rest.shape[2]):\n l.append('f_rest_{}'.format(i))\n l.append('opacity')\n for i in range(self._scaling.shape[1]):\n 
l.append('scale_{}'.format(i))\n for i in range(self._rotation.shape[1]):\n l.append('rot_{}'.format(i))\n return l\n\n def save_ply(self, path):\n import os\n from plyfile import PlyData, PlyElement\n dirname = os.path.dirname(path)\n os.makedirs(dirname, exist_ok=True)\n\n xyz = self._xyz.detach().cpu().numpy()\n normals = np.zeros_like(xyz)\n f_dc = self._features_dc.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()\n f_rest = self._features_rest.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()\n opacities = self._opacity.detach().cpu().numpy()\n scale = self._scaling.detach().cpu().numpy()\n rotation = self._rotation.detach().cpu().numpy()\n\n dtype_full = [(attribute, 'f4') for attribute in self.construct_list_of_attributes()]\n\n elements = np.empty(xyz.shape[0], dtype=dtype_full)\n attributes = np.concatenate((xyz, normals, f_dc, f_rest, opacities, scale, rotation), axis=1)\n elements[:] = list(map(tuple, attributes))\n el = PlyElement.describe(elements, 'vertex')\n PlyData([el]).write(path)\n\n def render(self, batch: dotdict):\n # TODO: Make rendering function easier to read, now there're at least 3 types of gaussian rendering function\n from diff_gauss import GaussianRasterizationSettings, GaussianRasterizer\n\n # Prepare renderable parameters, without batch\n xyz = self.get_xyz\n scale3 = self.get_scaling\n rot4 = self.get_rotation\n occ = self.get_opacity\n sh = self.get_features\n\n # Prepare the camera transformation for Gaussian\n gaussian_camera = to_x(prepare_gaussian_camera(add_batch(batch)), torch.float)\n\n # Prepare rasterization settings for gaussian\n raster_settings = GaussianRasterizationSettings(\n image_height=gaussian_camera.image_height,\n image_width=gaussian_camera.image_width,\n tanfovx=gaussian_camera.tanfovx,\n tanfovy=gaussian_camera.tanfovy,\n bg=torch.full([3], 0.0, device=xyz.device), # GPU # TODO: make these configurable\n scale_modifier=1.0, # TODO: make these configurable\n viewmatrix=gaussian_camera.world_view_transform,\n projmatrix=gaussian_camera.full_proj_transform,\n sh_degree=self.active_sh_degree,\n campos=gaussian_camera.camera_center,\n prefiltered=False,\n debug=False,\n )\n\n # Rasterize visible Gaussians to image, obtain their radii (on screen).\n scr = torch.zeros_like(xyz, requires_grad=True) + 0 # gradient magic\n if scr.requires_grad: scr.retain_grad()\n rasterizer = GaussianRasterizer(raster_settings=raster_settings)\n rendered_image, rendered_depth, rendered_alpha, radii = typed(torch.float, torch.float)(rasterizer)(\n means3D=xyz,\n means2D=scr,\n shs=sh,\n colors_precomp=None,\n opacities=occ,\n scales=scale3,\n rotations=rot4,\n cov3D_precomp=None,\n )\n\n # No batch dimension\n rgb = rendered_image.permute(1, 2, 0)\n acc = rendered_alpha.permute(1, 2, 0)\n dpt = rendered_depth.permute(1, 2, 0)\n\n return rgb, acc, dpt # H, W, C" }, { "identifier": "load_bodymodel", "path": "easyvolcap/utils/easy_utils.py", "snippet": "def load_bodymodel(data_root: str, bodymodel_file: str, device='cpu'):\n # Import easymocap here since it's not a hard dependency, only if you want to use SMPL as prior\n from easymocap.config.baseconfig import load_object, Config\n\n if False: # easymocap has been updated\n # Load config and convert the relative paths to absolute paths\n cfg_model = Config.load(join(data_root, bodymodel_file)) # whatever for now\n cfg_model.module = cfg_model.module.replace('SMPLHModelEmbedding', 'SMPLHModel')\n cfg_model.module = cfg_model.module.replace('SMPLLayerEmbedding', 
'SMPLModel')\n\n # Cannot use relative path since easymocap is somewhere else and can be different on different machines\n easymocap_path = pkg_resources.get_distribution(\"easymocap\").location\n for key, value in cfg_model.args.items(): cfg_model.args[key] = join(easymocap_path, value) if 'path' in key else value\n\n # Set device to cpu\n cfg_model.args.device = 'cpu'\n\n # Load actual body model\n bodymodel = load_object(cfg_model.module, cfg_model.args)\n\n if bodymodel_file:\n cfg_exp = Config.load(join(data_root, bodymodel_file))\n else:\n cfg_exp = Config.load(data_root)\n\n cfg_model = cfg_exp.args.at_final.load_body_model\n easymocap_path = pkg_resources.get_distribution(\"easymocap\").location\n cfg_model.args.model_path = join(easymocap_path, cfg_model.args.model_path)\n cfg_model.args.regressor_path = join(easymocap_path, cfg_model.args.regressor_path)\n cfg_model.args.device = device\n\n body_loader = load_object(cfg_model.module, cfg_model.args)\n bodymodel = body_loader.smplmodel\n\n return bodymodel" }, { "identifier": "world_points_to_pose_points", "path": "easyvolcap/utils/blend_utils.py", "snippet": "def world_points_to_pose_points(wpts, R, Th):\n \"\"\"\n wpts: n_batch, n_points, 3\n R: n_batch, 3, 3\n Th: n_batch, 1, 3\n \"\"\"\n if Th.ndim == 2:\n Th = Th[..., None, :] # add fake point dimension\n pts = torch.matmul(wpts - Th, R)\n return pts" }, { "identifier": "pose_points_to_world_points", "path": "easyvolcap/utils/blend_utils.py", "snippet": "def pose_points_to_world_points(ppts, R, Th):\n \"\"\"\n ppts: n_batch, n_points, 3\n R: n_batch, 3, 3\n Th: n_batch, 1, 3\n \"\"\"\n if Th.ndim == 2:\n Th = Th[..., None, :] # add fake point dimension\n pts = torch.matmul(ppts, R.transpose(1, 2)) + Th\n return pts" }, { "identifier": "sample_blend_K_closest_points", "path": "easyvolcap/utils/sample_utils.py", "snippet": "def sample_blend_K_closest_points(src: torch.Tensor, ref: torch.Tensor, values: torch.Tensor = None, K: int = 4, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]:\n # not so useful to aggregate all K points\n n_batch, n_points, _ = src.shape\n ret = cast_knn_points(src, ref, K=K)\n dists, vert_ids = ret.dists, ret.idx # (n_batch, n_points, K)\n # sampled = values[vert_ids] # (n_batch, n_points, K, D)\n weights = 1 / (dists + eps)\n weights /= weights.sum(dim=-1, keepdim=True)\n dists = torch.einsum('ijk,ijk->ij', dists, weights)\n if values is None:\n return dists.view(n_batch, n_points, 1)\n # sampled *= weights[..., None] # augment weight in last dim for bones # written separatedly to avoid OOM\n # sampled = sampled.sum(dim=-2) # sum over second to last for weighted bw\n values = values.view(-1, values.shape[-1]) # (n, D)\n sampled = torch.einsum('ijkl,ijk->ijl', values[vert_ids], weights)\n return sampled.view(n_batch, n_points, -1), dists.view(n_batch, n_points, 1)" } ]
import os import argparse import torch import numpy as np import torch.nn.functional as F import sys from glob import glob from os.path import join from tqdm import tqdm from easymocap.bodymodel.smpl import SMPLModel from easymocap.bodymodel.lbs import batch_rodrigues from easyvolcap.utils.sh_utils import * from easyvolcap.utils.console_utils import * from easyvolcap.utils.base_utils import dotdict from easyvolcap.utils.data_utils import load_mesh, load_dotdict, to_tensor from easyvolcap.utils.net_utils import load_network from easyvolcap.utils.gaussian_utils import GaussianModel from easyvolcap.utils.easy_utils import load_bodymodel from easyvolcap.utils.blend_utils import world_points_to_pose_points, pose_points_to_world_points from easyvolcap.utils.sample_utils import sample_blend_K_closest_points from pytorch3d.structures import Meshes, Pointclouds from pytorch3d.transforms import matrix_to_quaternion, quaternion_to_matrix from pytorch3d.ops import knn_points from easyvolcap.scripts.main import test # will do everything a normal user would do from easyvolcap.engine import cfg
9,589
def get_transform(weights, A, eps=torch.finfo(torch.float32).eps, inverse=False): """ weights: B, N, J A: B, J, D, D """ T = torch.einsum('bpn,bnij->bpij', weights, A) dim = T.shape[-1] if inverse: T = (T + eps * torch.eye(dim, device=T.device, dtype=T.dtype)[None, None]).inverse() return T def transform(xyz, T): xyz = F.pad(xyz, (0, 1), value=1.0) xyz = torch.einsum("bpij,bpj->bpi", T, xyz)[..., :3] return xyz def load_pcd(path, sh_deg, smpl, prefix='sampler.pcds.0.', freeze=True, norm_with_smpl=True): pcd = GaussianModel(torch.rand(1, 3), None, 0.1, sh_deg) load_network(pcd, path, prefix=prefix) if norm_with_smpl: Rh = smpl['Rh'] Th = smpl['Th'] pcd._xyz.data = world_points_to_pose_points(pcd._xyz[None], Rh, Th)[0] pcd._xyz.grad = None R = quaternion_to_matrix(pcd._rotation) R = Rh[0].mT @ R pcd._rotation.data = matrix_to_quaternion(R) pcd._rotation.grad = None if freeze: for params in pcd.parameters(): params.requires_grad = False assert pcd.active_sh_degree.item() == sh_deg return pcd def load_smpl(path): smpl = to_tensor(load_dotdict(path)) smpl = dotdict({ 'shapes': smpl.shapes[:1], 'poses': smpl.poses[:1], 'Rh': batch_rodrigues(smpl.Rh[:1]), 'Th': smpl.Th[:1], }) return smpl def compute_lbs(pcd: GaussianModel, smpl: dotdict, bodymodel: SMPLModel, K=4): xyz = pcd.get_xyz smpl_verts = bodymodel(shapes=smpl['shapes'], poses=smpl['poses'])
def get_transform(weights, A, eps=torch.finfo(torch.float32).eps, inverse=False): """ weights: B, N, J A: B, J, D, D """ T = torch.einsum('bpn,bnij->bpij', weights, A) dim = T.shape[-1] if inverse: T = (T + eps * torch.eye(dim, device=T.device, dtype=T.dtype)[None, None]).inverse() return T def transform(xyz, T): xyz = F.pad(xyz, (0, 1), value=1.0) xyz = torch.einsum("bpij,bpj->bpi", T, xyz)[..., :3] return xyz def load_pcd(path, sh_deg, smpl, prefix='sampler.pcds.0.', freeze=True, norm_with_smpl=True): pcd = GaussianModel(torch.rand(1, 3), None, 0.1, sh_deg) load_network(pcd, path, prefix=prefix) if norm_with_smpl: Rh = smpl['Rh'] Th = smpl['Th'] pcd._xyz.data = world_points_to_pose_points(pcd._xyz[None], Rh, Th)[0] pcd._xyz.grad = None R = quaternion_to_matrix(pcd._rotation) R = Rh[0].mT @ R pcd._rotation.data = matrix_to_quaternion(R) pcd._rotation.grad = None if freeze: for params in pcd.parameters(): params.requires_grad = False assert pcd.active_sh_degree.item() == sh_deg return pcd def load_smpl(path): smpl = to_tensor(load_dotdict(path)) smpl = dotdict({ 'shapes': smpl.shapes[:1], 'poses': smpl.poses[:1], 'Rh': batch_rodrigues(smpl.Rh[:1]), 'Th': smpl.Th[:1], }) return smpl def compute_lbs(pcd: GaussianModel, smpl: dotdict, bodymodel: SMPLModel, K=4): xyz = pcd.get_xyz smpl_verts = bodymodel(shapes=smpl['shapes'], poses=smpl['poses'])
weights, dists = sample_blend_K_closest_points(xyz[None], smpl_verts, bodymodel.weights[None], K=K)
9
2023-10-17 04:48:46+00:00
12k
codefuse-ai/Test-Agent
chat/model/model_adapter.py
[ { "identifier": "CPU_ISA", "path": "chat/constants.py", "snippet": "CPU_ISA = os.getenv(\"CPU_ISA\")" }, { "identifier": "GptqConfig", "path": "chat/modules/gptq.py", "snippet": "class GptqConfig:\n ckpt: str = field(\n default=None,\n metadata={\n \"help\": \"Load quantized model. The path to the local GPTQ checkpoint.\"\n },\n )\n wbits: int = field(default=16, metadata={\"help\": \"#bits to use for quantization\"})\n groupsize: int = field(\n default=-1,\n metadata={\"help\": \"Groupsize to use for quantization; default uses full row.\"},\n )\n act_order: bool = field(\n default=True,\n metadata={\"help\": \"Whether to apply the activation order GPTQ heuristic\"},\n )" }, { "identifier": "load_gptq_quantized", "path": "chat/modules/gptq.py", "snippet": "def load_gptq_quantized(model_name, gptq_config: GptqConfig):\n print(\"Loading GPTQ quantized model...\")\n\n try:\n script_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n module_path = os.path.join(script_path, \"../repositories/GPTQ-for-LLaMa\")\n\n sys.path.insert(0, module_path)\n from llama import load_quant\n except ImportError as e:\n print(f\"Error: Failed to load GPTQ-for-LLaMa. {e}\")\n print(\"See https://github.com/lm-sys/FastChat/blob/main/docs/gptq.md\")\n sys.exit(-1)\n\n tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)\n # only `fastest-inference-4bit` branch cares about `act_order`\n if gptq_config.act_order:\n model = load_quant(\n model_name,\n find_gptq_ckpt(gptq_config),\n gptq_config.wbits,\n gptq_config.groupsize,\n act_order=gptq_config.act_order,\n )\n else:\n # other branches\n model = load_quant(\n model_name,\n find_gptq_ckpt(gptq_config),\n gptq_config.wbits,\n gptq_config.groupsize,\n )\n\n return model, tokenizer" }, { "identifier": "AWQConfig", "path": "chat/modules/awq.py", "snippet": "class AWQConfig:\n ckpt: str = field(\n default=None,\n metadata={\n \"help\": \"Load quantized model. The path to the local AWQ checkpoint.\"\n },\n )\n wbits: int = field(default=16, metadata={\"help\": \"#bits to use for quantization\"})\n groupsize: int = field(\n default=-1,\n metadata={\"help\": \"Groupsize to use for quantization; default uses full row.\"},\n )" }, { "identifier": "load_awq_quantized", "path": "chat/modules/awq.py", "snippet": "def load_awq_quantized(model_name, awq_config: AWQConfig, device):\n print(\"Loading AWQ quantized model...\")\n\n try:\n from tinychat.utils import load_quant\n from tinychat.modules import make_quant_norm, make_quant_attn, make_fused_mlp\n except ImportError as e:\n print(f\"Error: Failed to import tinychat. 
{e}\")\n print(\"Please double check if you have successfully installed AWQ\")\n print(\"See https://github.com/lm-sys/FastChat/blob/main/docs/awq.md\")\n sys.exit(-1)\n\n config = AutoConfig.from_pretrained(model_name, trust_remote_code=True)\n tokenizer = AutoTokenizer.from_pretrained(\n model_name, use_fast=False, trust_remote_code=True\n )\n\n def skip(*args, **kwargs):\n pass\n\n torch.nn.init.kaiming_uniform_ = skip\n torch.nn.init.kaiming_normal_ = skip\n torch.nn.init.uniform_ = skip\n torch.nn.init.normal_ = skip\n modeling_utils._init_weights = False\n\n torch.set_default_dtype(torch.half)\n model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)\n\n if any(name in find_awq_ckpt(awq_config) for name in [\"llama\", \"vicuna\"]):\n model = load_quant.load_awq_llama_fast(\n model,\n find_awq_ckpt(awq_config),\n awq_config.wbits,\n awq_config.groupsize,\n device,\n )\n make_quant_attn(model, device)\n make_quant_norm(model)\n make_fused_mlp(model)\n else:\n model = load_quant.load_awq_model(\n model,\n find_awq_ckpt(awq_config),\n awq_config.wbits,\n awq_config.groupsize,\n device,\n )\n return model, tokenizer" }, { "identifier": "Conversation", "path": "chat/conversation.py", "snippet": "class Conversation:\n \"\"\"A class that manages prompt templates and keeps all conversation history.\"\"\"\n\n # The name of this template\n name: str\n # The template of the system prompt\n system_template: str = \"{system_message}\"\n # The system message\n system_message: str = \"\"\n # The names of two roles\n roles: List[str] = ((\"USER\", \"ASSISTANT\"),)\n # All messages. Each item is (role, message).\n messages: List[List[str]] = ()\n # The number of few shot examples\n offset: int = 0\n # The separator style and configurations\n sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE\n sep: str = \"\\n\"\n sep2: str = None\n # Stop criteria (the default one is EOS token)\n stop_str: Union[str, List[str]] = None\n # Stops generation if meeting any token in this list\n stop_token_ids: List[int] = None\n\n def get_prompt(self) -> str:\n \"\"\"Get the prompt for generation.\"\"\"\n system_prompt = self.system_template.format(system_message=self.system_message)\n if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:\n ret = system_prompt + self.sep\n for role, message in self.messages:\n if message:\n ret += role + \": \" + message + self.sep\n else:\n ret += role + \":\"\n return ret\n elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:\n seps = [self.sep, self.sep2]\n ret = system_prompt + seps[0]\n for i, (role, message) in enumerate(self.messages):\n if message:\n ret += role + \": \" + message + seps[i % 2]\n else:\n ret += role + \":\"\n return ret\n elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:\n ret = system_prompt + self.sep\n for role, message in self.messages:\n if message:\n ret += role + \": \" + message + self.sep\n else:\n ret += role + \": \" # must be end with a space\n return ret\n elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:\n ret = \"\" if system_prompt == \"\" else system_prompt + self.sep\n for role, message in self.messages:\n if message:\n ret += role + \"\\n\" + message + self.sep\n else:\n ret += role + \"\\n\"\n return ret\n elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:\n ret = system_prompt\n for role, message in self.messages:\n if message:\n ret += role + message + self.sep\n else:\n ret += role\n return ret\n elif self.sep_style == SeparatorStyle.NO_COLON_TWO:\n seps = [self.sep, self.sep2]\n ret = 
system_prompt\n for i, (role, message) in enumerate(self.messages):\n if message:\n ret += role + message + seps[i % 2]\n else:\n ret += role\n return ret\n elif self.sep_style == SeparatorStyle.RWKV:\n ret = system_prompt\n for i, (role, message) in enumerate(self.messages):\n if message:\n ret += (\n role\n + \": \"\n + message.replace(\"\\r\\n\", \"\\n\").replace(\"\\n\\n\", \"\\n\")\n )\n ret += \"\\n\\n\"\n else:\n ret += role + \":\"\n return ret\n elif self.sep_style == SeparatorStyle.LLAMA2:\n seps = [self.sep, self.sep2]\n if self.system_message:\n ret = system_prompt\n else:\n ret = \"[INST] \"\n for i, (role, message) in enumerate(self.messages):\n if message:\n if i == 0:\n ret += message + \" \"\n else:\n ret += role + \" \" + message + seps[i % 2]\n else:\n ret += role\n return ret\n\n elif self.sep_style == SeparatorStyle.LLAMA2_TESTGPT:\n seps = [self.sep, self.sep2]\n if self.system_message:\n ret = system_prompt\n else:\n ret = \"<s>human\\n \"\n for i, (role, message) in enumerate(self.messages):\n if message:\n if i == 0:\n ret += message + \" \"\n else:\n ret += role + \" \" + message + seps[i % 2]\n else:\n ret += role\n return ret\n elif self.sep_style == SeparatorStyle.CHATGLM:\n # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308\n # source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926\n round_add_n = 1 if self.name == \"chatglm2\" else 0\n if system_prompt:\n ret = system_prompt + self.sep\n else:\n ret = \"\"\n\n for i, (role, message) in enumerate(self.messages):\n if i % 2 == 0:\n ret += f\"[Round {i//2 + round_add_n}]{self.sep}\"\n\n if message:\n ret += f\"{role}:{message}{self.sep}\"\n else:\n ret += f\"{role}:\"\n return ret\n elif self.sep_style == SeparatorStyle.CHATML:\n ret = \"\" if system_prompt == \"\" else system_prompt + self.sep + \"\\n\"\n for role, message in self.messages:\n if message:\n ret += role + \"\\n\" + message + self.sep + \"\\n\"\n else:\n ret += role + \"\\n\"\n return ret\n elif self.sep_style == SeparatorStyle.CHATINTERN:\n # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771\n seps = [self.sep, self.sep2]\n ret = system_prompt\n for i, (role, message) in enumerate(self.messages):\n if i % 2 == 0:\n ret += \"<s>\"\n if message:\n ret += role + \":\" + message + seps[i % 2] + \"\\n\"\n else:\n ret += role + \":\"\n return ret\n elif self.sep_style == SeparatorStyle.DOLLY:\n seps = [self.sep, self.sep2]\n ret = system_prompt\n for i, (role, message) in enumerate(self.messages):\n if message:\n ret += role + \":\\n\" + message + seps[i % 2]\n if i % 2 == 1:\n ret += \"\\n\\n\"\n else:\n ret += role + \":\\n\"\n return ret\n elif self.sep_style == SeparatorStyle.PHOENIX:\n ret = system_prompt\n for role, message in self.messages:\n if message:\n ret += role + \": \" + \"<s>\" + message + \"</s>\"\n else:\n ret += role + \": \" + \"<s>\"\n return ret\n elif self.sep_style == SeparatorStyle.ROBIN:\n ret = system_prompt + self.sep\n for role, message in self.messages:\n if message:\n ret += role + \":\\n\" + message + self.sep\n else:\n ret += role + \":\\n\"\n return ret\n else:\n raise ValueError(f\"Invalid style: {self.sep_style}\")\n\n def set_system_message(self, system_message: str):\n \"\"\"Set the system message.\"\"\"\n self.system_message = system_message\n\n def append_message(self, role: str, message: 
str):\n \"\"\"Append a new message.\"\"\"\n self.messages.append([role, message])\n\n def update_last_message(self, message: str):\n \"\"\"Update the last output.\n\n The last message is typically set to be None when constructing the prompt,\n so we need to update it in-place after getting the response from a model.\n \"\"\"\n self.messages[-1][1] = message\n\n def to_gradio_chatbot(self):\n \"\"\"Convert the conversation to gradio chatbot format.\"\"\"\n ret = []\n for i, (role, msg) in enumerate(self.messages[self.offset :]):\n if i % 2 == 0:\n ret.append([msg, None])\n else:\n ret[-1][-1] = msg\n return ret\n\n def to_openai_api_messages(self):\n \"\"\"Convert the conversation to OpenAI chat completion format.\"\"\"\n ret = [{\"role\": \"system\", \"content\": self.system_message}]\n\n for i, (_, msg) in enumerate(self.messages[self.offset :]):\n if i % 2 == 0:\n ret.append({\"role\": \"user\", \"content\": msg})\n else:\n if msg is not None:\n ret.append({\"role\": \"assistant\", \"content\": msg})\n return ret\n\n def copy(self):\n return Conversation(\n name=self.name,\n system_template=self.system_template,\n system_message=self.system_message,\n roles=self.roles,\n messages=[[x, y] for x, y in self.messages],\n offset=self.offset,\n sep_style=self.sep_style,\n sep=self.sep,\n sep2=self.sep2,\n stop_str=self.stop_str,\n stop_token_ids=self.stop_token_ids,\n )\n\n def dict(self):\n return {\n \"template_name\": self.name,\n \"system_message\": self.system_message,\n \"roles\": self.roles,\n \"messages\": self.messages,\n \"offset\": self.offset,\n }" }, { "identifier": "get_conv_template", "path": "chat/conversation.py", "snippet": "def get_conv_template(name: str) -> Conversation:\n \"\"\"Get a conversation template.\"\"\"\n return conv_templates[name].copy()" }, { "identifier": "load_compress_model", "path": "chat/model/compression.py", "snippet": "def load_compress_model(model_path, device, torch_dtype, use_fast, revision=\"main\"):\n # partially load model\n # `use_fast=True`` is not supported for some models.\n try:\n tokenizer = AutoTokenizer.from_pretrained(\n model_path, use_fast=use_fast, revision=revision, trust_remote_code=True\n )\n except TypeError:\n tokenizer = AutoTokenizer.from_pretrained(\n model_path, use_fast=~use_fast, revision=revision, trust_remote_code=True\n )\n with init_empty_weights():\n # `trust_remote_code` should be set as `True` for both AutoConfig and AutoModel\n config = AutoConfig.from_pretrained(\n model_path,\n low_cpu_mem_usage=True,\n torch_dtype=torch_dtype,\n trust_remote_code=True,\n revision=revision,\n )\n # some models are loaded by AutoModel but not AutoModelForCausalLM,\n # such as chatglm, chatglm2\n try:\n model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)\n except NameError:\n model = AutoModel.from_config(config, trust_remote_code=True)\n linear_weights = get_compressed_list(model)\n if os.path.exists(model_path):\n # `model_path` is a local folder\n base_pattern = os.path.join(model_path, \"pytorch_model*.bin\")\n else:\n # `model_path` is a cached Hugging Face repo\n # We don't necessarily need to download the model' repo again if there is a cache.\n # So check the default huggingface cache first.\n model_path_temp = os.path.join(\n os.getenv(\"HOME\"),\n \".cache/huggingface/hub\",\n \"models--\" + model_path.replace(\"/\", \"--\"),\n \"snapshots/\",\n )\n downloaded = False\n if os.path.exists(model_path_temp):\n temp_last_dir = os.listdir(model_path_temp)[-1]\n model_path_temp = 
os.path.join(model_path_temp, temp_last_dir)\n base_pattern = os.path.join(model_path_temp, \"pytorch_model*.bin\")\n files = glob.glob(base_pattern)\n if len(files) > 0:\n downloaded = True\n\n if downloaded:\n model_path = model_path_temp\n else:\n model_path = snapshot_download(model_path, revision=revision)\n base_pattern = os.path.join(model_path, \"pytorch_model*.bin\")\n\n files = glob.glob(base_pattern)\n if len(files) == 0:\n raise ValueError(\n f\"Cannot find any model weight files. \"\n f\"Please check your (cached) weight path: {model_path}\"\n )\n\n compressed_state_dict = {}\n for filename in tqdm(files):\n tmp_state_dict = torch.load(filename, map_location=lambda storage, loc: storage)\n for name in tmp_state_dict:\n if name in linear_weights:\n tensor = tmp_state_dict[name].to(device, dtype=torch_dtype)\n compressed_state_dict[name] = compress(\n tensor, default_compression_config\n )\n else:\n compressed_state_dict[name] = tmp_state_dict[name].to(\n device, dtype=torch_dtype\n )\n tmp_state_dict[name] = None\n tensor = None\n gc.collect()\n torch.cuda.empty_cache()\n if device == \"xpu\":\n torch.xpu.empty_cache()\n\n for name in model.state_dict():\n if name not in linear_weights:\n set_module_tensor_to_device(\n model, name, device, value=compressed_state_dict[name]\n )\n apply_compressed_weight(model, compressed_state_dict, device)\n\n if torch_dtype == torch.float16:\n model.half()\n model.to(device)\n model.eval()\n\n return model, tokenizer" }, { "identifier": "replace_llama_with_condense", "path": "chat/model/llama_condense_monkey_patch.py", "snippet": "def replace_llama_with_condense(ratio):\n transformers.models.llama.modeling_llama.LlamaRotaryEmbedding = partial(\n CondenseRotaryEmbedding, ratio=ratio\n )" }, { "identifier": "generate_stream_chatglm", "path": "chat/model/model_chatglm.py", "snippet": "@torch.inference_mode()\ndef generate_stream_chatglm(\n model,\n tokenizer,\n params,\n device,\n context_len=2048,\n stream_interval=2,\n judge_sent_end=False,\n):\n prompt = params[\"prompt\"]\n temperature = float(params.get(\"temperature\", 1.0))\n repetition_penalty = float(params.get(\"repetition_penalty\", 1.0))\n top_p = float(params.get(\"top_p\", 1.0))\n max_new_tokens = int(params.get(\"max_new_tokens\", 256))\n echo = params.get(\"echo\", True)\n\n inputs = tokenizer([prompt], return_tensors=\"pt\").to(model.device)\n input_echo_len = len(inputs[\"input_ids\"][0])\n\n gen_kwargs = {\n \"max_length\": max_new_tokens + input_echo_len,\n \"do_sample\": True if temperature > 1e-5 else False,\n \"top_p\": top_p,\n \"repetition_penalty\": repetition_penalty,\n \"logits_processor\": [invalid_score_processor],\n }\n if temperature > 1e-5:\n gen_kwargs[\"temperature\"] = temperature\n\n total_len = 0\n for total_ids in model.stream_generate(**inputs, **gen_kwargs):\n total_ids = total_ids.tolist()[0]\n total_len = len(total_ids)\n if echo:\n output_ids = total_ids\n else:\n output_ids = total_ids[input_echo_len:]\n response = tokenizer.decode(output_ids)\n response = process_response(response)\n\n yield {\n \"text\": response,\n \"usage\": {\n \"prompt_tokens\": input_echo_len,\n \"completion_tokens\": total_len - input_echo_len,\n \"total_tokens\": total_len,\n },\n \"finish_reason\": None,\n }\n\n # TODO: ChatGLM stop when it reach max length\n # Only last stream result contains finish_reason, we set finish_reason as stop\n ret = {\n \"text\": response,\n \"usage\": {\n \"prompt_tokens\": input_echo_len,\n \"completion_tokens\": total_len - input_echo_len,\n 
\"total_tokens\": total_len,\n },\n \"finish_reason\": \"stop\",\n }\n yield ret" }, { "identifier": "generate_stream_codet5p", "path": "chat/model/model_codet5p.py", "snippet": "@torch.inference_mode()\ndef generate_stream_codet5p(\n model,\n tokenizer,\n params,\n device,\n context_len=2048,\n stream_interval=2,\n judge_sent_end=False,\n):\n prompt = params[\"prompt\"]\n temperature = float(params.get(\"temperature\", 1.0))\n repetition_penalty = float(params.get(\"repetition_penalty\", 1.0))\n top_p = float(params.get(\"top_p\", 1.0))\n top_k = int(params.get(\"top_k\", 50)) # -1 means disable\n max_new_tokens = int(params.get(\"max_new_tokens\", 1024))\n stop_token_ids = params.get(\"stop_token_ids\", None) or []\n stop_token_ids.append(tokenizer.eos_token_id)\n\n decode_config = dict(skip_special_tokens=True, clean_up_tokenization_spaces=True)\n streamer = TextIteratorStreamer(tokenizer, **decode_config)\n encoding = tokenizer(prompt, return_tensors=\"pt\").to(device)\n input_ids = encoding.input_ids\n encoding[\"decoder_input_ids\"] = encoding[\"input_ids\"].clone()\n input_echo_len = len(input_ids)\n\n generation_config = GenerationConfig(\n max_new_tokens=max_new_tokens,\n do_sample=temperature >= 1e-5,\n temperature=temperature,\n repetition_penalty=repetition_penalty,\n no_repeat_ngram_size=10,\n top_p=top_p,\n top_k=top_k,\n eos_token_id=stop_token_ids,\n )\n\n class CodeBlockStopper(StoppingCriteria):\n def __call__(\n self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs\n ) -> bool:\n # Code-completion is open-end generation.\n # We check \\n\\n to stop at end of a code block.\n if list(input_ids[0][-2:]) == [628, 198]:\n return True\n return False\n\n gen_kwargs = dict(\n **encoding,\n streamer=streamer,\n generation_config=generation_config,\n stopping_criteria=StoppingCriteriaList([CodeBlockStopper()]),\n )\n thread = Thread(target=model.generate, kwargs=gen_kwargs)\n thread.start()\n i = 0\n output = \"\"\n for new_text in streamer:\n i += 1\n output += new_text\n if i % stream_interval == 0 or i == max_new_tokens - 1:\n yield {\n \"text\": output,\n \"usage\": {\n \"prompt_tokens\": input_echo_len,\n \"completion_tokens\": i,\n \"total_tokens\": input_echo_len + i,\n },\n \"finish_reason\": None,\n }\n if i >= max_new_tokens:\n break\n\n if i >= max_new_tokens:\n finish_reason = \"length\"\n else:\n finish_reason = \"stop\"\n\n yield {\n \"text\": output,\n \"usage\": {\n \"prompt_tokens\": input_echo_len,\n \"completion_tokens\": i,\n \"total_tokens\": input_echo_len + i,\n },\n \"finish_reason\": finish_reason,\n }\n thread.join()\n\n # clean\n gc.collect()\n torch.cuda.empty_cache()\n if device == \"xpu\":\n torch.xpu.empty_cache()" }, { "identifier": "generate_stream_falcon", "path": "chat/model/model_falcon.py", "snippet": "@torch.inference_mode()\ndef generate_stream_falcon(\n model,\n tokenizer,\n params,\n device,\n context_len=2048,\n stream_interval=2,\n judge_sent_end=False,\n):\n prompt = params[\"prompt\"]\n len_prompt = len(prompt)\n temperature = float(params.get(\"temperature\", 1.0))\n repetition_penalty = float(params.get(\"repetition_penalty\", 1.0))\n top_p = float(params.get(\"top_p\", 1.0))\n top_k = int(params.get(\"top_k\", 50)) # -1 means disable\n max_new_tokens = int(params.get(\"max_new_tokens\", 256))\n stop_str = params.get(\"stop\", None)\n echo = bool(params.get(\"echo\", True))\n stop_token_ids = params.get(\"stop_token_ids\", None) or []\n stop_token_ids.append(tokenizer.eos_token_id)\n\n inputs = tokenizer(prompt, 
return_tensors=\"pt\").to(model.device)\n input_ids = inputs[\"input_ids\"]\n attention_mask = inputs[\"attention_mask\"]\n\n max_src_len = context_len - max_new_tokens - 8\n\n input_ids = input_ids[-max_src_len:] # truncate from the left\n attention_mask = attention_mask[-max_src_len:] # truncate from the left\n input_echo_len = len(input_ids)\n\n decode_config = dict(skip_special_tokens=True, clean_up_tokenization_spaces=True)\n streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, **decode_config)\n\n generation_config = GenerationConfig(\n max_new_tokens=max_new_tokens,\n do_sample=temperature >= 1e-5,\n temperature=temperature,\n repetition_penalty=repetition_penalty,\n no_repeat_ngram_size=10,\n top_p=top_p,\n top_k=top_k,\n eos_token_id=stop_token_ids,\n )\n\n generation_kwargs = dict(\n inputs=input_ids,\n attention_mask=attention_mask,\n streamer=streamer,\n generation_config=generation_config,\n )\n\n thread = Thread(target=model.generate, kwargs=generation_kwargs)\n thread.start()\n\n if echo:\n # means keep the prompt\n output = prompt\n else:\n output = \"\"\n\n for i, new_text in enumerate(streamer):\n output += new_text\n if i % stream_interval == 0:\n if echo:\n rfind_start = len_prompt\n else:\n rfind_start = 0\n\n partially_stopped = False\n if stop_str:\n if isinstance(stop_str, str):\n pos = output.rfind(stop_str, rfind_start)\n if pos != -1:\n output = output[:pos]\n else:\n partially_stopped = is_partial_stop(output, stop_str)\n elif isinstance(stop_str, Iterable):\n for each_stop in stop_str:\n pos = output.rfind(each_stop, rfind_start)\n if pos != -1:\n output = output[:pos]\n break\n else:\n partially_stopped = is_partial_stop(output, each_stop)\n if partially_stopped:\n break\n else:\n raise ValueError(\"Invalid stop field type.\")\n\n # prevent yielding partial stop sequence\n if not partially_stopped:\n yield {\n \"text\": output,\n \"usage\": {\n \"prompt_tokens\": input_echo_len,\n \"completion_tokens\": i,\n \"total_tokens\": input_echo_len + i,\n },\n \"finish_reason\": None,\n }\n output = output.strip()\n\n # finish stream event, which contains finish reason\n if i == max_new_tokens - 1:\n finish_reason = \"length\"\n elif partially_stopped:\n finish_reason = None\n else:\n finish_reason = \"stop\"\n\n yield {\n \"text\": output,\n \"usage\": {\n \"prompt_tokens\": input_echo_len,\n \"completion_tokens\": i,\n \"total_tokens\": input_echo_len + i,\n },\n \"finish_reason\": finish_reason,\n }\n\n # clean\n gc.collect()\n torch.cuda.empty_cache()\n if device == \"xpu\":\n torch.xpu.empty_cache()" }, { "identifier": "replace_llama_attn_with_non_inplace_operations", "path": "chat/model/monkey_patch_non_inplace.py", "snippet": "def replace_llama_attn_with_non_inplace_operations():\n \"\"\"Avoid bugs in mps backend by not using in-place operations.\"\"\"\n transformers.models.llama.modeling_llama.LlamaAttention.forward = forward" }, { "identifier": "get_gpu_memory", "path": "chat/utils.py", "snippet": "def get_gpu_memory(max_gpus=None):\n \"\"\"Get available memory for each GPU.\"\"\"\n import torch\n\n gpu_memory = []\n num_gpus = (\n torch.cuda.device_count()\n if max_gpus is None\n else min(max_gpus, torch.cuda.device_count())\n )\n\n for gpu_id in range(num_gpus):\n with torch.cuda.device(gpu_id):\n device = torch.cuda.current_device()\n gpu_properties = torch.cuda.get_device_properties(device)\n total_memory = gpu_properties.total_memory / (1024**3)\n allocated_memory = torch.cuda.memory_allocated() / (1024**3)\n available_memory = total_memory - 
allocated_memory\n gpu_memory.append(available_memory)\n return gpu_memory" } ]
import math import os import sys import warnings import accelerate import psutil import torch import intel_extension_for_pytorch as ipex import intel_extension_for_pytorch as ipex from typing import Dict, List, Optional from functools import cache from functools import lru_cache as cache from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, LlamaTokenizer, LlamaForCausalLM, T5Tokenizer, ) from chat.constants import CPU_ISA from chat.modules.gptq import GptqConfig, load_gptq_quantized from chat.modules.awq import AWQConfig, load_awq_quantized from chat.conversation import Conversation, get_conv_template from chat.model.compression import load_compress_model from chat.model.llama_condense_monkey_patch import ( replace_llama_with_condense, ) from chat.model.model_chatglm import generate_stream_chatglm from chat.model.model_codet5p import generate_stream_codet5p from chat.model.model_falcon import generate_stream_falcon from chat.model.monkey_patch_non_inplace import ( replace_llama_attn_with_non_inplace_operations, ) from chat.utils import get_gpu_memory from transformers import BitsAndBytesConfig from chat.server.inference import generate_stream from peft import PeftConfig, PeftModel from peft import PeftConfig, PeftModel from chat.model.rwkv_model import RwkvModel from transformers.generation import GenerationConfig
8,642
model = AutoModel.from_pretrained( model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs ) return model, tokenizer def load_compress_model(self, model_path, device, torch_dtype, revision="main"): return load_compress_model( model_path, device, torch_dtype, use_fast=self.use_fast_tokenizer, revision=revision, ) def get_default_conv_template(self, model_path: str) -> Conversation: return get_conv_template("one_shot") # A global registry for all model adapters # TODO (lmzheng): make it a priority queue. model_adapters: List[BaseModelAdapter] = [] def register_model_adapter(cls): """Register a model adapter.""" model_adapters.append(cls()) @cache def get_model_adapter(model_path: str) -> BaseModelAdapter: """Get a model adapter for a model_path.""" model_path_basename = os.path.basename(os.path.normpath(model_path)) # Try the basename of model_path at first for adapter in model_adapters: if adapter.match(model_path_basename) and type(adapter) != BaseModelAdapter: return adapter # Then try the full path for adapter in model_adapters: if adapter.match(model_path): return adapter raise ValueError(f"No valid model adapter for {model_path}") def raise_warning_for_incompatible_cpu_offloading_configuration( device: str, load_8bit: bool, cpu_offloading: bool ): if cpu_offloading: if not load_8bit: warnings.warn( "The cpu-offloading feature can only be used while also using 8-bit-quantization.\n" "Use '--load-8bit' to enable 8-bit-quantization\n" "Continuing without cpu-offloading enabled\n" ) return False if not "linux" in sys.platform: warnings.warn( "CPU-offloading is only supported on linux-systems due to the limited compatability with the bitsandbytes-package\n" "Continuing without cpu-offloading enabled\n" ) return False if device != "cuda": warnings.warn( "CPU-offloading is only enabled when using CUDA-devices\n" "Continuing without cpu-offloading enabled\n" ) return False return cpu_offloading def load_model( model_path: str, device: str = "cuda", num_gpus: int = 1, max_gpu_memory: Optional[str] = None, load_8bit: bool = False, cpu_offloading: bool = False, gptq_config: Optional[GptqConfig] = None, awq_config: Optional[AWQConfig] = None, revision: str = "main", debug: bool = False, ): """Load a model from Hugging Face.""" # get model adapter adapter = get_model_adapter(model_path) # Handle device mapping cpu_offloading = raise_warning_for_incompatible_cpu_offloading_configuration( device, load_8bit, cpu_offloading ) if device == "cpu": kwargs = {"torch_dtype": torch.float32} if CPU_ISA in ["avx512_bf16", "amx"]: try: kwargs = {"torch_dtype": torch.bfloat16} except ImportError: warnings.warn( "Intel Extension for PyTorch is not installed, it can be installed to accelerate cpu inference" ) elif device == "cuda": kwargs = {"torch_dtype": torch.float16} if num_gpus != 1: kwargs["device_map"] = "auto" if max_gpu_memory is None: kwargs[ "device_map" ] = "sequential" # This is important for not the same VRAM sizes available_gpu_memory = get_gpu_memory(num_gpus) kwargs["max_memory"] = { i: str(int(available_gpu_memory[i] * 0.85)) + "GiB" for i in range(num_gpus) } else: kwargs["max_memory"] = {i: max_gpu_memory for i in range(num_gpus)} elif device == "mps": kwargs = {"torch_dtype": torch.float16} # Avoid bugs in mps backend by not using in-place operations.
"""Model adapter registration.""" if sys.version_info >= (3, 9): else: # Check an environment variable to check if we should be sharing Peft model # weights. When false we treat all Peft models as separate. peft_share_base_weights = ( os.environ.get("PEFT_SHARE_BASE_WEIGHTS", "false").lower() == "true" ) class BaseModelAdapter: """The base and the default model adapter.""" use_fast_tokenizer = True def match(self, model_path: str): return True def load_model(self, model_path: str, from_pretrained_kwargs: dict): revision = from_pretrained_kwargs.get("revision", "main") try: tokenizer = AutoTokenizer.from_pretrained( model_path, use_fast=self.use_fast_tokenizer, revision=revision, trust_remote_code=True, ) except TypeError: tokenizer = AutoTokenizer.from_pretrained( model_path, use_fast=False, revision=revision, trust_remote_code=True ) try: model = AutoModelForCausalLM.from_pretrained( model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs ) except NameError: model = AutoModel.from_pretrained( model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs ) return model, tokenizer def load_compress_model(self, model_path, device, torch_dtype, revision="main"): return load_compress_model( model_path, device, torch_dtype, use_fast=self.use_fast_tokenizer, revision=revision, ) def get_default_conv_template(self, model_path: str) -> Conversation: return get_conv_template("one_shot") # A global registry for all model adapters # TODO (lmzheng): make it a priority queue. model_adapters: List[BaseModelAdapter] = [] def register_model_adapter(cls): """Register a model adapter.""" model_adapters.append(cls()) @cache def get_model_adapter(model_path: str) -> BaseModelAdapter: """Get a model adapter for a model_path.""" model_path_basename = os.path.basename(os.path.normpath(model_path)) # Try the basename of model_path at first for adapter in model_adapters: if adapter.match(model_path_basename) and type(adapter) != BaseModelAdapter: return adapter # Then try the full path for adapter in model_adapters: if adapter.match(model_path): return adapter raise ValueError(f"No valid model adapter for {model_path}") def raise_warning_for_incompatible_cpu_offloading_configuration( device: str, load_8bit: bool, cpu_offloading: bool ): if cpu_offloading: if not load_8bit: warnings.warn( "The cpu-offloading feature can only be used while also using 8-bit-quantization.\n" "Use '--load-8bit' to enable 8-bit-quantization\n" "Continuing without cpu-offloading enabled\n" ) return False if not "linux" in sys.platform: warnings.warn( "CPU-offloading is only supported on linux-systems due to the limited compatability with the bitsandbytes-package\n" "Continuing without cpu-offloading enabled\n" ) return False if device != "cuda": warnings.warn( "CPU-offloading is only enabled when using CUDA-devices\n" "Continuing without cpu-offloading enabled\n" ) return False return cpu_offloading def load_model( model_path: str, device: str = "cuda", num_gpus: int = 1, max_gpu_memory: Optional[str] = None, load_8bit: bool = False, cpu_offloading: bool = False, gptq_config: Optional[GptqConfig] = None, awq_config: Optional[AWQConfig] = None, revision: str = "main", debug: bool = False, ): """Load a model from Hugging Face.""" # get model adapter adapter = get_model_adapter(model_path) # Handle device mapping cpu_offloading = raise_warning_for_incompatible_cpu_offloading_configuration( device, load_8bit, cpu_offloading ) if device == "cpu": kwargs = {"torch_dtype": torch.float32} if CPU_ISA in ["avx512_bf16", "amx"]: try: kwargs = 
{"torch_dtype": torch.bfloat16} except ImportError: warnings.warn( "Intel Extension for PyTorch is not installed, it can be installed to accelerate cpu inference" ) elif device == "cuda": kwargs = {"torch_dtype": torch.float16} if num_gpus != 1: kwargs["device_map"] = "auto" if max_gpu_memory is None: kwargs[ "device_map" ] = "sequential" # This is important for not the same VRAM sizes available_gpu_memory = get_gpu_memory(num_gpus) kwargs["max_memory"] = { i: str(int(available_gpu_memory[i] * 0.85)) + "GiB" for i in range(num_gpus) } else: kwargs["max_memory"] = {i: max_gpu_memory for i in range(num_gpus)} elif device == "mps": kwargs = {"torch_dtype": torch.float16} # Avoid bugs in mps backend by not using in-place operations.
replace_llama_attn_with_non_inplace_operations()
12
2023-10-20 08:56:20+00:00
12k
thuml/iTransformer
run.py
[ { "identifier": "Exp_Long_Term_Forecast", "path": "experiments/exp_long_term_forecasting.py", "snippet": "class Exp_Long_Term_Forecast(Exp_Basic):\n def __init__(self, args):\n super(Exp_Long_Term_Forecast, self).__init__(args)\n\n def _build_model(self):\n model = self.model_dict[self.args.model].Model(self.args).float()\n\n if self.args.use_multi_gpu and self.args.use_gpu:\n model = nn.DataParallel(model, device_ids=self.args.device_ids)\n return model\n\n def _get_data(self, flag):\n data_set, data_loader = data_provider(self.args, flag)\n return data_set, data_loader\n\n def _select_optimizer(self):\n model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)\n return model_optim\n\n def _select_criterion(self):\n criterion = nn.MSELoss()\n return criterion\n\n def vali(self, vali_data, vali_loader, criterion):\n total_loss = []\n self.model.eval()\n with torch.no_grad():\n for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(vali_loader):\n batch_x = batch_x.float().to(self.device)\n batch_y = batch_y.float()\n\n if 'PEMS' in self.args.data or 'Solar' in self.args.data:\n batch_x_mark = None\n batch_y_mark = None\n else:\n batch_x_mark = batch_x_mark.float().to(self.device)\n batch_y_mark = batch_y_mark.float().to(self.device)\n\n # decoder input\n dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n # encoder - decoder\n if self.args.use_amp:\n with torch.cuda.amp.autocast():\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n else:\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n f_dim = -1 if self.args.features == 'MS' else 0\n outputs = outputs[:, -self.args.pred_len:, f_dim:]\n batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n\n pred = outputs.detach().cpu()\n true = batch_y.detach().cpu()\n\n loss = criterion(pred, true)\n\n total_loss.append(loss)\n total_loss = np.average(total_loss)\n self.model.train()\n return total_loss\n\n def train(self, setting):\n train_data, train_loader = self._get_data(flag='train')\n vali_data, vali_loader = self._get_data(flag='val')\n test_data, test_loader = self._get_data(flag='test')\n\n path = os.path.join(self.args.checkpoints, setting)\n if not os.path.exists(path):\n os.makedirs(path)\n\n time_now = time.time()\n\n train_steps = len(train_loader)\n early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)\n\n model_optim = self._select_optimizer()\n criterion = self._select_criterion()\n\n if self.args.use_amp:\n scaler = torch.cuda.amp.GradScaler()\n\n for epoch in range(self.args.train_epochs):\n iter_count = 0\n train_loss = []\n\n self.model.train()\n epoch_time = time.time()\n for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(train_loader):\n iter_count += 1\n model_optim.zero_grad()\n batch_x = batch_x.float().to(self.device)\n\n batch_y = batch_y.float().to(self.device)\n if 'PEMS' in self.args.data or 'Solar' in self.args.data:\n batch_x_mark = None\n batch_y_mark = None\n else:\n batch_x_mark = batch_x_mark.float().to(self.device)\n batch_y_mark = batch_y_mark.float().to(self.device)\n\n # decoder input\n dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, 
:]).float()\n dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n\n # encoder - decoder\n if self.args.use_amp:\n with torch.cuda.amp.autocast():\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n f_dim = -1 if self.args.features == 'MS' else 0\n outputs = outputs[:, -self.args.pred_len:, f_dim:]\n batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n loss = criterion(outputs, batch_y)\n train_loss.append(loss.item())\n else:\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n f_dim = -1 if self.args.features == 'MS' else 0\n outputs = outputs[:, -self.args.pred_len:, f_dim:]\n batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n loss = criterion(outputs, batch_y)\n train_loss.append(loss.item())\n\n if (i + 1) % 100 == 0:\n print(\"\\titers: {0}, epoch: {1} | loss: {2:.7f}\".format(i + 1, epoch + 1, loss.item()))\n speed = (time.time() - time_now) / iter_count\n left_time = speed * ((self.args.train_epochs - epoch) * train_steps - i)\n print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))\n iter_count = 0\n time_now = time.time()\n\n if self.args.use_amp:\n scaler.scale(loss).backward()\n scaler.step(model_optim)\n scaler.update()\n else:\n loss.backward()\n model_optim.step()\n\n print(\"Epoch: {} cost time: {}\".format(epoch + 1, time.time() - epoch_time))\n train_loss = np.average(train_loss)\n vali_loss = self.vali(vali_data, vali_loader, criterion)\n test_loss = self.vali(test_data, test_loader, criterion)\n\n print(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}\".format(\n epoch + 1, train_steps, train_loss, vali_loss, test_loss))\n early_stopping(vali_loss, self.model, path)\n if early_stopping.early_stop:\n print(\"Early stopping\")\n break\n\n adjust_learning_rate(model_optim, epoch + 1, self.args)\n\n # get_cka(self.args, setting, self.model, train_loader, self.device, epoch)\n\n best_model_path = path + '/' + 'checkpoint.pth'\n self.model.load_state_dict(torch.load(best_model_path))\n\n return self.model\n\n def test(self, setting, test=0):\n test_data, test_loader = self._get_data(flag='test')\n if test:\n print('loading model')\n self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth')))\n\n preds = []\n trues = []\n folder_path = './test_results/' + setting + '/'\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n self.model.eval()\n with torch.no_grad():\n for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(test_loader):\n batch_x = batch_x.float().to(self.device)\n batch_y = batch_y.float().to(self.device)\n\n if 'PEMS' in self.args.data or 'Solar' in self.args.data:\n batch_x_mark = None\n batch_y_mark = None\n else:\n batch_x_mark = batch_x_mark.float().to(self.device)\n batch_y_mark = batch_y_mark.float().to(self.device)\n\n # decoder input\n dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n # encoder - decoder\n if self.args.use_amp:\n with torch.cuda.amp.autocast():\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n 
else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n else:\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n f_dim = -1 if self.args.features == 'MS' else 0\n outputs = outputs[:, -self.args.pred_len:, f_dim:]\n batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n outputs = outputs.detach().cpu().numpy()\n batch_y = batch_y.detach().cpu().numpy()\n if test_data.scale and self.args.inverse:\n shape = outputs.shape\n outputs = test_data.inverse_transform(outputs.squeeze(0)).reshape(shape)\n batch_y = test_data.inverse_transform(batch_y.squeeze(0)).reshape(shape)\n\n pred = outputs\n true = batch_y\n\n preds.append(pred)\n trues.append(true)\n if i % 20 == 0:\n input = batch_x.detach().cpu().numpy()\n if test_data.scale and self.args.inverse:\n shape = input.shape\n input = test_data.inverse_transform(input.squeeze(0)).reshape(shape)\n gt = np.concatenate((input[0, :, -1], true[0, :, -1]), axis=0)\n pd = np.concatenate((input[0, :, -1], pred[0, :, -1]), axis=0)\n visual(gt, pd, os.path.join(folder_path, str(i) + '.pdf'))\n\n preds = np.array(preds)\n trues = np.array(trues)\n print('test shape:', preds.shape, trues.shape)\n preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])\n trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])\n print('test shape:', preds.shape, trues.shape)\n\n # result save\n folder_path = './results/' + setting + '/'\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n mae, mse, rmse, mape, mspe = metric(preds, trues)\n print('mse:{}, mae:{}'.format(mse, mae))\n f = open(\"result_long_term_forecast.txt\", 'a')\n f.write(setting + \" \\n\")\n f.write('mse:{}, mae:{}'.format(mse, mae))\n f.write('\\n')\n f.write('\\n')\n f.close()\n\n np.save(folder_path + 'metrics.npy', np.array([mae, mse, rmse, mape, mspe]))\n np.save(folder_path + 'pred.npy', preds)\n np.save(folder_path + 'true.npy', trues)\n\n return\n\n\n def predict(self, setting, load=False):\n pred_data, pred_loader = self._get_data(flag='pred')\n\n if load:\n path = os.path.join(self.args.checkpoints, setting)\n best_model_path = path + '/' + 'checkpoint.pth'\n self.model.load_state_dict(torch.load(best_model_path))\n\n preds = []\n\n self.model.eval()\n with torch.no_grad():\n for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(pred_loader):\n batch_x = batch_x.float().to(self.device)\n batch_y = batch_y.float()\n batch_x_mark = batch_x_mark.float().to(self.device)\n batch_y_mark = batch_y_mark.float().to(self.device)\n\n # decoder input\n dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n # encoder - decoder\n if self.args.use_amp:\n with torch.cuda.amp.autocast():\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n else:\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n outputs = outputs.detach().cpu().numpy()\n if pred_data.scale and self.args.inverse:\n shape = outputs.shape\n outputs = pred_data.inverse_transform(outputs.squeeze(0)).reshape(shape)\n preds.append(outputs)\n\n preds = np.array(preds)\n 
preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])\n\n # result save\n folder_path = './results/' + setting + '/'\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n np.save(folder_path + 'real_prediction.npy', preds)\n\n return" }, { "identifier": "Exp_Long_Term_Forecast_Partial", "path": "experiments/exp_long_term_forecasting_partial.py", "snippet": "class Exp_Long_Term_Forecast_Partial(Exp_Basic):\n def __init__(self, args):\n super(Exp_Long_Term_Forecast_Partial, self).__init__(args)\n\n def _build_model(self):\n model = self.model_dict[self.args.model].Model(self.args).float()\n\n if self.args.use_multi_gpu and self.args.use_gpu:\n model = nn.DataParallel(model, device_ids=self.args.device_ids)\n return model\n\n def _get_data(self, flag):\n data_set, data_loader = data_provider(self.args, flag)\n return data_set, data_loader\n\n def _select_optimizer(self):\n model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)\n return model_optim\n\n def _select_criterion(self):\n criterion = nn.MSELoss()\n return criterion\n\n def vali(self, vali_data, vali_loader, criterion, partial_train=False):\n total_loss = []\n self.model.eval()\n with torch.no_grad():\n for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(vali_loader):\n batch_x = batch_x.float().to(self.device)\n batch_y = batch_y.float()\n\n if 'PEMS' in self.args.data or 'Solar' in self.args.data:\n batch_x_mark = None\n batch_y_mark = None\n else:\n batch_x_mark = batch_x_mark.float().to(self.device)\n batch_y_mark = batch_y_mark.float().to(self.device)\n\n if partial_train: # we train models with only partial variates from the dataset\n partial_start = self.args.partial_start_index\n partial_end = min(self.args.enc_in + partial_start, batch_x.shape[-1])\n batch_x = batch_x[:, :, partial_start:partial_end]\n batch_y = batch_y[:, :, partial_start:partial_end]\n\n # decoder input\n dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n # encoder - decoder\n if self.args.use_amp:\n with torch.cuda.amp.autocast():\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n else:\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n elif self.args.channel_independence:\n B, Tx, N = batch_x.shape\n _, Ty, _ = dec_inp.shape\n if batch_x_mark == None:\n outputs = self.model(batch_x.permute(0, 2, 1).reshape(B * N, Tx, 1), batch_x_mark, \\\n dec_inp.permute(0, 2, 1).reshape(B * N, Ty, 1), batch_y_mark).reshape(\n B, N, -1).permute(0, 2, 1)\n else:\n outputs = self.model(batch_x.permute(0, 2, 1).reshape(B * N, Tx, 1),\n batch_x_mark.repeat(N, 1, 1), \\\n dec_inp.permute(0, 2, 1).reshape(B * N, Ty, 1),\n batch_y_mark.repeat(N, 1, 1)) \\\n .reshape(B, N, -1).permute(0, 2, 1)\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n f_dim = -1 if self.args.features == 'MS' else 0\n outputs = outputs[:, -self.args.pred_len:, f_dim:]\n batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n\n pred = outputs.detach().cpu()\n true = batch_y.detach().cpu()\n\n loss = criterion(pred, true)\n\n total_loss.append(loss)\n total_loss = np.average(total_loss)\n self.model.train()\n return total_loss\n\n def train(self, setting):\n train_data, train_loader = 
self._get_data(flag='train')\n vali_data, vali_loader = self._get_data(flag='val')\n test_data, test_loader = self._get_data(flag='test')\n\n path = os.path.join(self.args.checkpoints, setting)\n if not os.path.exists(path):\n os.makedirs(path)\n\n time_now = time.time()\n\n train_steps = len(train_loader)\n early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)\n\n model_optim = self._select_optimizer()\n criterion = self._select_criterion()\n\n if self.args.use_amp:\n scaler = torch.cuda.amp.GradScaler()\n\n for epoch in range(self.args.train_epochs):\n iter_count = 0\n train_loss = []\n\n self.model.train()\n epoch_time = time.time()\n for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(train_loader):\n iter_count += 1\n model_optim.zero_grad()\n batch_x = batch_x.float().to(self.device)\n\n batch_y = batch_y.float().to(self.device)\n if 'PEMS' in self.args.data or 'Solar' in self.args.data:\n batch_x_mark = None\n batch_y_mark = None\n else:\n batch_x_mark = batch_x_mark.float().to(self.device)\n batch_y_mark = batch_y_mark.float().to(self.device)\n\n # Variate Generalization training: \n # We train with partial variates (args.enc_in < number of dataset variates)\n # and test the obtained model directly on all variates.\n partial_start = self.args.partial_start_index\n partial_end = min(self.args.enc_in + partial_start, batch_x.shape[-1])\n batch_x = batch_x[:, :, partial_start:partial_end]\n batch_y = batch_y[:, :, partial_start:partial_end]\n # Efficient training strategy: randomly choose part of the variates\n # and only train the model with selected variates in each batch \n if self.args.efficient_training:\n _, _, N = batch_x.shape\n index = np.stack(random.sample(range(N), N))[-self.args.enc_in:]\n batch_x = batch_x[:, :, index]\n batch_y = batch_y[:, :, index]\n\n # decoder input\n dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n\n # encoder - decoder\n if self.args.use_amp:\n with torch.cuda.amp.autocast():\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n f_dim = -1 if self.args.features == 'MS' else 0\n outputs = outputs[:, -self.args.pred_len:, f_dim:]\n batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n loss = criterion(outputs, batch_y)\n train_loss.append(loss.item())\n else:\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n elif self.args.channel_independence:\n B, Tx, N = batch_x.shape\n _, Ty, _ = dec_inp.shape\n if batch_x_mark == None:\n outputs = self.model(batch_x.permute(0, 2, 1).reshape(B * N, Tx, 1), batch_x_mark, \\\n dec_inp.permute(0, 2, 1).reshape(B * N, Ty, 1), batch_y_mark).reshape(\n B, N, -1).permute(0, 2, 1)\n else:\n a = batch_x.permute(0, 2, 1)\n b = batch_x.permute(0, 2, 1).reshape(B * N, Tx, 1)\n outputs = self.model(batch_x.permute(0, 2, 1).reshape(B * N, Tx, 1),\n batch_x_mark.repeat(N, 1, 1), \\\n dec_inp.permute(0, 2, 1).reshape(B * N, Ty, 1),\n batch_y_mark.repeat(N, 1, 1)) \\\n .reshape(B, N, -1).permute(0, 2, 1)\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n f_dim = -1 if self.args.features == 'MS' else 0\n outputs = outputs[:, -self.args.pred_len:, f_dim:]\n batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n loss = 
criterion(outputs, batch_y)\n train_loss.append(loss.item())\n\n if (i + 1) % 100 == 0:\n print(\"\\titers: {0}, epoch: {1} | loss: {2:.7f}\".format(i + 1, epoch + 1, loss.item()))\n speed = (time.time() - time_now) / iter_count\n left_time = speed * ((self.args.train_epochs - epoch) * train_steps - i)\n print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))\n iter_count = 0\n time_now = time.time()\n\n if self.args.use_amp:\n scaler.scale(loss).backward()\n scaler.step(model_optim)\n scaler.update()\n else:\n loss.backward()\n model_optim.step()\n\n print(\"Epoch: {} cost time: {}\".format(epoch + 1, time.time() - epoch_time))\n train_loss = np.average(train_loss)\n vali_loss = self.vali(vali_data, vali_loader, criterion, partial_train=True)\n test_loss = self.vali(test_data, test_loader, criterion, partial_train=False)\n\n print(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}\".format(\n epoch + 1, train_steps, train_loss, vali_loss, test_loss))\n early_stopping(vali_loss, self.model, path)\n if early_stopping.early_stop:\n print(\"Early stopping\")\n break\n\n adjust_learning_rate(model_optim, epoch + 1, self.args)\n\n best_model_path = path + '/' + 'checkpoint.pth'\n self.model.load_state_dict(torch.load(best_model_path))\n\n return self.model\n\n def test(self, setting, test=0):\n\n test_data, test_loader = self._get_data(flag='test')\n if test:\n print('loading model')\n self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth')))\n\n preds = []\n trues = []\n folder_path = './test_results/' + setting + '/'\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n self.model.eval()\n with torch.no_grad():\n for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(test_loader):\n # During model inference, test the obtained model directly on all variates.\n batch_x = batch_x.float().to(self.device)\n batch_y = batch_y.float().to(self.device)\n\n if 'PEMS' in self.args.data or 'Solar' in self.args.data:\n batch_x_mark = None\n batch_y_mark = None\n else:\n batch_x_mark = batch_x_mark.float().to(self.device)\n batch_y_mark = batch_y_mark.float().to(self.device)\n\n # decoder input\n dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n # encoder - decoder\n if self.args.use_amp:\n with torch.cuda.amp.autocast():\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n else:\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n elif self.args.channel_independence: # compare the result with channel_independence\n B, Tx, N = batch_x.shape\n _, Ty, _ = dec_inp.shape\n if batch_x_mark == None:\n outputs = self.model(batch_x.permute(0, 2, 1).reshape(B * N, Tx, 1), batch_x_mark, \\\n dec_inp.permute(0, 2, 1).reshape(B * N, Ty, 1), batch_y_mark).reshape(\n B, N, -1).permute(0, 2, 1)\n else:\n outputs = self.model(batch_x.permute(0, 2, 1).reshape(B * N, Tx, 1),\n batch_x_mark.repeat(N, 1, 1), \\\n dec_inp.permute(0, 2, 1).reshape(B * N, Ty, 1),\n batch_y_mark.repeat(N, 1, 1)) \\\n .reshape(B, N, -1).permute(0, 2, 1)\n else:\n # directly test the trained model on all variates without fine-tuning.\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n\n f_dim = -1 if 
self.args.features == 'MS' else 0\n outputs = outputs[:, -self.args.pred_len:, f_dim:]\n batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)\n outputs = outputs.detach().cpu().numpy()\n batch_y = batch_y.detach().cpu().numpy()\n if test_data.scale and self.args.inverse:\n shape = outputs.shape\n outputs = test_data.inverse_transform(outputs.squeeze(0)).reshape(shape)\n batch_y = test_data.inverse_transform(batch_y.squeeze(0)).reshape(shape)\n\n pred = outputs\n true = batch_y\n\n preds.append(pred)\n trues.append(true)\n if i % 20 == 0:\n input = batch_x.detach().cpu().numpy()\n if test_data.scale and self.args.inverse:\n shape = input.shape\n input = test_data.inverse_transform(input.squeeze(0)).reshape(shape)\n gt = np.concatenate((input[0, :, -1], true[0, :, -1]), axis=0)\n pd = np.concatenate((input[0, :, -1], pred[0, :, -1]), axis=0)\n visual(gt, pd, os.path.join(folder_path, str(i) + '.pdf'))\n\n preds = np.array(preds)\n trues = np.array(trues)\n print('test shape:', preds.shape, trues.shape)\n preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])\n trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])\n print('test shape:', preds.shape, trues.shape)\n\n # result save\n folder_path = './results/' + setting + '/'\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n mae, mse, rmse, mape, mspe = metric(preds, trues)\n print('mse:{}, mae:{}'.format(mse, mae))\n f = open(\"result_long_term_forecast.txt\", 'a')\n f.write(setting + \" \\n\")\n f.write('mse:{}, mae:{}'.format(mse, mae))\n f.write('\\n')\n f.write('\\n')\n f.close()\n\n np.save(folder_path + 'metrics.npy', np.array([mae, mse, rmse, mape, mspe]))\n np.save(folder_path + 'pred.npy', preds)\n np.save(folder_path + 'true.npy', trues)\n\n return\n\n def predict(self, setting, load=False):\n pred_data, pred_loader = self._get_data(flag='pred')\n\n if load:\n path = os.path.join(self.args.checkpoints, setting)\n best_model_path = path + '/' + 'checkpoint.pth'\n self.model.load_state_dict(torch.load(best_model_path))\n\n preds = []\n\n self.model.eval()\n with torch.no_grad():\n for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(pred_loader):\n batch_x = batch_x.float().to(self.device)\n batch_y = batch_y.float()\n batch_x_mark = batch_x_mark.float().to(self.device)\n batch_y_mark = batch_y_mark.float().to(self.device)\n\n # decoder input\n dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()\n dec_inp = torch.cat([batch_y[:, :self.args.label_len, :], dec_inp], dim=1).float().to(self.device)\n # encoder - decoder\n if self.args.use_amp:\n with torch.cuda.amp.autocast():\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n else:\n if self.args.output_attention:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]\n else:\n outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n outputs = outputs.detach().cpu().numpy()\n if pred_data.scale and self.args.inverse:\n shape = outputs.shape\n outputs = pred_data.inverse_transform(outputs.squeeze(0)).reshape(shape)\n preds.append(outputs)\n\n preds = np.array(preds)\n preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])\n\n # result save\n folder_path = './results/' + setting + '/'\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n np.save(folder_path + 'real_prediction.npy', preds)\n\n return" } ]
import argparse
import torch
import random
import numpy as np
from experiments.exp_long_term_forecasting import Exp_Long_Term_Forecast
from experiments.exp_long_term_forecasting_partial import Exp_Long_Term_Forecast_Partial
9,456
if __name__ == '__main__':
    fix_seed = 2023
    random.seed(fix_seed)
    torch.manual_seed(fix_seed)
    np.random.seed(fix_seed)

    parser = argparse.ArgumentParser(description='iTransformer')

    # basic config
    parser.add_argument('--is_training', type=int, required=True, default=1, help='status')
    parser.add_argument('--model_id', type=str, required=True, default='test', help='model id')
    parser.add_argument('--model', type=str, required=True, default='iTransformer', help='model name, options: [iTransformer, iInformer, iReformer, iFlowformer, iFlashformer]')

    # data loader
    parser.add_argument('--data', type=str, required=True, default='custom', help='dataset type')
    parser.add_argument('--root_path', type=str, default='./data/electricity/', help='root path of the data file')
    parser.add_argument('--data_path', type=str, default='electricity.csv', help='data csv file')
    parser.add_argument('--features', type=str, default='M', help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')
    parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')
    parser.add_argument('--freq', type=str, default='h', help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')
    parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')

    # forecasting task
    parser.add_argument('--seq_len', type=int, default=96, help='input sequence length')
    parser.add_argument('--label_len', type=int, default=48, help='start token length')  # no longer needed in inverted Transformers
    parser.add_argument('--pred_len', type=int, default=96, help='prediction sequence length')

    # model define
    parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')
    parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')
    parser.add_argument('--c_out', type=int, default=7, help='output size')  # applicable on arbitrary number of variates in inverted Transformers
    parser.add_argument('--d_model', type=int, default=512, help='dimension of model')
    parser.add_argument('--n_heads', type=int, default=8, help='num of heads')
    parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')
    parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')
    parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')
    parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')
    parser.add_argument('--factor', type=int, default=1, help='attn factor')
    parser.add_argument('--distil', action='store_false', help='whether to use distilling in encoder, using this argument means not using distilling', default=True)
    parser.add_argument('--dropout', type=float, default=0.1, help='dropout')
    parser.add_argument('--embed', type=str, default='timeF', help='time features encoding, options:[timeF, fixed, learned]')
    parser.add_argument('--activation', type=str, default='gelu', help='activation')
    parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')
    parser.add_argument('--do_predict', action='store_true', help='whether to predict unseen future data')

    # optimization
    parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')
    parser.add_argument('--itr', type=int, default=1, help='experiments times')
    parser.add_argument('--train_epochs', type=int, default=10, help='train epochs')
    parser.add_argument('--batch_size', type=int, default=32, help='batch size of train input data')
    parser.add_argument('--patience', type=int, default=3, help='early stopping patience')
    parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')
    parser.add_argument('--des', type=str, default='test', help='exp description')
    parser.add_argument('--loss', type=str, default='MSE', help='loss function')
    parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')
    parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)

    # GPU
    parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')
    parser.add_argument('--gpu', type=int, default=0, help='gpu')
    parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)
    parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')

    # iTransformer
    parser.add_argument('--exp_name', type=str, required=False, default='MTSF', help='experiemnt name, options:[MTSF, partial_train]')
    parser.add_argument('--channel_independence', type=bool, default=False, help='whether to use channel_independence mechanism')
    parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)
    parser.add_argument('--class_strategy', type=str, default='projection', help='projection/average/cls_token')
    parser.add_argument('--target_root_path', type=str, default='./data/electricity/', help='root path of the data file')
    parser.add_argument('--target_data_path', type=str, default='electricity.csv', help='data file')
    parser.add_argument('--efficient_training', type=bool, default=False, help='whether to use efficient_training (exp_name should be partial train)')  # See Figure 8 of our paper for the detail
    parser.add_argument('--use_norm', type=int, default=True, help='use norm and denorm')
    parser.add_argument('--partial_start_index', type=int, default=0, help='the start index of variates for partial training, ' 'you can select [partial_start_index, min(enc_in + partial_start_index, N)]')

    args = parser.parse_args()
    args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False

    if args.use_gpu and args.use_multi_gpu:
        args.devices = args.devices.replace(' ', '')
        device_ids = args.devices.split(',')
        args.device_ids = [int(id_) for id_ in device_ids]
        args.gpu = args.device_ids[0]

    print('Args in experiment:')
    print(args)

    if args.exp_name == 'partial_train':  # See Figure 8 of our paper, for the detail
if __name__ == '__main__':
    fix_seed = 2023
    random.seed(fix_seed)
    torch.manual_seed(fix_seed)
    np.random.seed(fix_seed)

    parser = argparse.ArgumentParser(description='iTransformer')

    # basic config
    parser.add_argument('--is_training', type=int, required=True, default=1, help='status')
    parser.add_argument('--model_id', type=str, required=True, default='test', help='model id')
    parser.add_argument('--model', type=str, required=True, default='iTransformer', help='model name, options: [iTransformer, iInformer, iReformer, iFlowformer, iFlashformer]')

    # data loader
    parser.add_argument('--data', type=str, required=True, default='custom', help='dataset type')
    parser.add_argument('--root_path', type=str, default='./data/electricity/', help='root path of the data file')
    parser.add_argument('--data_path', type=str, default='electricity.csv', help='data csv file')
    parser.add_argument('--features', type=str, default='M', help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')
    parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')
    parser.add_argument('--freq', type=str, default='h', help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')
    parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')

    # forecasting task
    parser.add_argument('--seq_len', type=int, default=96, help='input sequence length')
    parser.add_argument('--label_len', type=int, default=48, help='start token length')  # no longer needed in inverted Transformers
    parser.add_argument('--pred_len', type=int, default=96, help='prediction sequence length')

    # model define
    parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')
    parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')
    parser.add_argument('--c_out', type=int, default=7, help='output size')  # applicable on arbitrary number of variates in inverted Transformers
    parser.add_argument('--d_model', type=int, default=512, help='dimension of model')
    parser.add_argument('--n_heads', type=int, default=8, help='num of heads')
    parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')
    parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')
    parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')
    parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')
    parser.add_argument('--factor', type=int, default=1, help='attn factor')
    parser.add_argument('--distil', action='store_false', help='whether to use distilling in encoder, using this argument means not using distilling', default=True)
    parser.add_argument('--dropout', type=float, default=0.1, help='dropout')
    parser.add_argument('--embed', type=str, default='timeF', help='time features encoding, options:[timeF, fixed, learned]')
    parser.add_argument('--activation', type=str, default='gelu', help='activation')
    parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')
    parser.add_argument('--do_predict', action='store_true', help='whether to predict unseen future data')

    # optimization
    parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')
    parser.add_argument('--itr', type=int, default=1, help='experiments times')
    parser.add_argument('--train_epochs', type=int, default=10, help='train epochs')
    parser.add_argument('--batch_size', type=int, default=32, help='batch size of train input data')
    parser.add_argument('--patience', type=int, default=3, help='early stopping patience')
    parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')
    parser.add_argument('--des', type=str, default='test', help='exp description')
    parser.add_argument('--loss', type=str, default='MSE', help='loss function')
    parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')
    parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)

    # GPU
    parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')
    parser.add_argument('--gpu', type=int, default=0, help='gpu')
    parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)
    parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')

    # iTransformer
    parser.add_argument('--exp_name', type=str, required=False, default='MTSF', help='experiemnt name, options:[MTSF, partial_train]')
    parser.add_argument('--channel_independence', type=bool, default=False, help='whether to use channel_independence mechanism')
    parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)
    parser.add_argument('--class_strategy', type=str, default='projection', help='projection/average/cls_token')
    parser.add_argument('--target_root_path', type=str, default='./data/electricity/', help='root path of the data file')
    parser.add_argument('--target_data_path', type=str, default='electricity.csv', help='data file')
    parser.add_argument('--efficient_training', type=bool, default=False, help='whether to use efficient_training (exp_name should be partial train)')  # See Figure 8 of our paper for the detail
    parser.add_argument('--use_norm', type=int, default=True, help='use norm and denorm')
    parser.add_argument('--partial_start_index', type=int, default=0, help='the start index of variates for partial training, ' 'you can select [partial_start_index, min(enc_in + partial_start_index, N)]')

    args = parser.parse_args()
    args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False

    if args.use_gpu and args.use_multi_gpu:
        args.devices = args.devices.replace(' ', '')
        device_ids = args.devices.split(',')
        args.device_ids = [int(id_) for id_ in device_ids]
        args.gpu = args.device_ids[0]

    print('Args in experiment:')
    print(args)

    if args.exp_name == 'partial_train':  # See Figure 8 of our paper, for the detail
Exp = Exp_Long_Term_Forecast_Partial
1
2023-10-19 03:23:15+00:00
12k
kylesargent/ZeroNVS
threestudio/models/geometry/base.py
[ { "identifier": "IsosurfaceHelper", "path": "threestudio/models/isosurface.py", "snippet": "class IsosurfaceHelper(nn.Module):\n points_range: Tuple[float, float] = (0, 1)\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"N 3\"]:\n raise NotImplementedError" }, { "identifier": "MarchingCubeCPUHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingCubeCPUHelper(IsosurfaceHelper):\n def __init__(self, resolution: int) -> None:\n super().__init__()\n self.resolution = resolution\n import mcubes\n\n self.mc_func: Callable = mcubes.marching_cubes\n self._grid_vertices: Optional[Float[Tensor, \"N3 3\"]] = None\n self._dummy: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_dummy\", torch.zeros(0, dtype=torch.float32), persistent=False\n )\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"N3 3\"]:\n if self._grid_vertices is None:\n # keep the vertices on CPU so that we can support very large resolution\n x, y, z = (\n torch.linspace(*self.points_range, self.resolution),\n torch.linspace(*self.points_range, self.resolution),\n torch.linspace(*self.points_range, self.resolution),\n )\n x, y, z = torch.meshgrid(x, y, z, indexing=\"ij\")\n verts = torch.cat(\n [x.reshape(-1, 1), y.reshape(-1, 1), z.reshape(-1, 1)], dim=-1\n ).reshape(-1, 3)\n self._grid_vertices = verts\n return self._grid_vertices\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support deformation. Ignoring.\"\n )\n level = -level.view(self.resolution, self.resolution, self.resolution)\n v_pos, t_pos_idx = self.mc_func(\n level.detach().cpu().numpy(), 0.0\n ) # transform to numpy\n v_pos, t_pos_idx = (\n torch.from_numpy(v_pos).float().to(self._dummy.device),\n torch.from_numpy(t_pos_idx.astype(np.int64)).long().to(self._dummy.device),\n ) # transform back to torch tensor on CUDA\n v_pos = v_pos / (self.resolution - 1.0)\n return Mesh(v_pos=v_pos, t_pos_idx=t_pos_idx)" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: 
Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + 
self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def __init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = 
self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n 
.long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "BaseModule", "path": "threestudio/utils/base.py", "snippet": "class BaseModule(nn.Module, Updateable):\n @dataclass\n class Config:\n weights: Optional[str] = None\n\n cfg: Config # add this to every subclass of BaseModule to enable static type checking\n\n def __init__(\n self, cfg: Optional[Union[dict, DictConfig]] = None, *args, **kwargs\n ) -> None:\n super().__init__()\n self.cfg = parse_structured(self.Config, cfg)\n self.device = get_device()\n self.configure(*args, **kwargs)\n if self.cfg.weights is not None:\n # format: path/to/weights:module_name\n weights_path, module_name = self.cfg.weights.split(\":\")\n state_dict, epoch, global_step = load_module_weights(\n weights_path, module_name=module_name, map_location=\"cpu\"\n )\n self.load_state_dict(state_dict)\n self.do_update_step(\n epoch, global_step, on_load_weights=True\n ) # restore states\n # dummy tensor to indicate model state\n self._dummy: Float[Tensor, \"...\"]\n self.register_buffer(\"_dummy\", torch.zeros(0).float(), persistent=False)\n\n def configure(self, *args, **kwargs) -> None:\n pass" }, { "identifier": "chunk_batch", "path": "threestudio/utils/ops.py", "snippet": "def chunk_batch(func: Callable, chunk_size: int, *args, **kwargs) -> Any:\n if chunk_size <= 0:\n return func(*args, **kwargs)\n B = None\n for arg in list(args) + list(kwargs.values()):\n if isinstance(arg, torch.Tensor):\n B = arg.shape[0]\n break\n assert (\n B is not None\n ), \"No tensor 
found in args or kwargs, cannot determine batch size.\"\n out = defaultdict(list)\n out_type = None\n # max(1, B) to support B == 0\n for i in range(0, max(1, B), chunk_size):\n out_chunk = func(\n *[\n arg[i : i + chunk_size] if isinstance(arg, torch.Tensor) else arg\n for arg in args\n ],\n **{\n k: arg[i : i + chunk_size] if isinstance(arg, torch.Tensor) else arg\n for k, arg in kwargs.items()\n },\n )\n if out_chunk is None:\n continue\n out_type = type(out_chunk)\n if isinstance(out_chunk, torch.Tensor):\n out_chunk = {0: out_chunk}\n elif isinstance(out_chunk, tuple) or isinstance(out_chunk, list):\n chunk_length = len(out_chunk)\n out_chunk = {i: chunk for i, chunk in enumerate(out_chunk)}\n elif isinstance(out_chunk, dict):\n pass\n else:\n print(\n f\"Return value of func must be in type [torch.Tensor, list, tuple, dict], get {type(out_chunk)}.\"\n )\n exit(1)\n for k, v in out_chunk.items():\n v = v if torch.is_grad_enabled() else v.detach()\n out[k].append(v)\n\n if out_type is None:\n return None\n\n out_merged: Dict[Any, Optional[torch.Tensor]] = {}\n for k, v in out.items():\n if all([vv is None for vv in v]):\n # allow None in return value\n out_merged[k] = None\n elif all([isinstance(vv, torch.Tensor) for vv in v]):\n out_merged[k] = torch.cat(v, dim=0)\n else:\n raise TypeError(\n f\"Unsupported types in return value of func: {[type(vv) for vv in v if not isinstance(vv, torch.Tensor)]}\"\n )\n\n if out_type is torch.Tensor:\n return out_merged[0]\n elif out_type in [tuple, list]:\n return out_type([out_merged[i] for i in range(chunk_length)])\n elif out_type is dict:\n return out_merged" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
from dataclasses import dataclass, field

from threestudio.models.isosurface import (
    IsosurfaceHelper,
    MarchingCubeCPUHelper,
    MarchingTetrahedraHelper,
)
from threestudio.models.mesh import Mesh
from threestudio.utils.base import BaseModule
from threestudio.utils.ops import chunk_batch, scale_tensor
from threestudio.utils.typing import *

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

import threestudio
7,648
def contract_to_unisphere(
    x: Float[Tensor, "... 3"], bbox: Float[Tensor, "2 3"], unbounded: bool = False
) -> Float[Tensor, "... 3"]:
    if unbounded:
        # import pdb
        # pdb.set_trace()
        x = scale_tensor(x, bbox, (0, 1))
        x = x * 2 - 1  # aabb is at [-1, 1]
        mag = x.norm(dim=-1, keepdim=True)
        mask = mag.squeeze(-1) > 1
        x = x.clone()
        x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])
        x = x / 4 + 0.5  # [-inf, inf] is at [0, 1]
    else:
        x = scale_tensor(x, bbox, (0, 1))
    return x


class BaseGeometry(BaseModule):
    @dataclass
    class Config(BaseModule.Config):
        pass

    cfg: Config

    @staticmethod
    def create_from(
        other: "BaseGeometry", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs
    ) -> "BaseGeometry":
        raise TypeError(
            f"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}"
        )

    def export(self, *args, **kwargs) -> Dict[str, Any]:
        return {}


class BaseImplicitGeometry(BaseGeometry):
    @dataclass
    class Config(BaseGeometry.Config):
        radius: float = 1.0
        isosurface: bool = True
        isosurface_method: str = "mt"
        isosurface_resolution: int = 128
        isosurface_threshold: Union[float, str] = 0.0
        isosurface_chunk: int = 0
        isosurface_coarse_to_fine: bool = True
        isosurface_deformable_grid: bool = False
        isosurface_remove_outliers: bool = True
        isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01

    cfg: Config

    def configure(self) -> None:
        self.bbox: Float[Tensor, "2 3"]
        self.register_buffer(
            "bbox",
            torch.as_tensor(
                [
                    [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],
                    [self.cfg.radius, self.cfg.radius, self.cfg.radius],
                ],
                dtype=torch.float32,
            ),
        )
        self.isosurface_helper: Optional[IsosurfaceHelper] = None
        self.unbounded: bool = True

    def _initilize_isosurface_helper(self):
        if self.cfg.isosurface and self.isosurface_helper is None:
            if self.cfg.isosurface_method == "mc-cpu":
def contract_to_unisphere(
    x: Float[Tensor, "... 3"], bbox: Float[Tensor, "2 3"], unbounded: bool = False
) -> Float[Tensor, "... 3"]:
    if unbounded:
        # import pdb
        # pdb.set_trace()
        x = scale_tensor(x, bbox, (0, 1))
        x = x * 2 - 1  # aabb is at [-1, 1]
        mag = x.norm(dim=-1, keepdim=True)
        mask = mag.squeeze(-1) > 1
        x = x.clone()
        x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])
        x = x / 4 + 0.5  # [-inf, inf] is at [0, 1]
    else:
        x = scale_tensor(x, bbox, (0, 1))
    return x


class BaseGeometry(BaseModule):
    @dataclass
    class Config(BaseModule.Config):
        pass

    cfg: Config

    @staticmethod
    def create_from(
        other: "BaseGeometry", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs
    ) -> "BaseGeometry":
        raise TypeError(
            f"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}"
        )

    def export(self, *args, **kwargs) -> Dict[str, Any]:
        return {}


class BaseImplicitGeometry(BaseGeometry):
    @dataclass
    class Config(BaseGeometry.Config):
        radius: float = 1.0
        isosurface: bool = True
        isosurface_method: str = "mt"
        isosurface_resolution: int = 128
        isosurface_threshold: Union[float, str] = 0.0
        isosurface_chunk: int = 0
        isosurface_coarse_to_fine: bool = True
        isosurface_deformable_grid: bool = False
        isosurface_remove_outliers: bool = True
        isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01

    cfg: Config

    def configure(self) -> None:
        self.bbox: Float[Tensor, "2 3"]
        self.register_buffer(
            "bbox",
            torch.as_tensor(
                [
                    [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],
                    [self.cfg.radius, self.cfg.radius, self.cfg.radius],
                ],
                dtype=torch.float32,
            ),
        )
        self.isosurface_helper: Optional[IsosurfaceHelper] = None
        self.unbounded: bool = True

    def _initilize_isosurface_helper(self):
        if self.cfg.isosurface and self.isosurface_helper is None:
            if self.cfg.isosurface_method == "mc-cpu":
self.isosurface_helper = MarchingCubeCPUHelper(
1
2023-10-24 19:02:44+00:00
12k
princeton-nlp/LLM-Shearing
llmshearing/models/composer_pythia.py
[ { "identifier": "L0Module", "path": "llmshearing/models/l0_module.py", "snippet": "class L0Module(nn.Module):\n def __init__(self, cfg, device):\n super(L0Module, self).__init__()\n\n # base and target model info\n n_matrix_mlp = 2 if \"pythia\" in cfg.name else 3\n self.base_model_info = self.set_model_info(cfg, n_matrix_mlp=n_matrix_mlp) \n l0_module_cfg = cfg.l0_module\n self.target_model_info = None\n target_model_cfg = getattr(l0_module_cfg, \"target_model\", None)\n if target_model_cfg is not None:\n self.target_model_info = self.set_model_info(target_model_cfg, n_matrix_mlp=n_matrix_mlp)\n \n # l0 config\n self.pruning_modules = l0_module_cfg.pruning_modules \n self.start_sparsity = l0_module_cfg.start_sparsity \n self.lagrangian_warmup_steps = Time.from_timestring(l0_module_cfg.lagrangian_warmup_steps).value\n self.device = device\n self.eval_target_model = l0_module_cfg.get(\"eval_target_model\", True)\n \n # l0 params\n self.lambdas = {}\n self.lambdas[\"lambda_1\"] = torch.nn.Parameter(torch.tensor(0.0, device=device))\n self.lambdas[\"lambda_2\"] = torch.nn.Parameter(torch.tensor(0.0, device=device))\n self.masks = {}\n for pruning_module in self.pruning_modules:\n self.initialize_one_module(pruning_module)\n self.masks = torch.nn.ModuleDict(self.masks)\n self.lambdas = torch.nn.ParameterDict(self.lambdas)\n \n # config after initialization\n self.prunable_model_size = self.calculate_prunable_model_size(self.base_model_info)\n if target_model_cfg is not None:\n self.prunable_target_model_size = self.calculate_prunable_model_size(self.target_model_info)\n self.target_sparsity = 1 - self.prunable_target_model_size / self.prunable_model_size\n else:\n self.target_sparsity = l0_module_cfg.target_sparsity\n\n print(\"********** Initializing L0 Module **********\") \n for pruning_module in self.pruning_modules:\n print(f\"***** {pruning_module} *****\")\n print(f\"z.shape\", self.masks[pruning_module].z_loga.shape)\n print(f\"size\", self.masks[pruning_module].mask_size)\n print(f\"prunable model size: {self.prunable_model_size}\")\n \n \n def set_model_info(self, cfg, n_matrix_mlp):\n ns = NS() \n ns.hidden_size = cfg.d_model\n ns.intermediate_size = cfg.intermediate_size\n ns.num_attention_heads = cfg.n_heads\n ns.mlp_num_per_layer = 1\n ns.dim_per_head = ns.hidden_size // ns.num_attention_heads \n ns.num_layers = cfg.n_layers\n ns.vocab_size = cfg.vocab_size\n\n ns.params_per_head_layer = ns.hidden_size * ns.hidden_size * 4\n ns.params_per_head = ns.params_per_head_layer // ns.num_attention_heads\n ns.params_per_mlp_layer = ns.hidden_size * ns.intermediate_size * n_matrix_mlp\n ns.params_per_intermediate_dim = ns.params_per_mlp_layer // ns.intermediate_size\n\n ns.full_model_size = (ns.params_per_head_layer + ns.params_per_mlp_layer) * ns.num_layers\n return ns\n \n def calculate_prunable_model_size(self, ns: NS):\n prunable_mlp_size = ns.params_per_mlp_layer * ns.num_layers\n prunable_head_layer_size = ns.params_per_head_layer * ns.num_layers\n prunable_model_size = 0\n if \"hidden\" in self.pruning_modules:\n return prunable_mlp_size + prunable_head_layer_size\n if \"head_layer\" in self.pruning_modules or \"head\" in self.pruning_modules:\n prunable_model_size += prunable_head_layer_size\n if \"mlp\" in self.pruning_modules or \"intermediate\" in self.pruning_modules:\n prunable_model_size += prunable_mlp_size\n return prunable_model_size\n \n def initialize_one_module(self, module_name: str):\n func_name = f\"initialize_{module_name}\"\n try:\n method = getattr(self, 
func_name)\n except AttributeError:\n raise NotImplementedError(\"Instance `{}` does not implement `{}`\".format(self, func_name))\n method()\n \n def initialize_hidden(self):\n mask_shape = [self.base_model_info.hidden_size]\n num_params_per_mask=self.base_model_info.hidden_size * 4 + self.base_model_info.hidden_size * 4 * 2\n \n target_hidden_sparsity = None; pd=None; target_mask_size=None; \n if self.target_model_info is not None:\n target_hidden_sparsity = 1 - self.target_model_info.hidden_size / self.base_model_info.hidden_size\n target_mask_size = self.target_model_info.hidden_size\n pd = {\"lambda_1_hidden\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_hidden\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n hidden_mask = Mask(name=\"hidden\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=[self.base_model_info.hidden_size],\n target_sparsity=target_hidden_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"hidden\"] = hidden_mask\n\n def initialize_head(self):\n mask_shape = [self.base_model_info.num_layers, self.base_model_info.num_attention_heads]\n num_params_per_mask = self.base_model_info.params_per_head\n mask_output_shape = [self.base_model_info.num_layers, 1, self.base_model_info.num_attention_heads, 1] \n \n target_head_sparsity = None; pd = {} ; target_mask_size=None; \n if self.target_model_info is not None:\n target_head_sparsity = 1 - self.target_model_info.num_attention_heads / self.base_model_info.num_attention_heads\n target_mask_size = self.target_model_info.num_attention_heads\n pd = {\"lambda_1_head\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_head\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n head_mask = Mask(name=\"head\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_head_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"head\"] = head_mask \n\n def initialize_qk_head_dim(self): # only campatible when target model info is available\n mask_shape = [self.base_model_info.num_layers, self.base_model_info.num_attention_heads, self.base_model_info.dim_per_head]\n num_params_per_mask = 2 * self.base_model_info.hidden_size\n mask_output_shape = [self.base_model_info.num_layers, self.base_model_info.hidden_size] \n \n target_qk_head_dim_sparsity = None; pd = {} \n if self.target_model_info is not None:\n target_qk_head_dim_sparsity = 1 - self.target_model_info.hidden_size / self.base_model_info.hidden_size\n pd = {\"lambda_1_qk_head_dim\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_qk_head_dim\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n qk_head_dim = Mask(name=\"qk_head_dim\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_qk_head_dim_sparsity,\n target_mask_size=self.target_model_info.hidden_size,\n device=self.device)\n self.masks[\"qk_head_dim\"] = qk_head_dim \n \n \n def initialize_vo_head_dim(self): # only campatible when target model info is available\n mask_shape = [self.base_model_info.num_layers, self.base_model_info.num_attention_heads, self.base_model_info.dim_per_head]\n 
num_params_per_mask = 2 * self.base_model_info.hidden_size\n mask_output_shape = [self.base_model_info.num_layers, self.base_model_info.hidden_size] \n \n target_vo_head_dim_sparsity = None; pd = {} \n if self.target_model_info is not None:\n target_vo_head_dim_sparsity = 1 - self.target_model_info.hidden_size / self.base_model_info.hidden_size\n pd = {\"lambda_1_vo_head_dim\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_vo_head_dim\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n vo_head_dim = Mask(name=\"vo_head_dim\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_vo_head_dim_sparsity,\n device=self.device)\n self.masks[\"vo_head_dim\"] = vo_head_dim \n \n def initialize_head_layer(self):\n mask_shape = [self.base_model_info.num_layers]\n num_params_per_mask=self.base_model_info.params_per_head * self.base_model_info.num_attention_heads\n mask_output_shape = [self.base_model_info.num_layers] \n \n target_head_layer_sparsity = None; pd = {}; target_mask_size=None; \n if self.target_model_info is not None:\n target_head_layer_sparsity = 1 - self.target_model_info.num_layers / self.base_model_info.num_layers\n target_mask_size = self.target_model_info.num_layers\n pd = {\"lambda_1_head_layer\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_head_layer\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n head_layer_mask = Mask(name=\"head_layer\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_head_layer_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"head_layer\"] = head_layer_mask\n \n def initialize_intermediate(self):\n mask_shape = [self.base_model_info.num_layers, self.base_model_info.intermediate_size]\n num_params_per_mask=self.base_model_info.params_per_intermediate_dim\n mask_output_shape = [self.base_model_info.num_layers, 1, 1, self.base_model_info.intermediate_size] \n \n target_int_sparsity = None; pd = {}; target_mask_size=None; \n if self.target_model_info is not None:\n target_int_sparsity = 1 - self.target_model_info.intermediate_size / self.base_model_info.intermediate_size\n target_mask_size = self.target_model_info.intermediate_size\n pd = {\"lambda_1_intermediate\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_intermediate\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n int_mask = Mask(name=\"intermediate\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_int_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"intermediate\"] = int_mask\n \n\n def initialize_mlp(self):\n mask_shape = [self.base_model_info.num_layers]\n num_params_per_mask=self.base_model_info.params_per_mlp_layer\n mask_output_shape = [self.base_model_info.num_layers] \n \n target_mlp_sparsity = None; pd = {}; target_mask_size=None; \n if self.target_model_info is not None:\n target_mlp_sparsity = 1 - self.target_model_info.num_layers / self.base_model_info.num_layers\n target_mask_size = self.target_model_info.num_layers\n pd = {\"lambda_1_mlp\": torch.nn.Parameter(torch.tensor(0.0, 
device=self.device)),\n \"lambda_2_mlp\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n mlp_mask = Mask(name=\"mlp\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_mlp_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"mlp\"] = mlp_mask \n\n def initialize_layer(self):\n mask_shape = [self.base_model_info.num_layers]\n num_params_per_mask=self.base_model_info.params_per_head * self.base_model_info.num_attention_heads + self.base_model_info.params_per_mlp_layer\n mask_output_shape = [self.base_model_info.num_layers] \n \n target_layer_sparsity = None; target_mask_size=None; pd = {}\n if self.target_model_info is not None:\n target_layer_sparsity = 1 - self.target_model_info.num_layers / self.base_model_info.num_layers\n target_mask_size = self.target_model_info.num_layers\n pd = {\"lambda_1_layer\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_layer\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n layer_mask = Mask(name=\"layer\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_layer_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model) \n self.masks[\"layer\"] = layer_mask \n \n def constrain_parameters(self):\n for key in self.masks:\n self.masks[key].constrain_parameters()\n\n def calculate_expected_score_sparsity(self):\n expected_scores = {}\n expected_sparsitys = {}\n for key in self.masks:\n score, sparsity = self.masks[key].calculate_expected_score_sparsity()\n expected_scores[key] = score\n expected_sparsitys[key] = sparsity\n return expected_scores, expected_sparsitys\n \n def transform_scores_for_head(self, expected_scores: dict):\n head_score = expected_scores[\"head\"] # 12 * 12\n\n head_layer_score = None\n if \"head_layer\" in expected_scores:\n head_layer_score = expected_scores[\"head_layer\"]\n elif \"layer\" in expected_scores:\n head_layer_score = expected_scores[\"layer\"] # 12\n if head_layer_score is not None:\n head_layer_score = head_layer_score.view(-1, 1) # 12 * 1\n \n return head_layer_score, head_score\n\n def transform_scores_for_mlp(self, expected_scores: dict):\n mlp_score = None\n if \"mlp\" in expected_scores:\n mlp_score = expected_scores[\"mlp\"] # 12\n elif \"layer\" in expected_scores:\n mlp_score = expected_scores[\"layer\"] # 12\n if mlp_score is not None:\n mlp_score = mlp_score.unsqueeze(-1)\n \n intermediate_score = expected_scores[\"intermediate\"] # 12 * 3072\n return mlp_score, intermediate_score\n\n\n def get_expected_num_params(self, expected_scores: dict): #! 
calculate the current parsity\n num_parameters = 0\n \n # 12 * 1 \n # 12 * 12\n head_layer_score, head_score = self.transform_scores_for_head(expected_scores)\n mlp_score, int_score = self.transform_scores_for_mlp(expected_scores)\n \n head_score = (head_layer_score * head_score) # 12 * 12\n int_score = (mlp_score * int_score) # 12 * 3072\n\n qk_score = None\n if \"qk_head_dim\" in expected_scores:\n qk_head_dim_score = expected_scores[\"qk_head_dim\"] # num_layer * hidden_size\n vo_head_dim_score = expected_scores[\"vo_head_dim\"] # num_layer * hidden_size\n qk_head_dim_score = qk_head_dim_score.view(qk_head_dim_score.shape[0], -1) # 12 * 768\n vo_head_dim_score = vo_head_dim_score.view(vo_head_dim_score.shape[0], -1) # 12 * 768\n head_score = torch.repeat_interleave(head_score, self.base_model_info.dim_per_head, dim=1) # 12 * 768\n\n qk_score = head_score * qk_head_dim_score # 12 * 768\n vo_score = head_score * vo_head_dim_score # 12 * 768\n \n if \"hidden\" in expected_scores:\n hidden_score = expected_scores[\"hidden\"] # 768 \n \n if qk_score is None:\n num_parameters += torch.outer(hidden_score, head_score.reshape(-1)).sum() * self.masks.head.num_params_per_mask / self.base_model_info.hidden_size # 768 * 144\n num_parameters += torch.outer(hidden_score, int_score.reshape(-1)).sum() * self.masks.intermediate.num_params_per_mask / self.base_model_info.hidden_size # 768 * 36864\n else:\n num_parameters += torch.sum(torch.matmul(hidden_score.reshape(1, -1, 1), qk_score.unsqueeze(1))) * 2 # 12 * 768 * 768\n num_parameters += torch.sum(torch.matmul(hidden_score.reshape(1, -1, 1), vo_score.unsqueeze(1))) * 2 # 12 * 768 * 768\n num_parameters += torch.sum(torch.matmul(hidden_score.reshape(1, -1, 1), int_score.unsqueeze(1))) * 3 # 12 * 768 * 3072\n else:\n num_parameters += torch.sum(head_score) * self.masks.head.num_params_per_mask\n num_parameters += torch.sum(int_score) * self.masks.intermediate.num_params_per_mask\n return num_parameters\n \n def get_target_sparsity(self, pruned_steps: int, full_sparsity: float = None):\n target_sparsity = full_sparsity\n if getattr(self, \"lagrangian_warmup_steps\", 0) > 0:\n target_sparsity = (target_sparsity - self.start_sparsity) * min(1, pruned_steps / self.lagrangian_warmup_steps) + self.start_sparsity\n return target_sparsity\n\n\n def lagrangian_regularization(self, pruned_steps: int):\n def _lag_loss(expected_sparsity: torch.tensor, target_sparsity: float, lambda_1: torch.tensor, lambda_2: torch.tensor):\n lagrangian_loss = lambda_1 * (expected_sparsity - target_sparsity) + lambda_2 * (expected_sparsity - target_sparsity) ** 2 \n lagrangian_loss = lagrangian_loss.mean()\n return lagrangian_loss\n\n target_sparsity = self.get_target_sparsity(pruned_steps, self.target_sparsity) \n expected_scores, expected_sparsitys = self.calculate_expected_score_sparsity()\n expected_size = self.get_expected_num_params(expected_scores) #! 
calculate \\bar s\n expected_sparsity = 1 - expected_size / self.prunable_model_size\n \n return_v = {}\n if self.target_model_info is None:\n lagrangian_loss = _lag_loss(expected_sparsity, target_sparsity, self.lambdas[\"lambda_1\"], self.lambdas[\"lambda_2\"])\n return_v = {\"expected_sparsity\": expected_sparsity.item(), \"target_sparsity\": target_sparsity}\n for key in expected_sparsitys:\n return_v[f\"expected_{key}_sparsity\"] = expected_sparsitys[key].mean().item()\n else:\n lagrangian_loss = 0\n return_v = {}\n for pruning_module in self.pruning_modules:\n ts = self.get_target_sparsity(pruned_steps, self.masks[pruning_module].target_sparsity)\n expected_ts = expected_sparsitys[pruning_module] \n lagrangian_loss += _lag_loss(expected_ts, ts, self.lambdas[f\"lambda_1_{pruning_module}\"], self.lambdas[f\"lambda_2_{pruning_module}\"])\n expected_ts = expected_ts.mean().item()\n return_v.update({\"expected_{}_sparsity\".format(pruning_module): expected_ts, \"target_{}_sparsity\".format(pruning_module): ts})\n return_v[\"expected_sparsity\"] = expected_sparsity.item()\n return_v[\"target_sparsity\"] = target_sparsity\n\n\n # return_v might not matter\n return lagrangian_loss, return_v\n \n def forward(self, calculate_lagrangian: bool = False, pruned_steps: int = 0):\n self.constrain_parameters()\n if calculate_lagrangian:\n return self.lagrangian_regularization(pruned_steps)\n \n zs = {f\"{pruning_module}_z\": [] for pruning_module in self.pruning_modules}\n \n if \"layer\" in self.pruning_modules:\n zs.pop(\"layer_z\")\n zs[\"mlp_z\"] = []\n zs[\"head_layer_z\"] = []\n \n if self.training:\n for pruning_module in self.pruning_modules:\n mask = self.masks[pruning_module]\n z = mask.sample_z()\n zs[f\"{pruning_module}_z\"] = z\n else: # removed layerwise! \n with torch.no_grad():\n for pruning_module in self.pruning_modules:\n mask = self.masks[pruning_module]\n z = mask.deterministic_z()\n zs[f\"{pruning_module}_z\"] = z\n if \"layer_z\" in zs:\n zs[\"mlp_z\"] = zs.pop(\"layer_z\")\n zs[\"head_layer_z\"] = zs[\"mlp_z\"]\n return zs " }, { "identifier": "ComposerMosaicLlama", "path": "llmshearing/models/composer_llama.py", "snippet": "class ComposerMosaicLlama(ComposerModel):\n \"\"\" Llama model with the Composer model interface. 
\"\"\"\n def __init__(self, cfg):\n super().__init__()\n self.model = LlamaModel(cfg)\n self.ref_model = None\n self.num_fwd_flops = self._compute_num_fwd_flops()\n self.train_metrics = {\n 'LanguageCrossEntropy': LanguageCrossEntropy(),\n 'Perplexity': LanguagePerplexity(),\n }\n self.eval_metrics = {\n 'LanguageCrossEntropy': LanguageCrossEntropy(),\n 'Perplexity': LanguagePerplexity(),\n }\n\n self.set_names = getattr(cfg, \"set_names\", None)\n if self.set_names is not None:\n self.set_name_to_id = {set_name: i for i, set_name in enumerate(self.set_names)}\n self.set_id_to_name = {i: set_name for i, set_name in enumerate(self.set_names)}\n \n for set_name in self.set_names:\n # add train and eval metrics for each set\n self.train_metrics[f'{set_name}_LanguageCrossEntropy'] = DomainLanguageCrossEntropy(set_name=set_name)\n self.eval_metrics[f'{set_name}_LanguageCrossEntropy'] = DomainLanguageCrossEntropy(set_name=set_name)\n self.train_metrics[f'{set_name}_count'] = DomainCount(set_name=set_name, set_index=self.set_name_to_id[set_name]) \n\n def prune_params(self, zs=None):\n self.model.prune_params(zs)\n \n def get_targets(self, batch):\n targets = torch.roll(batch['labels'], shifts=-1)\n targets[:, -1] = -100\n return targets\n \n def forward(self, batch):\n input_ids = batch['input_ids']\n key_padding_mask = batch['attention_mask'].bool(\n ) if 'attention_mask' in batch else None\n pruned_steps = batch.get('pruned_steps', None)\n if pruned_steps is not None:\n pruned_steps = pruned_steps[0].item()\n zs = {key: batch[key] for key in batch if \"_z\" in key}\n model_output = self.model(input_ids=input_ids, key_padding_mask=key_padding_mask, pruned_steps=pruned_steps, **zs)\n return model_output\n\n def eval_forward(self, batch, outputs=None):\n return outputs if outputs is not None else self.forward(batch)\n\n def loss(self, outputs, batch):\n logits = outputs[\"logits\"]\n l0_output = outputs[\"l0_output\"]\n targets = self.get_targets(batch)\n\n loss = F.cross_entropy(logits.view(-1, logits.size(-1)),\n targets.view(-1),\n ignore_index=-100)\n return_loss = {\"ce_loss\": loss}\n if l0_output is not None:\n lag_loss = l0_output[0]\n return_loss[\"lag_loss\"] = lag_loss\n return_loss[\"total\"] = sum(return_loss.values())\n return return_loss\n\n def get_metrics(self, is_train=False):\n return self.train_metrics if is_train else self.eval_metrics\n\n def update_metric(self, batch, outputs, metric) -> None:\n logits = outputs[\"logits\"]\n if isinstance(metric, DomainLanguageCrossEntropy):\n targets = self.get_targets(batch)\n set_id = self.set_name_to_id[metric.set_name]\n targets[batch[\"set\"] != set_id] = -100\n metric.update(logits, targets)\n elif isinstance(metric, DomainCount):\n with torch.inference_mode():\n idx = None\n selected_sets = batch['set']\n metric.update(selected_sets, idx)\n else:\n logits = logits.view(-1, logits.size(-1))\n targets = self.get_targets(batch).view(-1)\n metric.update(logits, targets)\n\n def add_eval_metrics(self, evaluator):\n evaluator_metrics = {\n m: METRIC_DEFAULT_CTORS[m]() for m in evaluator.metric_names\n }\n if self.eval_metrics is not None:\n self.eval_metrics.update(evaluator_metrics)\n else:\n self.eval_metrics = evaluator_metrics\n\n def _compute_num_fwd_flops(self):\n # Might not be correct for LLaMA structures\n n_params = sum(p.numel() for p in self.parameters())\n # the number of paramters is approximately the number of multiply-accumulates (MAC) in the network\n # each MAC has 2 FLOPs - we multiply by 2 ie 2 * n_param\n # this gets 
us FLOPs / token\n params_flops_per_token = 2 * n_params\n params_flops_per_seq = params_flops_per_token * self.model.cfg.max_seq_len\n # there are 2 FLOPS per mac; there is A=Q*K^T and out=A*V ops (ie mult by 2)\n attn_flops_per_seq = self.model.cfg.n_layers * 2 * 2 * (\n self.model.cfg.d_model * (self.model.cfg.max_seq_len**2))\n return params_flops_per_seq + attn_flops_per_seq\n\n def flops_per_batch(self, batch):\n # Note: this computation does not take into account padding, and assumes\n # that the dataset has been constructed without padding. Additionally, we\n # assume the backward pass is approximately 2x the forward pass\n return self.num_fwd_flops * 3 * batch['input_ids'].shape[0]\n\n def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:\n if new_num_tokens is not None:\n self.model._resize_token_embeddings(new_num_tokens)" }, { "identifier": "prepare_decoder_attention_mask", "path": "llmshearing/models/composer_llama.py", "snippet": "def prepare_decoder_attention_mask(input_shape, inputs_embeds):\n # create causal mask\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n combined_attention_mask = None\n if input_shape[-1] > 1:\n combined_attention_mask = _make_causal_mask(input_shape, inputs_embeds.dtype).to(inputs_embeds.device)\n\n return combined_attention_mask" }, { "identifier": "turn_head_z", "path": "llmshearing/models/composer_llama.py", "snippet": "def turn_head_z(head_z, head_layer_z):\n head_z = head_z.squeeze().clone()\n if head_layer_z is not None:\n head_z *= head_layer_z\n to_prune_heads = torch.where(head_z == 0)[0].view(-1).tolist()\n return to_prune_heads" }, { "identifier": "turn_mlp_z", "path": "llmshearing/models/composer_llama.py", "snippet": "def turn_mlp_z(intermediate_z, mlp_z):\n intermediate_z_layer = intermediate_z.squeeze().clone()\n if mlp_z is not None:\n intermediate_z_layer *= mlp_z\n keep_intermediate_dims = torch.where(intermediate_z_layer != 0)[0].tolist()\n return keep_intermediate_dims " }, { "identifier": "normal_attn_fn", "path": "llmshearing/models/composer_llama.py", "snippet": "def normal_attn_fn(\n query,\n key, \n value,\n attention_mask=None,\n head_z=None\n):\n bsz, n_heads, q_len, head_dim = query.shape\n dim = n_heads * head_dim\n attn_weights = torch.matmul(query, key.transpose(2, 3)) / math.sqrt(head_dim)\n attn_weights = attn_weights + attention_mask\n attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))\n\n # upcast attention to fp32\n attn_weights = torch.nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)\n attn_output = torch.matmul(attn_weights, value) # (bsz, n_heads, q_len, head_dim)\n if head_z is not None:\n attn_output *= head_z.unsqueeze(-1)\n attn_output = attn_output.transpose(1, 2)\n attn_output = attn_output.reshape(bsz, q_len, dim)\n return attn_output" }, { "identifier": "flash_attn_fn", "path": "llmshearing/models/composer_llama.py", "snippet": "def flash_attn_fn(\n query,\n key,\n value,\n softmax_scale=None,\n attn_bias=None,\n query_padding_mask=None,\n key_padding_mask=None,\n is_causal=False,\n dropout_p=0.0,\n training=False,\n needs_weights=False,\n head_z=None,\n \n):\n try:\n from flash_attn import bert_padding # type: ignore\n from flash_attn import flash_attn_interface # type: ignore\n except ImportError as e:\n raise e\n\n # check_valid_inputs(query, key, value)\n\n if attn_bias is not None:\n raise NotImplementedError(f'attn_bias not implemented for flash attn.')\n\n batch_size, seqlen = 
query.shape[:2]\n\n if query_padding_mask is None:\n query_padding_mask = torch.ones((batch_size, seqlen), dtype=torch.bool, device=query.device)\n if key_padding_mask is None:\n key_padding_mask = torch.ones((batch_size, seqlen), dtype=torch.bool, device=key.device)\n\n query_unpad, indices_q, cu_seqlens_q, max_seqlen_q = bert_padding.unpad_input(\n query, query_padding_mask)\n # query_unpad = rearrange(query_unpad, 'nnz (h d) -> nnz h d', h=n_heads)\n\n key_unpad, _, cu_seqlens_k, max_seqlen_k = bert_padding.unpad_input(\n key, key_padding_mask)\n # key_unpad = rearrange(key_unpad, 'nnz (h d) -> nnz h d', h=n_heads)\n\n value_unpad, _, _, _ = bert_padding.unpad_input(value, key_padding_mask)\n # value_unpad = rearrange(value_unpad, 'nnz (h d) -> nnz h d', h=n_heads)\n\n dropout_p = dropout_p if training else 0.0\n \n output_unpad = flash_attn_interface.flash_attn_unpadded_func(\n query_unpad,\n key_unpad,\n value_unpad,\n cu_seqlens_q,\n cu_seqlens_k,\n max_seqlen_q,\n max_seqlen_k,\n dropout_p,\n softmax_scale=softmax_scale,\n causal=is_causal,\n return_attn_probs=needs_weights)\n\n if head_z is not None:\n output_unpad = output_unpad * head_z # 1 * h * 1\n output = bert_padding.pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices_q, batch_size, seqlen)\n return output, None" } ]
import math
import torch
import torch.nn as nn
from typing import List, Optional, Tuple
from einops import rearrange
from omegaconf import DictConfig
from torch.nn import functional as F
from transformers.pytorch_utils import (find_pruneable_heads_and_indices, prune_linear_layer)
from llmshearing.models.l0_module import L0Module
from llmshearing.models.composer_llama import ComposerMosaicLlama, prepare_decoder_attention_mask, turn_head_z, turn_mlp_z, normal_attn_fn, flash_attn_fn
from transformers.models.gpt_neox.modeling_gpt_neox import apply_rotary_pos_emb
9,275
input, self.normalized_shape, self.weight, self.bias, self.eps) return output def prune_params(self, hidden_z): remaining_index = torch.where(~hidden_z.eq(0))[0] # self.weight = torch.nn.Parameter(self.weight.data.mul(hidden_z.squeeze())[remaining_index]) self.weight = torch.nn.parameter.Parameter(self.weight.index_select(0, remaining_index)) self.bias = torch.nn.parameter.Parameter(self.bias.index_select(0, remaining_index)) self.normalized_shape = (len(remaining_index),) class PythiaEmbedding(nn.Embedding): def forward(self, input, hidden_z=None): embeddings = super().forward(input) if hidden_z is not None: embeddings = embeddings.mul(hidden_z) return embeddings def prune_params(self, hidden_z): remaining_index = torch.where(~hidden_z.eq(0))[0] self.weight.data = self.weight.data.mul(hidden_z) self.weight = torch.nn.parameter.Parameter(self.weight.index_select(1, remaining_index).clone()) self.embedding_dim = len(remaining_index) print(f" Embedding: {len(hidden_z)} -> {len(remaining_index)}") class PythiaModel(nn.Module): def __init__(self, cfg: DictConfig): super().__init__() print(f'Tried to build Pythia model with cfg.name={cfg.name}') self.cfg = cfg ### added ### self.l0_module = None if getattr(self.cfg, "l0_module", None) is not None: self.l0_module = L0Module(self.cfg, device=cfg.init_device) ############# layernorm_class = CoFiLayerNorm self.attn_impl = cfg.attn_impl self.embedding_fraction = cfg.get('embedding_fraction', 1) assert 0 < self.embedding_fraction <= 1, 'model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!' self.transformer = nn.ModuleDict({ "wte": PythiaEmbedding(cfg.vocab_size, cfg.d_model, device=cfg.init_device), }) self.transformer.update({ 'blocks': nn.ModuleList([ PythiaBlock(cfg, device=cfg.init_device) for _ in range(cfg.n_layers) ]) }) self.transformer.update({ "output": nn.Linear(cfg.d_model, cfg.vocab_size, device=cfg.init_device, bias=False), }) self.transformer.update({ "ln_f": layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=cfg.init_device), # TODO: add to config }) self.is_causal = True if cfg.get('verbose') and cfg.get('verbose') > 2: print(self) def prune_params(self, zs=None): # TODO if zs is None: self.l0_module.eval() zs = self.l0_module(calculate_lagrangian=False) # wte as well :) # ln_f if hidden states are to be pruned if "hidden_z" in zs: hidden_z = zs["hidden_z"] remaining_index = torch.where(~hidden_z.eq(0))[0] self.transformer.ln_f.prune_params(hidden_z) self.transformer.wte.weight.data = self.transformer.wte.weight.data.mul(hidden_z) self.transformer.wte.weight = torch.nn.parameter.Parameter( self.transformer.wte.weight.index_select(1, remaining_index).clone()) self.transformer.wte.embedding_dim = len(remaining_index) # self.transformer.output.weight.data = self.transformer.output.weight.data.mul(hidden_z) half = self.transformer.output.weight.data.dtype == torch.float16 self.transformer.output = prune_linear_layer(self.transformer.output, remaining_index, dim=1) if half: self.transformer.output = self.transformer.output.half() for i, block in enumerate(self.transformer.blocks): zs_block = self.get_zs_block(zs, i) block.prune_params(zs_block) def get_zs_block(self, zs, block_idx): zs_block = {} if zs is not None: for key in zs: if key == "hidden_z": zs_block["hidden_z"] = zs["hidden_z"] else: zs_block[key] = zs[key][block_idx] return zs_block def forward( self, input_ids: torch.LongTensor, key_padding_mask: Optional[torch.ByteTensor] = None, past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None, 
pruned_steps: int = 0, retain_grad: bool = False, **zs,): S = input_ids.size(1) assert S <= self.cfg.max_seq_len, f"Sequence length ({S}) exceeds model maximum sequence length ({self.cfg.max_seq_len})!" tok_emb = self.transformer.wte(input_ids) if "hidden_z" in zs: tok_emb = tok_emb.mul(zs["hidden_z"]) x = tok_emb attn_bias = None # only consider the flash attention case
class ComposerMosaicPythia(ComposerMosaicLlama): def __init__(self, cfg): super().__init__(cfg) self.model = PythiaModel(cfg) class CoFiLayerNorm(torch.nn.LayerNorm): def __init__(self, normalized_shape, eps: float = 1e-5, elementwise_affine: bool = True, device=None) -> None: super().__init__(normalized_shape, eps, elementwise_affine, device) def forward(self, input, hidden_z=None): if hidden_z is not None: remaining_index = torch.where(~hidden_z.eq(0))[0] compressed_input = torch.index_select( input, dim=-1, index=remaining_index) compressed_weight = self.weight[remaining_index] compressed_bias = self.bias[remaining_index] normalized_shape = len(remaining_index) normed_input = F.layer_norm( compressed_input, [normalized_shape], compressed_weight, compressed_bias, self.eps) output = input.clone() normed_input = normed_input.to(output.dtype) output[..., remaining_index] = normed_input else: output = F.layer_norm( input, self.normalized_shape, self.weight, self.bias, self.eps) return output def prune_params(self, hidden_z): remaining_index = torch.where(~hidden_z.eq(0))[0] # self.weight = torch.nn.Parameter(self.weight.data.mul(hidden_z.squeeze())[remaining_index]) self.weight = torch.nn.parameter.Parameter(self.weight.index_select(0, remaining_index)) self.bias = torch.nn.parameter.Parameter(self.bias.index_select(0, remaining_index)) self.normalized_shape = (len(remaining_index),) class PythiaEmbedding(nn.Embedding): def forward(self, input, hidden_z=None): embeddings = super().forward(input) if hidden_z is not None: embeddings = embeddings.mul(hidden_z) return embeddings def prune_params(self, hidden_z): remaining_index = torch.where(~hidden_z.eq(0))[0] self.weight.data = self.weight.data.mul(hidden_z) self.weight = torch.nn.parameter.Parameter(self.weight.index_select(1, remaining_index).clone()) self.embedding_dim = len(remaining_index) print(f" Embedding: {len(hidden_z)} -> {len(remaining_index)}") class PythiaModel(nn.Module): def __init__(self, cfg: DictConfig): super().__init__() print(f'Tried to build Pythia model with cfg.name={cfg.name}') self.cfg = cfg ### added ### self.l0_module = None if getattr(self.cfg, "l0_module", None) is not None: self.l0_module = L0Module(self.cfg, device=cfg.init_device) ############# layernorm_class = CoFiLayerNorm self.attn_impl = cfg.attn_impl self.embedding_fraction = cfg.get('embedding_fraction', 1) assert 0 < self.embedding_fraction <= 1, 'model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!' 
self.transformer = nn.ModuleDict({ "wte": PythiaEmbedding(cfg.vocab_size, cfg.d_model, device=cfg.init_device), }) self.transformer.update({ 'blocks': nn.ModuleList([ PythiaBlock(cfg, device=cfg.init_device) for _ in range(cfg.n_layers) ]) }) self.transformer.update({ "output": nn.Linear(cfg.d_model, cfg.vocab_size, device=cfg.init_device, bias=False), }) self.transformer.update({ "ln_f": layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=cfg.init_device), # TODO: add to config }) self.is_causal = True if cfg.get('verbose') and cfg.get('verbose') > 2: print(self) def prune_params(self, zs=None): # TODO if zs is None: self.l0_module.eval() zs = self.l0_module(calculate_lagrangian=False) # wte as well :) # ln_f if hidden states are to be pruned if "hidden_z" in zs: hidden_z = zs["hidden_z"] remaining_index = torch.where(~hidden_z.eq(0))[0] self.transformer.ln_f.prune_params(hidden_z) self.transformer.wte.weight.data = self.transformer.wte.weight.data.mul(hidden_z) self.transformer.wte.weight = torch.nn.parameter.Parameter( self.transformer.wte.weight.index_select(1, remaining_index).clone()) self.transformer.wte.embedding_dim = len(remaining_index) # self.transformer.output.weight.data = self.transformer.output.weight.data.mul(hidden_z) half = self.transformer.output.weight.data.dtype == torch.float16 self.transformer.output = prune_linear_layer(self.transformer.output, remaining_index, dim=1) if half: self.transformer.output = self.transformer.output.half() for i, block in enumerate(self.transformer.blocks): zs_block = self.get_zs_block(zs, i) block.prune_params(zs_block) def get_zs_block(self, zs, block_idx): zs_block = {} if zs is not None: for key in zs: if key == "hidden_z": zs_block["hidden_z"] = zs["hidden_z"] else: zs_block[key] = zs[key][block_idx] return zs_block def forward( self, input_ids: torch.LongTensor, key_padding_mask: Optional[torch.ByteTensor] = None, past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None, pruned_steps: int = 0, retain_grad: bool = False, **zs,): S = input_ids.size(1) assert S <= self.cfg.max_seq_len, f"Sequence length ({S}) exceeds model maximum sequence length ({self.cfg.max_seq_len})!" tok_emb = self.transformer.wte(input_ids) if "hidden_z" in zs: tok_emb = tok_emb.mul(zs["hidden_z"]) x = tok_emb attn_bias = None # only consider the flash attention case
attention_mask = prepare_decoder_attention_mask((tok_emb.size(0), tok_emb.size(1)), tok_emb)
2
2023-10-16 12:26:08+00:00
12k
hkchengrex/Cutie
cutie/inference/inference_core.py
[ { "identifier": "MemoryManager", "path": "cutie/inference/memory_manager.py", "snippet": "class MemoryManager:\n \"\"\"\n Manages all three memory stores and the transition between working/long-term memory\n \"\"\"\n def __init__(self, cfg: DictConfig, object_manager: ObjectManager):\n self.object_manager = object_manager\n self.sensory_dim = cfg.model.sensory_dim\n self.top_k = cfg.top_k\n self.chunk_size = cfg.chunk_size\n\n self.save_aux = cfg.save_aux\n\n self.use_long_term = cfg.use_long_term\n self.count_long_term_usage = cfg.long_term.count_usage\n # subtract 1 because the first-frame is now counted as \"permanent memory\"\n # and is not counted towards max_mem_frames\n # but we want to keep the hyperparameters consistent as before for the same behavior\n if self.use_long_term:\n self.max_mem_frames = cfg.long_term.max_mem_frames - 1\n self.min_mem_frames = cfg.long_term.min_mem_frames - 1\n self.num_prototypes = cfg.long_term.num_prototypes\n self.max_long_tokens = cfg.long_term.max_num_tokens\n self.buffer_tokens = cfg.long_term.buffer_tokens\n else:\n self.max_mem_frames = cfg.max_mem_frames - 1\n\n # dimensions will be inferred from input later\n self.CK = self.CV = None\n self.H = self.W = None\n\n # The sensory memory is stored as a dictionary indexed by object ids\n # each of shape bs * C^h * H * W\n self.sensory = {}\n\n # a dictionary indexed by object ids, each of shape bs * T * Q * C\n self.obj_v = {}\n\n self.work_mem = KeyValueMemoryStore(save_selection=self.use_long_term,\n save_usage=self.use_long_term)\n if self.use_long_term:\n self.long_mem = KeyValueMemoryStore(save_usage=self.count_long_term_usage)\n\n self.config_stale = True\n self.engaged = False\n\n def update_config(self, cfg: DictConfig) -> None:\n self.config_stale = True\n self.top_k = cfg['top_k']\n\n assert self.use_long_term == cfg.use_long_term, 'cannot update this'\n assert self.count_long_term_usage == cfg.long_term.count_usage, 'cannot update this'\n\n self.use_long_term = cfg.use_long_term\n self.count_long_term_usage = cfg.long_term.count_usage\n if self.use_long_term:\n self.max_mem_frames = cfg.long_term.max_mem_frames - 1\n self.min_mem_frames = cfg.long_term.min_mem_frames - 1\n self.num_prototypes = cfg.long_term.num_prototypes\n self.max_long_tokens = cfg.long_term.max_num_tokens\n self.buffer_tokens = cfg.long_term.buffer_tokens\n else:\n self.max_mem_frames = cfg.max_mem_frames - 1\n\n def _readout(self, affinity, v) -> torch.Tensor:\n # affinity: bs*N*HW\n # v: bs*C*N or bs*num_objects*C*N\n # returns bs*C*HW or bs*num_objects*C*HW\n if len(v.shape) == 3:\n # single object\n return v @ affinity\n else:\n bs, num_objects, C, N = v.shape\n v = v.view(bs, num_objects * C, N)\n out = v @ affinity\n return out.view(bs, num_objects, C, -1)\n\n def _get_mask_by_ids(self, mask: torch.Tensor, obj_ids: List[int]) -> torch.Tensor:\n # -1 because the mask does not contain the background channel\n return mask[:, [self.object_manager.find_tmp_by_id(obj) - 1 for obj in obj_ids]]\n\n def _get_sensory_by_ids(self, obj_ids: List[int]) -> torch.Tensor:\n return torch.stack([self.sensory[obj] for obj in obj_ids], dim=1)\n\n def _get_object_mem_by_ids(self, obj_ids: List[int]) -> torch.Tensor:\n return torch.stack([self.obj_v[obj] for obj in obj_ids], dim=1)\n\n def _get_visual_values_by_ids(self, obj_ids: List[int]) -> torch.Tensor:\n # All the values that the object ids refer to should have the same shape\n value = torch.stack([self.work_mem.value[obj] for obj in obj_ids], dim=1)\n if self.use_long_term 
and obj_ids[0] in self.long_mem.value:\n lt_value = torch.stack([self.long_mem.value[obj] for obj in obj_ids], dim=1)\n value = torch.cat([lt_value, value], dim=-1)\n\n return value\n\n def read(self, pix_feat: torch.Tensor, query_key: torch.Tensor, selection: torch.Tensor,\n last_mask: torch.Tensor, network: CUTIE) -> Dict[int, torch.Tensor]:\n \"\"\"\n Read from all memory stores and returns a single memory readout tensor for each object\n\n pix_feat: (1/2) x C x H x W\n query_key: (1/2) x C^k x H x W\n selection: (1/2) x C^k x H x W\n last_mask: (1/2) x num_objects x H x W (at stride 16)\n return a dict of memory readouts, indexed by object indices. Each readout is C*H*W\n \"\"\"\n h, w = pix_feat.shape[-2:]\n bs = pix_feat.shape[0]\n assert query_key.shape[0] == bs\n assert selection.shape[0] == bs\n assert last_mask.shape[0] == bs\n\n query_key = query_key.flatten(start_dim=2) # bs*C^k*HW\n selection = selection.flatten(start_dim=2) # bs*C^k*HW\n \"\"\"\n Compute affinity and perform readout\n \"\"\"\n all_readout_mem = {}\n buckets = self.work_mem.buckets\n for bucket_id, bucket in buckets.items():\n if self.use_long_term and self.long_mem.engaged(bucket_id):\n # Use long-term memory\n long_mem_size = self.long_mem.size(bucket_id)\n memory_key = torch.cat([self.long_mem.key[bucket_id], self.work_mem.key[bucket_id]],\n -1)\n shrinkage = torch.cat(\n [self.long_mem.shrinkage[bucket_id], self.work_mem.shrinkage[bucket_id]], -1)\n\n similarity = get_similarity(memory_key, shrinkage, query_key, selection)\n affinity, usage = do_softmax(similarity,\n top_k=self.top_k,\n inplace=True,\n return_usage=True)\n \"\"\"\n Record memory usage for working and long-term memory\n \"\"\"\n # ignore the index return for long-term memory\n work_usage = usage[:, long_mem_size:]\n self.work_mem.update_bucket_usage(bucket_id, work_usage)\n\n if self.count_long_term_usage:\n # ignore the index return for working memory\n long_usage = usage[:, :long_mem_size]\n self.long_mem.update_bucket_usage(bucket_id, long_usage)\n else:\n # no long-term memory\n memory_key = self.work_mem.key[bucket_id]\n shrinkage = self.work_mem.shrinkage[bucket_id]\n similarity = get_similarity(memory_key, shrinkage, query_key, selection)\n\n if self.use_long_term:\n affinity, usage = do_softmax(similarity,\n top_k=self.top_k,\n inplace=True,\n return_usage=True)\n self.work_mem.update_bucket_usage(bucket_id, usage)\n else:\n affinity = do_softmax(similarity, top_k=self.top_k, inplace=True)\n\n if self.chunk_size < 1:\n object_chunks = [bucket]\n else:\n object_chunks = [\n bucket[i:i + self.chunk_size] for i in range(0, len(bucket), self.chunk_size)\n ]\n\n for objects in object_chunks:\n this_sensory = self._get_sensory_by_ids(objects)\n this_last_mask = self._get_mask_by_ids(last_mask, objects)\n this_msk_value = self._get_visual_values_by_ids(objects) # (1/2)*num_objects*C*N\n visual_readout = self._readout(affinity,\n this_msk_value).view(bs, len(objects), self.CV, h, w)\n pixel_readout = network.pixel_fusion(pix_feat, visual_readout, this_sensory,\n this_last_mask)\n this_obj_mem = self._get_object_mem_by_ids(objects).unsqueeze(2)\n readout_memory, aux_features = network.readout_query(pixel_readout, this_obj_mem)\n for i, obj in enumerate(objects):\n all_readout_mem[obj] = readout_memory[:, i]\n\n if self.save_aux:\n aux_output = {\n 'sensory': this_sensory,\n 'pixel_readout': pixel_readout,\n 'q_logits': aux_features['logits'] if aux_features else None,\n 'q_weights': aux_features['q_weights'] if aux_features else None,\n 
'p_weights': aux_features['p_weights'] if aux_features else None,\n 'attn_mask': aux_features['attn_mask'].float() if aux_features else None,\n }\n self.aux = aux_output\n\n return all_readout_mem\n\n def add_memory(self,\n key: torch.Tensor,\n shrinkage: torch.Tensor,\n msk_value: torch.Tensor,\n obj_value: torch.Tensor,\n objects: List[int],\n selection: torch.Tensor = None,\n *,\n as_permanent: bool = False) -> None:\n # key: (1/2)*C*H*W\n # msk_value: (1/2)*num_objects*C*H*W\n # obj_value: (1/2)*num_objects*Q*C\n # objects contains a list of object ids corresponding to the objects in msk_value/obj_value\n bs = key.shape[0]\n assert shrinkage.shape[0] == bs\n assert msk_value.shape[0] == bs\n assert obj_value.shape[0] == bs\n\n self.engaged = True\n if self.H is None or self.config_stale:\n self.config_stale = False\n self.H, self.W = msk_value.shape[-2:]\n self.HW = self.H * self.W\n # convert from num. frames to num. tokens\n self.max_work_tokens = self.max_mem_frames * self.HW\n if self.use_long_term:\n self.min_work_tokens = self.min_mem_frames * self.HW\n\n # key: bs*C*N\n # value: bs*num_objects*C*N\n key = key.flatten(start_dim=2)\n shrinkage = shrinkage.flatten(start_dim=2)\n self.CK = key.shape[1]\n\n msk_value = msk_value.flatten(start_dim=3)\n self.CV = msk_value.shape[2]\n\n if selection is not None:\n # not used in non-long-term mode\n selection = selection.flatten(start_dim=2)\n\n # insert object values into object memory\n for obj_id, obj in enumerate(objects):\n if obj in self.obj_v:\n \"\"\"streaming average\n each self.obj_v[obj] is (1/2)*num_summaries*(embed_dim+1)\n first embed_dim keeps track of the sum of embeddings\n the last dim keeps the total count\n averaging in done inside the object transformer\n\n incoming obj_value is (1/2)*num_objects*num_summaries*(embed_dim+1)\n self.obj_v[obj] = torch.cat([self.obj_v[obj], obj_value[:, obj_id]], dim=0)\n \"\"\"\n last_acc = self.obj_v[obj][:, :, -1]\n new_acc = last_acc + obj_value[:, obj_id, :, -1]\n\n self.obj_v[obj][:, :, :-1] = (self.obj_v[obj][:, :, :-1] +\n obj_value[:, obj_id, :, :-1])\n self.obj_v[obj][:, :, -1] = new_acc\n else:\n self.obj_v[obj] = obj_value[:, obj_id]\n\n # convert mask value tensor into a dict for insertion\n msk_values = {obj: msk_value[:, obj_id] for obj_id, obj in enumerate(objects)}\n self.work_mem.add(key,\n msk_values,\n shrinkage,\n selection=selection,\n as_permanent=as_permanent)\n\n for bucket_id in self.work_mem.buckets.keys():\n # long-term memory cleanup\n if self.use_long_term:\n # Do memory compressed if needed\n if self.work_mem.non_perm_size(bucket_id) >= self.max_work_tokens:\n # Remove obsolete features if needed\n if self.long_mem.non_perm_size(bucket_id) >= (self.max_long_tokens -\n self.num_prototypes):\n self.long_mem.remove_obsolete_features(\n bucket_id,\n self.max_long_tokens - self.num_prototypes - self.buffer_tokens)\n\n self.compress_features(bucket_id)\n else:\n # FIFO\n self.work_mem.remove_old_memory(bucket_id, self.max_work_tokens)\n\n def purge_except(self, obj_keep_idx: List[int]) -> None:\n # purge certain objects from the memory except the one listed\n self.work_mem.purge_except(obj_keep_idx)\n if self.use_long_term and self.long_mem.engaged():\n self.long_mem.purge_except(obj_keep_idx)\n self.sensory = {k: v for k, v in self.sensory.items() if k in obj_keep_idx}\n\n if not self.work_mem.engaged():\n # everything is removed!\n self.engaged = False\n\n def compress_features(self, bucket_id: int) -> None:\n HW = self.HW\n\n # perform memory consolidation\n 
prototype_key, prototype_value, prototype_shrinkage = self.consolidation(\n *self.work_mem.get_all_sliced(bucket_id, 0, -self.min_work_tokens))\n\n # remove consolidated working memory\n self.work_mem.sieve_by_range(bucket_id,\n 0,\n -self.min_work_tokens,\n min_size=self.min_work_tokens)\n\n # add to long-term memory\n self.long_mem.add(prototype_key,\n prototype_value,\n prototype_shrinkage,\n selection=None,\n supposed_bucket_id=bucket_id)\n\n def consolidation(self, candidate_key: torch.Tensor, candidate_shrinkage: torch.Tensor,\n candidate_selection: torch.Tensor, candidate_value: Dict[int, torch.Tensor],\n usage: torch.Tensor) -> (torch.Tensor, Dict[int, torch.Tensor], torch.Tensor):\n # find the indices with max usage\n bs = candidate_key.shape[0]\n assert bs in [1, 2]\n\n prototype_key = []\n prototype_selection = []\n for bi in range(bs):\n _, max_usage_indices = torch.topk(usage[bi], k=self.num_prototypes, dim=-1, sorted=True)\n prototype_indices = max_usage_indices.flatten()\n prototype_key.append(candidate_key[bi, :, prototype_indices])\n prototype_selection.append(candidate_selection[bi, :, prototype_indices])\n prototype_key = torch.stack(prototype_key, dim=0)\n prototype_selection = torch.stack(prototype_selection, dim=0)\n \"\"\"\n Potentiation step\n \"\"\"\n similarity = get_similarity(candidate_key, candidate_shrinkage, prototype_key,\n prototype_selection)\n affinity = do_softmax(similarity)\n\n # readout the values\n prototype_value = {k: self._readout(affinity, v) for k, v in candidate_value.items()}\n\n # readout the shrinkage term\n prototype_shrinkage = self._readout(affinity, candidate_shrinkage)\n\n return prototype_key, prototype_value, prototype_shrinkage\n\n def initialize_sensory_if_needed(self, sample_key: torch.Tensor, ids: List[int]):\n for obj in ids:\n if obj not in self.sensory:\n # also initializes the sensory memory\n bs, _, h, w = sample_key.shape\n self.sensory[obj] = torch.zeros((bs, self.sensory_dim, h, w),\n device=sample_key.device)\n\n def update_sensory(self, sensory: torch.Tensor, ids: List[int]):\n # sensory: 1*num_objects*C*H*W\n for obj_id, obj in enumerate(ids):\n self.sensory[obj] = sensory[:, obj_id]\n\n def get_sensory(self, ids: List[int]):\n # returns (1/2)*num_objects*C*H*W\n return self._get_sensory_by_ids(ids)\n \n def clear_non_permanent_memory(self):\n self.work_mem.clear_non_permanent_memory()\n if self.use_long_term:\n self.long_mem.clear_non_permanent_memory()\n\n def clear_sensory_memory(self):\n self.sensory = {}" }, { "identifier": "ObjectManager", "path": "cutie/inference/object_manager.py", "snippet": "class ObjectManager:\n \"\"\"\n Object IDs are immutable. The same ID always represent the same object.\n Temporary IDs are the positions of each object in the tensor. 
It changes as objects get removed.\n Temporary IDs start from 1.\n \"\"\"\n def __init__(self):\n self.obj_to_tmp_id: Dict[ObjectInfo, int] = {}\n self.tmp_id_to_obj: Dict[int, ObjectInfo] = {}\n self.obj_id_to_obj: Dict[int, ObjectInfo] = {}\n\n self.all_historical_object_ids: List[int] = []\n\n def _recompute_obj_id_to_obj_mapping(self) -> None:\n self.obj_id_to_obj = {obj.id: obj for obj in self.obj_to_tmp_id}\n\n def add_new_objects(\n self, objects: Union[List[ObjectInfo], ObjectInfo,\n List[int]]) -> (List[int], List[int]):\n if not isinstance(objects, list):\n objects = [objects]\n\n corresponding_tmp_ids = []\n corresponding_obj_ids = []\n for obj in objects:\n if isinstance(obj, int):\n obj = ObjectInfo(id=obj)\n\n if obj in self.obj_to_tmp_id:\n # old object\n corresponding_tmp_ids.append(self.obj_to_tmp_id[obj])\n corresponding_obj_ids.append(obj.id)\n else:\n # new object\n new_obj = ObjectInfo(id=obj.id)\n\n # new object\n new_tmp_id = len(self.obj_to_tmp_id) + 1\n self.obj_to_tmp_id[new_obj] = new_tmp_id\n self.tmp_id_to_obj[new_tmp_id] = new_obj\n self.all_historical_object_ids.append(new_obj.id)\n corresponding_tmp_ids.append(new_tmp_id)\n corresponding_obj_ids.append(new_obj.id)\n\n self._recompute_obj_id_to_obj_mapping()\n assert corresponding_tmp_ids == sorted(corresponding_tmp_ids)\n return corresponding_tmp_ids, corresponding_obj_ids\n\n def delete_object(self, obj_ids_to_remove: Union[int, List[int]]) -> None:\n # delete an object or a list of objects\n # re-sort the tmp ids\n if isinstance(obj_ids_to_remove, int):\n obj_ids_to_remove = [obj_ids_to_remove]\n\n new_tmp_id = 1\n total_num_id = len(self.obj_to_tmp_id)\n\n local_obj_to_tmp_id = {}\n local_tmp_to_obj_id = {}\n\n for tmp_iter in range(1, total_num_id + 1):\n obj = self.tmp_id_to_obj[tmp_iter]\n if obj.id not in obj_ids_to_remove:\n local_obj_to_tmp_id[obj] = new_tmp_id\n local_tmp_to_obj_id[new_tmp_id] = obj\n new_tmp_id += 1\n\n self.obj_to_tmp_id = local_obj_to_tmp_id\n self.tmp_id_to_obj = local_tmp_to_obj_id\n self._recompute_obj_id_to_obj_mapping()\n\n def purge_inactive_objects(self,\n max_missed_detection_count: int) -> (bool, List[int], List[int]):\n # remove tmp ids of objects that are removed\n obj_id_to_be_deleted = []\n tmp_id_to_be_deleted = []\n tmp_id_to_keep = []\n obj_id_to_keep = []\n\n for obj in self.obj_to_tmp_id:\n if obj.poke_count > max_missed_detection_count:\n obj_id_to_be_deleted.append(obj.id)\n tmp_id_to_be_deleted.append(self.obj_to_tmp_id[obj])\n else:\n tmp_id_to_keep.append(self.obj_to_tmp_id[obj])\n obj_id_to_keep.append(obj.id)\n\n purge_activated = len(obj_id_to_be_deleted) > 0\n if purge_activated:\n self.delete_object(obj_id_to_be_deleted)\n return purge_activated, tmp_id_to_keep, obj_id_to_keep\n\n def tmp_to_obj_cls(self, mask) -> torch.Tensor:\n # remap tmp id cls representation to the true object id representation\n new_mask = torch.zeros_like(mask)\n for tmp_id, obj in self.tmp_id_to_obj.items():\n new_mask[mask == tmp_id] = obj.id\n return new_mask\n\n def get_tmp_to_obj_mapping(self) -> Dict[int, ObjectInfo]:\n # returns the mapping in a dict format for saving it with pickle\n return {obj.id: tmp_id for obj, tmp_id in self.tmp_id_to_obj.items()}\n\n def realize_dict(self, obj_dict, dim=1) -> torch.Tensor:\n # turns a dict indexed by obj id into a tensor, ordered by tmp IDs\n output = []\n for _, obj in self.tmp_id_to_obj.items():\n if obj.id not in obj_dict:\n raise NotImplementedError\n output.append(obj_dict[obj.id])\n output = torch.stack(output, dim=dim)\n 
return output\n\n def make_one_hot(self, cls_mask) -> torch.Tensor:\n output = []\n for _, obj in self.tmp_id_to_obj.items():\n output.append(cls_mask == obj.id)\n if len(output) == 0:\n output = torch.zeros((0, *cls_mask.shape), dtype=torch.bool, device=cls_mask.device)\n else:\n output = torch.stack(output, dim=0)\n return output\n\n @property\n def all_obj_ids(self) -> List[int]:\n return [k.id for k in self.obj_to_tmp_id]\n\n @property\n def num_obj(self) -> int:\n return len(self.obj_to_tmp_id)\n\n def has_all(self, objects: List[int]) -> bool:\n for obj in objects:\n if obj not in self.obj_to_tmp_id:\n return False\n return True\n\n def find_object_by_id(self, obj_id) -> ObjectInfo:\n return self.obj_id_to_obj[obj_id]\n\n def find_tmp_by_id(self, obj_id) -> int:\n return self.obj_to_tmp_id[self.obj_id_to_obj[obj_id]]" }, { "identifier": "ImageFeatureStore", "path": "cutie/inference/image_feature_store.py", "snippet": "class ImageFeatureStore:\n \"\"\"\n A cache for image features.\n These features might be reused at different parts of the inference pipeline.\n This class provide an interface for reusing these features.\n It is the user's responsibility to delete redundant features.\n\n Feature of a frame should be associated with a unique index -- typically the frame id.\n \"\"\"\n def __init__(self, network: CUTIE, no_warning: bool = False):\n self.network = network\n self._store = {}\n self.no_warning = no_warning\n\n def _encode_feature(self, index: int, image: torch.Tensor) -> None:\n ms_features, pix_feat = self.network.encode_image(image)\n key, shrinkage, selection = self.network.transform_key(ms_features[0])\n self._store[index] = (ms_features, pix_feat, key, shrinkage, selection)\n\n def get_features(self, index: int,\n image: torch.Tensor) -> (Iterable[torch.Tensor], torch.Tensor):\n if index not in self._store:\n self._encode_feature(index, image)\n\n return self._store[index][:2]\n\n def get_key(self, index: int,\n image: torch.Tensor) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n if index not in self._store:\n self._encode_feature(index, image)\n\n return self._store[index][2:]\n\n def delete(self, index: int) -> None:\n if index in self._store:\n del self._store[index]\n\n def __len__(self):\n return len(self._store)\n\n def __del__(self):\n if len(self._store) > 0 and not self.no_warning:\n warnings.warn(f'Leaking {self._store.keys()} in the image feature store')" }, { "identifier": "CUTIE", "path": "cutie/model/cutie.py", "snippet": "class CUTIE(nn.Module):\n def __init__(self, cfg: DictConfig, *, single_object=False):\n super().__init__()\n model_cfg = cfg.model\n self.ms_dims = model_cfg.pixel_encoder.ms_dims\n self.key_dim = model_cfg.key_dim\n self.value_dim = model_cfg.value_dim\n self.sensory_dim = model_cfg.sensory_dim\n self.pixel_dim = model_cfg.pixel_dim\n self.embed_dim = model_cfg.embed_dim\n self.single_object = single_object\n\n log.info(f'Single object: {self.single_object}')\n\n self.pixel_encoder = PixelEncoder(model_cfg)\n self.pix_feat_proj = nn.Conv2d(self.ms_dims[0], self.pixel_dim, kernel_size=1)\n self.key_proj = KeyProjection(model_cfg)\n self.mask_encoder = MaskEncoder(model_cfg, single_object=single_object)\n self.mask_decoder = MaskDecoder(model_cfg)\n self.pixel_fuser = PixelFeatureFuser(model_cfg, single_object=single_object)\n self.object_transformer = QueryTransformer(model_cfg)\n self.object_summarizer = ObjectSummarizer(model_cfg)\n self.aux_computer = AuxComputer(cfg)\n\n self.register_buffer(\"pixel_mean\", 
torch.Tensor(model_cfg.pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(model_cfg.pixel_std).view(-1, 1, 1), False)\n\n def _get_others(self, masks: torch.Tensor) -> torch.Tensor:\n # for each object, return the sum of masks of all other objects\n if self.single_object:\n return None\n\n num_objects = masks.shape[1]\n if num_objects >= 1:\n others = (masks.sum(dim=1, keepdim=True) - masks).clamp(0, 1)\n else:\n others = torch.zeros_like(masks)\n return others\n\n def encode_image(self, image: torch.Tensor) -> (Iterable[torch.Tensor], torch.Tensor):\n image = (image - self.pixel_mean) / self.pixel_std\n ms_image_feat = self.pixel_encoder(image)\n return ms_image_feat, self.pix_feat_proj(ms_image_feat[0])\n\n def encode_mask(\n self,\n image: torch.Tensor,\n ms_features: List[torch.Tensor],\n sensory: torch.Tensor,\n masks: torch.Tensor,\n *,\n deep_update: bool = True,\n chunk_size: int = -1,\n need_weights: bool = False) -> (torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor):\n image = (image - self.pixel_mean) / self.pixel_std\n others = self._get_others(masks)\n mask_value, new_sensory = self.mask_encoder(image,\n ms_features,\n sensory,\n masks,\n others,\n deep_update=deep_update,\n chunk_size=chunk_size)\n object_summaries, object_logits = self.object_summarizer(masks, mask_value, need_weights)\n return mask_value, new_sensory, object_summaries, object_logits\n\n def transform_key(self,\n final_pix_feat: torch.Tensor,\n *,\n need_sk: bool = True,\n need_ek: bool = True) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n key, shrinkage, selection = self.key_proj(final_pix_feat, need_s=need_sk, need_e=need_ek)\n return key, shrinkage, selection\n\n # Used in training only.\n # This step is replaced by MemoryManager in test time\n def read_memory(self, query_key: torch.Tensor, query_selection: torch.Tensor,\n memory_key: torch.Tensor, memory_shrinkage: torch.Tensor,\n msk_value: torch.Tensor, obj_memory: torch.Tensor, pix_feat: torch.Tensor,\n sensory: torch.Tensor, last_mask: torch.Tensor,\n selector: torch.Tensor) -> (torch.Tensor, Dict[str, torch.Tensor]):\n \"\"\"\n query_key : B * CK * H * W\n query_selection : B * CK * H * W\n memory_key : B * CK * T * H * W\n memory_shrinkage: B * 1 * T * H * W\n msk_value : B * num_objects * CV * T * H * W\n obj_memory : B * num_objects * T * num_summaries * C\n pixel_feature : B * C * H * W\n \"\"\"\n batch_size, num_objects = msk_value.shape[:2]\n\n # read using visual attention\n with torch.cuda.amp.autocast(enabled=False):\n affinity = get_affinity(memory_key.float(), memory_shrinkage.float(), query_key.float(),\n query_selection.float())\n\n msk_value = msk_value.flatten(start_dim=1, end_dim=2).float()\n\n # B * (num_objects*CV) * H * W\n pixel_readout = readout(affinity, msk_value)\n pixel_readout = pixel_readout.view(batch_size, num_objects, self.value_dim,\n *pixel_readout.shape[-2:])\n pixel_readout = self.pixel_fusion(pix_feat, pixel_readout, sensory, last_mask)\n\n # read from query transformer\n mem_readout, aux_features = self.readout_query(pixel_readout, obj_memory, selector=selector)\n\n aux_output = {\n 'sensory': sensory,\n 'q_logits': aux_features['logits'] if aux_features else None,\n 'attn_mask': aux_features['attn_mask'] if aux_features else None,\n }\n\n return mem_readout, aux_output\n\n def pixel_fusion(self,\n pix_feat: torch.Tensor,\n pixel: torch.Tensor,\n sensory: torch.Tensor,\n last_mask: torch.Tensor,\n *,\n chunk_size: int = -1) -> torch.Tensor:\n last_mask = 
F.interpolate(last_mask, size=sensory.shape[-2:], mode='area')\n last_others = self._get_others(last_mask)\n fused = self.pixel_fuser(pix_feat,\n pixel,\n sensory,\n last_mask,\n last_others,\n chunk_size=chunk_size)\n return fused\n\n def readout_query(self,\n pixel_readout,\n obj_memory,\n *,\n selector=None,\n need_weights=False) -> (torch.Tensor, Dict[str, torch.Tensor]):\n return self.object_transformer(pixel_readout,\n obj_memory,\n selector=selector,\n need_weights=need_weights)\n\n def segment(self,\n ms_image_feat: List[torch.Tensor],\n memory_readout: torch.Tensor,\n sensory: torch.Tensor,\n *,\n selector: bool = None,\n chunk_size: int = -1,\n update_sensory: bool = True) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n \"\"\"\n multi_scale_features is from the key encoder for skip-connection\n memory_readout is from working/long-term memory\n sensory is the sensory memory\n last_mask is the mask from the last frame, supplementing sensory memory\n selector is 1 if an object exists, and 0 otherwise. We use it to filter padded objects\n during training.\n \"\"\"\n sensory, logits = self.mask_decoder(ms_image_feat,\n memory_readout,\n sensory,\n chunk_size=chunk_size,\n update_sensory=update_sensory)\n\n prob = torch.sigmoid(logits)\n if selector is not None:\n prob = prob * selector\n\n # Softmax over all objects[]\n logits = aggregate(prob, dim=1)\n logits = F.interpolate(logits, scale_factor=4, mode='bilinear', align_corners=False)\n prob = F.softmax(logits, dim=1)\n\n return sensory, logits, prob\n\n def compute_aux(self, pix_feat: torch.Tensor, aux_inputs: Dict[str, torch.Tensor],\n selector: torch.Tensor) -> Dict[str, torch.Tensor]:\n return self.aux_computer(pix_feat, aux_inputs, selector)\n\n def forward(self, *args, **kwargs):\n raise NotImplementedError\n\n def load_weights(self, src_dict, init_as_zero_if_needed=False) -> None:\n if not self.single_object:\n # Map single-object weight to multi-object weight (4->5 out channels in conv1)\n for k in list(src_dict.keys()):\n if k == 'mask_encoder.conv1.weight':\n if src_dict[k].shape[1] == 4:\n log.info(f'Converting {k} from single object to multiple objects.')\n pads = torch.zeros((64, 1, 7, 7), device=src_dict[k].device)\n if not init_as_zero_if_needed:\n nn.init.orthogonal_(pads)\n log.info(f'Randomly initialized padding for {k}.')\n else:\n log.info(f'Zero-initialized padding for {k}.')\n src_dict[k] = torch.cat([src_dict[k], pads], 1)\n elif k == 'pixel_fuser.sensory_compress.weight':\n if src_dict[k].shape[1] == self.sensory_dim + 1:\n log.info(f'Converting {k} from single object to multiple objects.')\n pads = torch.zeros((self.value_dim, 1, 1, 1), device=src_dict[k].device)\n if not init_as_zero_if_needed:\n nn.init.orthogonal_(pads)\n log.info(f'Randomly initialized padding for {k}.')\n else:\n log.info(f'Zero-initialized padding for {k}.')\n src_dict[k] = torch.cat([src_dict[k], pads], 1)\n elif self.single_object:\n \"\"\"\n If the model is multiple-object and we are training in single-object, \n we strip the last channel of conv1.\n This is not supposed to happen in standard training except when users are trying to\n finetune a trained model with single object datasets.\n \"\"\"\n if src_dict['mask_encoder.conv1.weight'].shape[1] == 5:\n log.warning(f'Converting {k} from multiple objects to single object.'\n 'This is not supposed to happen in standard training.')\n src_dict[k] = src_dict[k][:, :-1]\n\n for k in src_dict:\n if k not in self.state_dict():\n log.info(f'Key {k} found in src_dict but not in 
self.state_dict()!!!')\n for k in self.state_dict():\n if k not in src_dict:\n log.info(f'Key {k} found in self.state_dict() but not in src_dict!!!')\n\n self.load_state_dict(src_dict, strict=False)\n\n @property\n def device(self) -> torch.device:\n return self.pixel_mean.device" }, { "identifier": "pad_divide_by", "path": "cutie/utils/tensor_utils.py", "snippet": "def pad_divide_by(in_img: torch.Tensor, d: int) -> (torch.Tensor, Iterable[int]):\n h, w = in_img.shape[-2:]\n\n if h % d > 0:\n new_h = h + d - h % d\n else:\n new_h = h\n if w % d > 0:\n new_w = w + d - w % d\n else:\n new_w = w\n lh, uh = int((new_h - h) / 2), int(new_h - h) - int((new_h - h) / 2)\n lw, uw = int((new_w - w) / 2), int(new_w - w) - int((new_w - w) / 2)\n pad_array = (int(lw), int(uw), int(lh), int(uh))\n out = F.pad(in_img, pad_array)\n return out, pad_array" }, { "identifier": "unpad", "path": "cutie/utils/tensor_utils.py", "snippet": "def unpad(img: torch.Tensor, pad: Iterable[int]) -> torch.Tensor:\n if len(img.shape) == 4:\n if pad[2] + pad[3] > 0:\n img = img[:, :, pad[2]:-pad[3], :]\n if pad[0] + pad[1] > 0:\n img = img[:, :, :, pad[0]:-pad[1]]\n elif len(img.shape) == 3:\n if pad[2] + pad[3] > 0:\n img = img[:, pad[2]:-pad[3], :]\n if pad[0] + pad[1] > 0:\n img = img[:, :, pad[0]:-pad[1]]\n elif len(img.shape) == 5:\n if pad[2] + pad[3] > 0:\n img = img[:, :, :, pad[2]:-pad[3], :]\n if pad[0] + pad[1] > 0:\n img = img[:, :, :, :, pad[0]:-pad[1]]\n else:\n raise NotImplementedError\n return img" }, { "identifier": "aggregate", "path": "cutie/utils/tensor_utils.py", "snippet": "def aggregate(prob: torch.Tensor, dim: int) -> torch.Tensor:\n with torch.cuda.amp.autocast(enabled=False):\n prob = prob.float()\n new_prob = torch.cat([torch.prod(1 - prob, dim=dim, keepdim=True), prob],\n dim).clamp(1e-7, 1 - 1e-7)\n logits = torch.log((new_prob / (1 - new_prob)))\n\n return logits" } ]
from typing import List, Optional, Iterable, Dict
from omegaconf import DictConfig
from cutie.inference.memory_manager import MemoryManager
from cutie.inference.object_manager import ObjectManager
from cutie.inference.image_feature_store import ImageFeatureStore
from cutie.model.cutie import CUTIE
from cutie.utils.tensor_utils import pad_divide_by, unpad, aggregate
import logging
import numpy as np
import torch
import torch.nn.functional as F
9,330
log = logging.getLogger()


class InferenceCore:
    def __init__(self,
                 network: CUTIE,
                 cfg: DictConfig,
                 *,
                 image_feature_store: ImageFeatureStore = None):
        self.network = network
        self.cfg = cfg
        self.mem_every = cfg.mem_every
        stagger_updates = cfg.stagger_updates
        self.chunk_size = cfg.chunk_size
        self.save_aux = cfg.save_aux
        self.max_internal_size = cfg.max_internal_size
        self.flip_aug = cfg.flip_aug

        self.curr_ti = -1
        self.last_mem_ti = 0
        # at which time indices should we update the sensory memory
        if stagger_updates >= self.mem_every:
            self.stagger_ti = set(range(1, self.mem_every + 1))
        else:
            self.stagger_ti = set(
                np.round(np.linspace(1, self.mem_every, stagger_updates)).astype(int))
        self.object_manager = ObjectManager()
log = logging.getLogger()


class InferenceCore:
    def __init__(self,
                 network: CUTIE,
                 cfg: DictConfig,
                 *,
                 image_feature_store: ImageFeatureStore = None):
        self.network = network
        self.cfg = cfg
        self.mem_every = cfg.mem_every
        stagger_updates = cfg.stagger_updates
        self.chunk_size = cfg.chunk_size
        self.save_aux = cfg.save_aux
        self.max_internal_size = cfg.max_internal_size
        self.flip_aug = cfg.flip_aug

        self.curr_ti = -1
        self.last_mem_ti = 0
        # at which time indices should we update the sensory memory
        if stagger_updates >= self.mem_every:
            self.stagger_ti = set(range(1, self.mem_every + 1))
        else:
            self.stagger_ti = set(
                np.round(np.linspace(1, self.mem_every, stagger_updates)).astype(int))
        self.object_manager = ObjectManager()
self.memory = MemoryManager(cfg=cfg, object_manager=self.object_manager)
0
2023-10-19 17:49:24+00:00
12k
MolecularAI/REINVENT4
tests/chemistry/library_design/test_fragment_reactions_slice_enumerator.py
[ { "identifier": "Conversions", "path": "reinvent/chemistry/conversions.py", "snippet": "class Conversions:\n @staticmethod\n def smiles_to_mols_and_indices(query_smiles: List[str]) -> Tuple[List[Mol], List[int]]:\n mols = [MolFromSmiles(smile) for smile in query_smiles]\n valid_mask = [mol is not None for mol in mols]\n valid_idxs = [idx for idx, is_valid in enumerate(valid_mask) if is_valid]\n valid_mols = [mols[idx] for idx in valid_idxs]\n return valid_mols, valid_idxs\n\n @staticmethod\n def mols_to_fingerprints(\n molecules: List[Mol], radius: int = 3, use_counts: bool = True, use_features: bool = True\n ) -> List[UIntSparseIntVect]:\n fingerprints = [\n AllChem.GetMorganFingerprint(\n mol, radius, useCounts=use_counts, useFeatures=use_features\n )\n for mol in molecules\n ]\n return fingerprints\n\n @staticmethod\n def smiles_to_mols(query_smiles: List[str]) -> List[Mol]:\n mols = [MolFromSmiles(smile) for smile in query_smiles]\n valid_mask = [mol is not None for mol in mols]\n valid_idxs = [idx for idx, is_valid in enumerate(valid_mask) if is_valid]\n valid_mols = [mols[idx] for idx in valid_idxs]\n return valid_mols\n\n def smiles_to_fingerprints(\n self, query_smiles: List[str], radius=3, use_counts=True, use_features=True\n ) -> List[UIntSparseIntVect]:\n mols = self.smiles_to_mols(query_smiles)\n fingerprints = self.mols_to_fingerprints(\n mols, radius=radius, use_counts=use_counts, use_features=use_features\n )\n return fingerprints\n\n def smile_to_mol(self, smile: str) -> Mol:\n \"\"\"\n Creates a Mol object from a SMILES string.\n :param smile: SMILES string.\n :return: A Mol object or None if it's not valid.\n \"\"\"\n if smile:\n return MolFromSmiles(smile)\n\n def mols_to_smiles(\n self, molecules: List[Mol], isomericSmiles=False, canonical=True\n ) -> List[str]:\n \"\"\"This method assumes that all molecules are valid.\"\"\"\n valid_smiles = [\n MolToSmiles(mol, isomericSmiles=isomericSmiles, canonical=canonical)\n for mol in molecules\n ]\n return valid_smiles\n\n def mol_to_smiles(self, molecule: Mol, isomericSmiles=False, canonical=True) -> str:\n \"\"\"\n Converts a Mol object into a canonical SMILES string.\n :param molecule: Mol object.\n :return: A SMILES string.\n \"\"\"\n if molecule:\n return MolToSmiles(molecule, isomericSmiles=isomericSmiles, canonical=canonical)\n\n def mol_to_random_smiles(self, molecule: Mol) -> str:\n \"\"\"\n Converts a Mol object into a random SMILES string.\n :return: A SMILES string.\n \"\"\"\n if molecule:\n new_atom_order = list(range(molecule.GetNumAtoms()))\n random.shuffle(new_atom_order)\n random_mol = RenumberAtoms(molecule, newOrder=new_atom_order)\n return MolToSmiles(random_mol, canonical=False, isomericSmiles=False)\n\n def convert_to_rdkit_smiles(\n self, smiles: str, allowTautomers=True, sanitize=False, isomericSmiles=False\n ) -> str:\n \"\"\"\n :param smiles: Converts a smiles string into a canonical SMILES string.\n :type allowTautomers: allows having same molecule represented in different tautomeric forms\n \"\"\"\n if allowTautomers:\n return MolToSmiles(\n MolFromSmiles(smiles, sanitize=sanitize), isomericSmiles=isomericSmiles\n )\n else:\n return MolStandardize.canonicalize_tautomer_smiles(smiles)\n\n def convert_to_standardized_smiles(self, smiles: str) -> Optional[str]:\n \"\"\"Standardize SMILES for Mol2Mol\n\n This should only be used to validate and transform user input\n because the code will abort execution on any error it finds.\n\n param smiles: single SMILES string\n return: single SMILES string\n 
\"\"\"\n\n mol = MolFromSmiles(smiles, sanitize=True)\n\n if not mol: # RDKit fails silently\n raise RuntimeError(f\"RDKit does not accept SMILES: {smiles}\")\n\n standardizer = Standardizer() # MolVS\n\n try:\n smol = standardizer(mol) # runs SanitizeMol() first\n smol = standardizer.charge_parent(smol) # largest fragment uncharged\n smi = MolToSmiles(smol, isomericSmiles=True)\n except Exception as error: # RDKit may raise multiple exceptions\n raise RuntimeError(f\"RDKit does not accept SMILES: {smiles} {error}\")\n\n # Sometimes when standardizing ChEMBL [H] are not removed so try a\n # second call\n if \"[H]\" in smi:\n return self.convert_to_standardized_smiles(smi)\n else:\n return smi\n\n def copy_mol(self, molecule: Mol) -> Mol:\n \"\"\"\n Copies, sanitizes, canonicalizes and cleans a molecule.\n :param molecule: A Mol object to copy.\n :return : Another Mol object copied, sanitized, canonicalized and cleaned.\n \"\"\"\n return self.smile_to_mol(self.mol_to_smiles(molecule))\n\n def randomize_smiles(self, smiles: str) -> str:\n \"\"\"\n Returns a random SMILES given a SMILES of a molecule.\n :param smiles: A smiles string\n :returns: A random SMILES string of the same molecule or None if the molecule is invalid.\n \"\"\"\n mol = MolFromSmiles(smiles)\n if mol:\n new_atom_order = list(range(mol.GetNumHeavyAtoms()))\n random.shuffle(new_atom_order)\n random_mol = RenumberAtoms(mol, newOrder=new_atom_order)\n return MolToSmiles(random_mol, canonical=False, isomericSmiles=False)\n\n def mol_to_inchi_key(self, molecule: Mol) -> str:\n \"\"\"Returns the standard InChI key for a molecule\"\"\"\n if molecule:\n inchi_key = MolToInchiKey(molecule)\n return inchi_key\n\n def mol_to_sdf(self, molecules: List, input_sdf_path: str):\n \"\"\"Write a set of molecules to sdf file\"\"\"\n writer = SDWriter(input_sdf_path)\n for mol in molecules:\n writer.write(mol)" }, { "identifier": "AttachmentPoints", "path": "reinvent/chemistry/library_design/attachment_points.py", "snippet": "class AttachmentPoints:\n def __init__(self):\n self._conversions = Conversions()\n self._tokens = TransformationTokens()\n\n def add_attachment_point_numbers(self, mol_or_smi, canonicalize=True):\n \"\"\"\n Adds the numbers for the attachment points throughout the molecule.\n :param mol_or_smi: SMILES string to convert.\n :param canonicalize: Canonicalize the SMILES so that the attachment points are always in the same order.\n :return : A converted SMILES string.\n \"\"\"\n if isinstance(mol_or_smi, str):\n smi = mol_or_smi\n if canonicalize:\n smi = self._conversions.mol_to_smiles(self._conversions.smile_to_mol(mol_or_smi))\n # only add numbers ordered by the SMILES ordering\n num = -1\n\n def _ap_callback(_):\n nonlocal num\n num += 1\n return \"[{}:{}]\".format(self._tokens.ATTACHMENT_POINT_TOKEN, num)\n\n return re.sub(self._tokens.ATTACHMENT_POINT_REGEXP, _ap_callback, smi)\n else:\n mol = mol_or_smi\n if canonicalize:\n mol = self._conversions.smile_to_mol(self._conversions.mol_to_smiles(mol))\n idx = 0\n for atom in mol.GetAtoms():\n if atom.GetSymbol() == self._tokens.ATTACHMENT_POINT_TOKEN:\n atom.SetProp(\"molAtomMapNumber\", str(idx))\n idx += 1\n return self._conversions.mol_to_smiles(mol)\n\n def get_attachment_points(self, smile: str) -> List:\n \"\"\"\n Gets all attachment points from SMILES string.\n :param smile: A SMILES string\n :return : A list with the numbers ordered by appearance.\n \"\"\"\n return [\n int(match.group(1))\n for match in re.finditer(self._tokens.ATTACHMENT_POINT_NUM_REGEXP, 
smile)\n ]\n\n def get_attachment_points_for_molecule(self, molecule: Mol) -> List:\n \"\"\"\n Gets all attachment points from RDKit Mol.\n :param molecule: A Mol object.\n :return : A list with the numbers ordered by appearance.\n \"\"\"\n if isinstance(molecule, Mol):\n return [\n int(atom.GetProp(\"molAtomMapNumber\"))\n for atom in molecule.GetAtoms()\n if atom.GetSymbol() == self._tokens.ATTACHMENT_POINT_TOKEN\n and atom.HasProp(\"molAtomMapNumber\")\n ]\n\n def add_first_attachment_point_number(self, smi, num):\n \"\"\"\n Changes/adds a number to the first attachment point.\n :param smi: SMILES string with the molecule.\n :param num: Number to add.\n :return: A SMILES string with the number added.\n \"\"\"\n return re.sub(\n self._tokens.ATTACHMENT_POINT_REGEXP,\n \"[{}:{}]\".format(self._tokens.ATTACHMENT_POINT_TOKEN, num),\n smi,\n count=1,\n )\n\n def remove_attachment_point_numbers(self, smile: str) -> str:\n \"\"\"\n Removes the numbers for the attachment points throughout the molecule.\n :param smile: SMILES string.\n :return : A converted SMILES string.\n \"\"\"\n result = re.sub(\n self._tokens.ATTACHMENT_POINT_NUM_REGEXP,\n \"[{}]\".format(self._tokens.ATTACHMENT_POINT_TOKEN),\n smile,\n )\n return result\n\n def remove_attachment_point_numbers_from_mol(self, molecule: Mol) -> Mol:\n \"\"\"\n Removes the numbers for the attachment points throughout the molecule.\n :param molecule: RDKit molecule.\n :return : A molecule.\n \"\"\"\n if isinstance(molecule, Mol):\n for atom in molecule.GetAtoms():\n atom.ClearProp(\"molAtomMapNumber\")\n return molecule\n\n def add_brackets_to_attachment_points(self, scaffold: str):\n \"\"\"\n Adds brackets to the attachment points (if they don't have them).\n :param scaffold: SMILES string.\n :return: A SMILES string with attachments in brackets.\n \"\"\"\n return re.sub(\n self._tokens.ATTACHMENT_POINT_NO_BRACKETS_REGEXP,\n \"[{}]\".format(self._tokens.ATTACHMENT_POINT_TOKEN),\n scaffold,\n )" }, { "identifier": "BondMaker", "path": "reinvent/chemistry/library_design/bond_maker.py", "snippet": "class BondMaker:\n def __init__(self):\n self._conversions = Conversions()\n self._tokens = TransformationTokens()\n self._attachment_points = AttachmentPoints()\n\n def join_scaffolds_and_decorations(\n self, scaffold_smi: str, decorations_smi, keep_labels_on_atoms=False\n ) -> Optional[Mol]:\n decorations_smi = [\n self._attachment_points.add_first_attachment_point_number(dec, i)\n for i, dec in enumerate(decorations_smi.split(self._tokens.ATTACHMENT_SEPARATOR_TOKEN))\n ]\n num_attachment_points = len(self._attachment_points.get_attachment_points(scaffold_smi))\n if len(decorations_smi) != num_attachment_points:\n return None\n\n mol = self._conversions.smile_to_mol(scaffold_smi)\n for decoration in decorations_smi:\n mol = self.join_molecule_fragments(\n mol,\n self._conversions.smile_to_mol(decoration),\n keep_label_on_atoms=keep_labels_on_atoms,\n )\n if not mol:\n return None\n return mol\n\n def join_molecule_fragments(self, scaffold: Mol, decoration: Mol, keep_label_on_atoms=False):\n \"\"\"\n Joins a RDKit MOL scaffold with a decoration. 
They must be labelled.\n :param scaffold: RDKit MOL of the scaffold.\n :param decoration: RDKit MOL of the decoration.\n :param keep_label_on_atoms: Add the labels to the atoms after attaching the molecule.\n This is useful when debugging, but it can give problems.\n :return: A Mol object of the joined scaffold.\n \"\"\"\n\n if scaffold and decoration:\n # obtain id in the decoration\n try:\n attachment_points = [\n atom.GetProp(\"molAtomMapNumber\")\n for atom in decoration.GetAtoms()\n if atom.GetSymbol() == self._tokens.ATTACHMENT_POINT_TOKEN\n ]\n if len(attachment_points) != 1:\n return None # more than one attachment point...\n attachment_point = attachment_points[0]\n except KeyError:\n return None\n\n combined_scaffold = RWMol(CombineMols(decoration, scaffold))\n attachments = [\n atom\n for atom in combined_scaffold.GetAtoms()\n if atom.GetSymbol() == self._tokens.ATTACHMENT_POINT_TOKEN\n and atom.HasProp(\"molAtomMapNumber\")\n and atom.GetProp(\"molAtomMapNumber\") == attachment_point\n ]\n if len(attachments) != 2:\n return None # something weird\n\n neighbors = []\n for atom in attachments:\n if atom.GetDegree() != 1:\n return None # the attachment is wrongly generated\n neighbors.append(atom.GetNeighbors()[0])\n\n bonds = [atom.GetBonds()[0] for atom in attachments]\n bond_type = BondType.SINGLE\n if any(bond for bond in bonds if bond.GetBondType() == BondType.DOUBLE):\n bond_type = BondType.DOUBLE\n\n combined_scaffold.AddBond(neighbors[0].GetIdx(), neighbors[1].GetIdx(), bond_type)\n combined_scaffold.RemoveAtom(attachments[0].GetIdx())\n combined_scaffold.RemoveAtom(attachments[1].GetIdx())\n\n if keep_label_on_atoms:\n for neigh in neighbors:\n self._add_attachment_point_num(neigh, attachment_point)\n\n # Label the atoms in the bond\n bondNumbers = [\n int(atom.GetProp(\"bondNum\"))\n for atom in combined_scaffold.GetAtoms()\n if atom.HasProp(\"bondNum\")\n ]\n\n if bondNumbers:\n bondNum = max(bondNumbers) + 1\n else:\n bondNum = 0\n\n for neighbor in neighbors:\n idx = neighbor.GetIdx()\n atom = combined_scaffold.GetAtomWithIdx(idx)\n atom.SetIntProp(\"bondNum\", bondNum)\n ##########################################\n\n scaffold = combined_scaffold.GetMol()\n try:\n SanitizeMol(scaffold)\n except ValueError: # sanitization error\n return None\n else:\n return None\n\n return scaffold\n\n def _add_attachment_point_num(self, atom, idx):\n idxs = []\n if atom.HasProp(\"molAtomMapNumber\"):\n idxs = atom.GetProp(\"molAtomMapNumber\").split(\",\")\n idxs.append(str(idx))\n idxs = sorted(list(set(idxs)))\n atom.SetProp(\"molAtomMapNumber\", \",\".join(idxs))\n # Fixme: This way of annotating fails in case of several attachment points when the mol is converted back to a\n # SMILES string (RuntimeError: boost::bad_any_cast: failed conversion using boost::any_cast)\n # For example combining scaffold '*C(*)CC' and warhead pair '*OC|*C' would result in\n # C[O:0][CH:0,1]([CH3:1])CC, which results in an error due to the '0,1'\n\n def randomize_scaffold(self, scaffold: Mol):\n smi = self._conversions.mol_to_random_smiles(scaffold)\n conv_smi = None\n if smi:\n conv_smi = self._attachment_points.add_brackets_to_attachment_points(smi)\n return conv_smi" }, { "identifier": "FragmentReactionSliceEnumerator", "path": "reinvent/chemistry/library_design/fragment_reaction_slice_enumerator.py", "snippet": "class FragmentReactionSliceEnumerator:\n def __init__(\n self,\n chemical_reactions: List[ReactionDTO],\n scaffold_conditions: List[FilteringConditionDTO],\n decoration_conditions: 
List[FilteringConditionDTO],\n ):\n \"\"\"\n Class to enumerate slicings given certain conditions.\n :param chemical_reactions: A list of ChemicalReaction objects.\n :param scaffold_conditions: Conditions to use when filtering scaffolds obtained from slicing molecules (see FragmentFilter).\n :param decoration_conditions: Conditions to use when filtering decorations obtained from slicing molecules.\n \"\"\"\n self._tockens = TransformationTokens()\n self._chemical_reactions = chemical_reactions\n self._scaffold_filter = FragmentFilter(scaffold_conditions)\n self._decoration_filter = FragmentFilter(decoration_conditions)\n self._reactions = FragmentReactions()\n self._conversions = Conversions()\n\n def enumerate(self, molecule: Mol, cuts: int) -> List[FragmentedMolecule]:\n \"\"\"\n Enumerates all possible combination of slicings of a molecule given a number of cuts.\n :param molecule: A mol object with the molecule to slice.\n :param cuts: The number of cuts to perform.\n :return : A list with all the possible (scaffold, decorations) pairs as SlicedMol objects.\n \"\"\"\n original_smiles = self._conversions.mol_to_smiles(molecule)\n sliced_mols = set()\n for cut in range(1, cuts + 1):\n if cut == 1:\n fragment_pairs = self._reactions.slice_molecule_to_fragments(\n molecule, self._chemical_reactions\n )\n\n for pair in fragment_pairs:\n for indx, _ in enumerate(pair):\n decorations = self._select_all_except(pair, indx)\n decoration = self._conversions.copy_mol(decorations[0])\n labeled_decoration = OrderedDict()\n labeled_decoration[0] = decoration # [ for decoration in decorations]\n\n scaffold = self._conversions.copy_mol(pair[indx])\n labeled_scaffold = self._label_scaffold(scaffold)\n\n # TODO: filtering should take place after scaffold is generated\n sliced_mol = FragmentedMolecule(\n labeled_scaffold, labeled_decoration, original_smiles\n )\n if sliced_mol.original_smiles == sliced_mol.reassembled_smiles:\n sliced_mols.add(sliced_mol)\n else:\n for slice in sliced_mols:\n to_add = self._scaffold_slicing(slice, cut)\n sliced_mols = sliced_mols.union(to_add)\n\n return list(filter(self._filter, sliced_mols))\n\n def _scaffold_slicing(self, slice: FragmentedMolecule, cut: int) -> Set[FragmentedMolecule]:\n to_add = set()\n if slice.decorations_count() == cut - 1:\n fragment_pairs = self._reactions.slice_molecule_to_fragments(\n slice.scaffold, self._chemical_reactions\n )\n\n for pair in fragment_pairs:\n scaffold, decoration = self._split_scaffold_from_decorations(pair, cut)\n if scaffold:\n labeled_scaffold = self._label_scaffold(scaffold)\n labeled_scaffold = self._conversions.copy_mol(labeled_scaffold)\n decoration = self._conversions.copy_mol(decoration)\n sliced_mol = self._create_sliced_molecule(slice, labeled_scaffold, decoration)\n\n if sliced_mol.original_smiles == sliced_mol.reassembled_smiles:\n to_add.add(sliced_mol)\n return to_add\n\n def _select_all_except(self, fragments: Tuple[Mol], to_exclude: int) -> List[Mol]:\n return [fragment for indx, fragment in enumerate(fragments) if indx != to_exclude]\n\n def _filter(self, sliced_mol: FragmentedMolecule) -> bool:\n return self._scaffold_filter.filter(sliced_mol.scaffold) and all(\n self._decoration_filter.filter(dec) for dec in sliced_mol.decorations.values()\n )\n\n def _split_scaffold_from_decorations(self, pair: Tuple[Mol], cuts: int) -> Tuple[Mol, Mol]:\n decoration = None\n scaffold = None\n for frag in pair:\n num_att = len(\n [\n atom\n for atom in frag.GetAtoms()\n if atom.GetSymbol() == 
self._tockens.ATTACHMENT_POINT_TOKEN\n ]\n )\n # detect whether there is one fragment with as many attachment points as cuts (scaffold)\n # the rest are decorations\n if num_att == cuts and not scaffold:\n scaffold = frag\n if num_att == 1:\n decoration = frag\n if decoration and scaffold:\n return scaffold, decoration\n else:\n return (None, None)\n\n def _label_scaffold(self, scaffold: Mol) -> Mol:\n highest_number = self._find_highest_number(scaffold)\n\n for atom in scaffold.GetAtoms():\n if atom.GetSymbol() == self._tockens.ATTACHMENT_POINT_TOKEN:\n try:\n atom_number = int(atom.GetProp(\"molAtomMapNumber\"))\n except:\n highest_number += 1\n num = atom.GetIsotope()\n atom.SetIsotope(0)\n atom.SetProp(\"molAtomMapNumber\", str(highest_number))\n scaffold.UpdatePropertyCache()\n\n return scaffold\n\n def _find_highest_number(self, cut_mol: Mol) -> int:\n highest_number = -1\n\n for atom in cut_mol.GetAtoms():\n if atom.GetSymbol() == self._tockens.ATTACHMENT_POINT_TOKEN:\n try:\n atom_number = int(atom.GetProp(\"molAtomMapNumber\"))\n if highest_number < atom_number:\n highest_number = atom_number\n except:\n pass\n return highest_number\n\n def _create_sliced_molecule(\n self, original_sliced_mol: FragmentedMolecule, scaffold: Mol, decoration: Mol\n ) -> FragmentedMolecule:\n old_decorations = OrderedDict()\n for k, v in original_sliced_mol.decorations.items():\n old_decorations[k] = v\n old_decorations[original_sliced_mol.decorations_count()] = decoration\n sliced_mol = FragmentedMolecule(\n scaffold, old_decorations, original_sliced_mol.original_smiles\n )\n return sliced_mol" }, { "identifier": "FilteringConditionDTO", "path": "reinvent/chemistry/library_design/dtos/filtering_condition_dto.py", "snippet": "class FilteringConditionDTO:\n name: str\n min: float = None\n max: float = None\n equals: float = None" }, { "identifier": "MolecularDescriptorsEnum", "path": "reinvent/chemistry/library_design/enums/molecular_descriptors_enum.py", "snippet": "class MolecularDescriptorsEnum:\n HEAVY_ATOM_COUNT = \"heavy_atom_count\"\n MOLECULAR_WEIGHT = \"molecular_weight\"\n CLOGP = \"clogp\"\n HYDROGEN_BOND_DONORS = \"hydrogen_bond_donors\"\n HYDROGEN_BOND_ACCEPTORS = \"hydrogen_bond_acceptors\"\n ROTATABLE_BONDS = \"rotatable_bonds\"\n RING_COUNT = \"ring_count\"" }, { "identifier": "FragmentReactions", "path": "reinvent/chemistry/library_design/fragment_reactions.py", "snippet": "class FragmentReactions:\n def __init__(self):\n self._conversions = Conversions()\n self._tokens = TransformationTokens()\n self._bond_mapper = BondMapper()\n\n def create_reactions_from_smarts(self, smarts: List[str]) -> List[ChemicalReaction]:\n reactions = [AllChem.ReactionFromSmarts(smirks) for smirks in smarts]\n return reactions\n\n def create_reaction_from_smirk(self, smirks: str) -> ReactionDTO:\n reaction = ReactionDTO(smirks, AllChem.ReactionFromSmarts(smirks))\n return reaction\n\n def create_reactions_from_smirks(self, smirks: List[str]) -> List[ReactionDTO]:\n reactions = [self.create_reaction_from_smirk(smirk) for smirk in smirks]\n return reactions\n\n def slice_molecule_to_fragments(\n self, molecule: Mol, reaction_dtos: List[ReactionDTO]\n ) -> List[Tuple[Mol]]:\n \"\"\"\n This method applies a list of chemical reactions on a molecule and\n decomposes the input molecule to complementary fragments.\n :param molecule:\n :param reaction_dtos:\n :return: Different slicing combinations are returned.\n \"\"\"\n list_of_outcomes = self.apply_reactions_on_molecule(molecule, reaction_dtos)\n all_outcomes = 
[]\n\n for outcome in list_of_outcomes:\n all_outcomes.extend(outcome.reaction_outcomes)\n # TODO: the overall data processing is extremely slow. consider reducing redundancy here.\n return all_outcomes\n\n def apply_reactions_on_molecule(\n self, molecule: Mol, reaction_dtos: List[ReactionDTO]\n ) -> List[ReactionOutcomeDTO]:\n \"\"\"Build list of possible splits of a molecule given multiple reactions.\"\"\"\n list_of_outcomes = []\n for reaction_dto in reaction_dtos:\n outcome_dto = self.apply_reaction_on_molecule(molecule, reaction_dto)\n purged_outcome_dto = self._filter_pairs_with_no_ring_count_change(outcome_dto)\n list_of_outcomes.append(purged_outcome_dto)\n return list_of_outcomes\n\n def apply_reaction_on_molecule(\n self, molecule: Mol, reaction_dto: ReactionDTO\n ) -> ReactionOutcomeDTO:\n \"\"\"Build list of possible splits of a molecule given a single reaction.\"\"\"\n molecule = self._conversions.copy_mol(molecule)\n outcomes = reaction_dto.chemical_reaction.RunReactant(molecule, 0)\n outcome_dto = ReactionOutcomeDTO(reaction_dto.reaction_smarts, list(outcomes), molecule)\n return outcome_dto\n\n def _filter_pairs_with_no_ring_count_change(\n self, outcome_dto: ReactionOutcomeDTO\n ) -> ReactionOutcomeDTO:\n molecule_rings = RingCount(outcome_dto.targeted_molecule)\n acceptable_pairs = []\n for pair in outcome_dto.reaction_outcomes:\n if not self._detect_ring_break(molecule_rings, pair) and len(pair) == 2:\n acceptable_pairs.append(pair)\n outcome_dto.reaction_outcomes = acceptable_pairs\n return outcome_dto\n\n def _detect_ring_break(self, molecule_ring_count: int, pair: Tuple[Mol]) -> bool:\n reagent_rings = 0\n for reagent in pair:\n reagent_smiles = self._conversions.mol_to_smiles(reagent)\n reagent_mol = self._conversions.smile_to_mol(reagent_smiles)\n try:\n reagent_rings = reagent_rings + RingCount(reagent_mol)\n except:\n return True\n return molecule_ring_count != reagent_rings" }, { "identifier": "FRAGMENT_REACTION_SUZUKI", "path": "tests/chemistry/library_design/fixtures.py", "snippet": "FRAGMENT_REACTION_SUZUKI = [\"[*;$(c2aaaaa2),$(c2aaaa2):1]-!@[*;$(c2aaaaa2),$(c2aaaa2):2]>>[*:1][*].[*:2][*]\"]" }, { "identifier": "FRAGMENT_REACTIONS", "path": "tests/chemistry/library_design/fixtures.py", "snippet": "FRAGMENT_REACTIONS = 
[\n'[#6;$(C[C;$(C([#6]))]):4]-!@[N;$([NH1;D2](C)C);!$(N-[#6]=[*]);$(N([C])):3]>>[#6:4][*].[N:3][*]',\n'[C;$([CH;$(C([#6])[#6])]),$([CH2;$(C[#6])]):1]-!@[N;$(N(C=O)C=O):2]>>[*:1][*].[*:2][*]',\n'[C;$([CH;$(C([#6])[#6])]),$([CH2;$(C[#6])]):1]-!@[O;$(Oc1ccccc1):2]>>[*:1][*].[*:2][*]',\n'[C;$([CH;$(C([#6])[#6])]),$([CH2;$(C[#6])]):1]-!@[N;$(N([#6])S(=O)=O):2]>>[*:1][*].[*:2][*]',\n'[S;$(S(=O)(=O)[C,N]):1]-!@[N+0;$(NC):2]>>[*:1][*].[*:2][*]',\n'[N;$(N-[#6]):3]-!@[C;$(C=O):1]-!@[N+0;$(N[#6]);!$(N=*);!$([N-]);!$(N#*);!$([ND1]);!$(N[O,N]):2]>>[*:1][*].[*:2][*]',\n'[#6;!$([#6]=*);!$([#6]~[O,N,S]);$([#6]~[#6]):1][c:2]>>[*:2][*].[*:1][*]',\n'[#6;$(C=[#6!H0]):1][C;$(C#N):2]>>[*:1][*].[*][*:2]',\n'[#6:1]([N+]([O-])=O)=[#6:2]>>[*:1][*][N+]([O-])=O.[*:2][*]',\n'[#6;!$(A(A=[O,S]));!$(A=*);!$([A-]);!$(A~[P,S,O,N]):3][C:1](=[#7:2])[N!H0;!$(A(A=[O,S]));!$(A=*);!$([A-]);!$(A~[P,S,O,N]):4]>>[#6:3][C:1]([*])=[N:2].[#7!H0:4][*]',\n'[#6;!$(C(C=*)(C=*));!$([#6]~[O,N,S]);$([#6]~[#6]):1][C:2](=[O:3])[N;D2;$(N(C=[O,S]));!$(N~[O,P,S,N]):4][#6;!$(C=*);!$([#6](~[O,N,S])N);$([#6]~[#6]):5]>>[#6:1][C:2](=[O:3])[*].[*][N:4][#6:5]',\n'[#6;!R;!$(C=*);!$([#6]~[O,N,S]);$([#6]~[#6]):1][#6;!R;!$(C=*);!$([#6]~[O,N,S]);$([#6]~[#6]):2]>>[#6:1][*].[#6:2][*]',\n'[N;!H0:1]([C:2]([#7:5][#6:6])=[#8:3])[#6:4]>>[#8:3]=[C:2]([#7:1][#6:4])[*].[*][#7:5][#6:6]',\n'[#6;!$(C(C=*)(C=*));!$([#6]~[O,N,S]);$([#6]~[#6]):1][C:2](=[O:3])[N;D2;$(N(C=[O,S]));!$(N~[O,P,S,N]):4][#6;!$(C=*);!$([#6](~[O,N,S])N);$([#6]~[#6]):5]>>[#6:1][C:2](=[O:3])[*].[*][N:4][#6:5]',\n'[#6;!$([#6]=*);!$([#6]~[O,N,S,P]);$([#6]~[#6]):2]-!@[#6;!$([#6]=*);!$([#6]~[O,N,S,P]);$([#6]~[#6]):1]>>[#6;$([#6]~[#6]);!$([#6]~[S,N,O,P]):1][*].[*][#6;$([#6]~[#6]);!$([#6]~[S,N,O,P]):2]',\n'[CH2;$([#6]~[#6]):4]-!@[O:3]-!@[#6;$([#6]~[#6]);!$([#6]=O):2]>>[#6;$([#6]~[#6]);!$([#6]=O):2][#8][*].[*][#6;H2;$([#6]~[#6]):4]',\n'[*;$(c2aaaaa2),$(c2aaaa2):1]-!@[*;$(c2aaaaa2),$(c2aaaa2):2]>>[*:1][*].[*:2][*]',\n'[*;$(c2aaaaa2),$(c2aaaa2):4]/[#6:1]=!@[#6:2]/[*;$(c2aaaaa2),$(c2aaaa2):3]>>[#6;c,$(C(=O)O),$(C#N):3][#6;H1:2]=[#6;H1:1][*].[#6;$([#6]=[#6]),$(c:c):4][*]',\n'[#6:4][#6;H0:1]=!@[#6:2]([#6:5])[#6:3]>>[#6;c,$(C(=O)O),$(C#N):3][#6:2]([#6:5])=[#6;$([#6][#6]):1][*].[#6;$([#6]=[#6]),$(c:c):4][*]',\n'[*;$(c);$(C=C-[#6]),$(c):1]-!@[*;$(c):2]>>[#6;$(C=C-[#6]),$(c):1][*].[*][*;$(c):2]',\n'[C;$(C([#6])[#6]):1]([#6:5])([#6:2])([O;H1:3])[#6;!R:4]>>[#6:2][#6:1](*)([#6:5])[O:3].[*][#6:4]',\n'[#6;$(C=C-[#6]),$(c:c):1]-!@[C;$(C#CC):2]>>[#6;$(C=C-[#6]),$(c:c):1][*].[*][CH1;$(C#CC):2]',\n'[c;$(c1:[c,n]:[c,n]:[c,n]:[c,n]:[c,n]:1):1]-!@[N;$(NC)&!$(N=*)&!$([N-])&!$(N#*)&!$([ND1])&!$(N[O])&!$(N[C,S]=[S,O,N]),H2&$(Nc1:[c,n]:[c,n]:[c,n]:[c,n]:[c,n]:1):2]>>[*][c;$(c1:[c,n]:[c,n]:[c,n]:[c,n]:[c,n]:1):1].[*][N:2]',\n'[*;!$(c1ccccc1);$(c1[n,c]c[n,c]c[n,c]1):1]-!@[N;$(NC);!$(N=*);!$([N-]);!$(N#*);!$([ND3]);!$([ND4]);!$(n[c,O]);!$(N[C,S]=[S,O,N]):2]>>[*;!$(c1ccccc1);$(c1[n,c]c[n,c]c[n,c]1):1][*].[*][N:2]',\n'[*;$(c1c(N(~O)~O)cccc1):1]-!@[N;$(NC);!$(N=*);!$([N-]);!$(N#*);!$([ND1]);!$(N[O]);!$(N[C,S]=[S,O,N]):2]>>[*;$(c1c(N(~O)~O)cccc1):1][*].[*][N:2]',\n'[*;$(c1ccc(N(~O)~O)cc1):1]-!@[N;$(NC);!$(N=*);!$([N-]);!$(N#*);!$([ND1]);!$(N[O]);!$(N[C,S]=[S,O,N]):2]>>[*;$(c1ccc(N(~O)~O)cc1):1][*].[*][N:2]',\n'[#6;!$([#6]=*);!$([#6]~[O,N,S]);$([#6]~[#6]):1][#6;!$([#6]=*);!$([#6]~[O,N,S]);$([#6]~[#6]):2]>>[#6;!$([#6]=*);!$([#6]~[O,N,S]);$([#6]~[#6]):1][*].[#6;!$([#6]=*);!$([#6]~[O,N,S]);$([#6]~[#6]):2][*]',\n'[C:2]([#7;!D4:1])(=[O:3])[#6:4]>>[#7:1][*].[C,$(C=O):2](=[O:3])([*])[#6:4]',\n'[#6;$(C(=O)):1][#7,#8,#16:2]>>[*:1][*].[*:2][*]',\n'[O:2
]=[#6:1][#7:5]>>[O:2]=[#6:1][*].[N:5][*]',\n'[#6;$(C=[O]):1][#8,#16:2]>>[*:1][*].[*][*:2]',\n'[N;!$(n1****1);!$(n1*****1);!$(N=*);!$(N(A=A));!$([N-]);!$(N~[O,P,S,N]):1]-!@[#6;!$(C=*);!$(C(A=A));!$([C-]);!$(C~[O,P,S]):2]>>[N:1][*].[*][#6:2]',\n'[#6:8][O:7][C:5](=[O:6])[C:4]([C:2](=[O:3])[#6:1])[#6:9]>>[#6:1][C:2]([C:4]([*])[C:5]([O:7][#6:8])=[O:6])=[O:3].[#6:9][*]',\n'[#6:1][C:2]([#6:7])[C:3](=[O:4])[O:5][#6:6]>>[C;!H0:2]([*])([C:3]([O:5][#6:6])=[O:4])[#6:1].[#6:7][*]',\n'[N;!$(n1****1);!$(n1*****1);!$(N(A=A));!$(N=*);!$([N-]);!$(N~[O,P,S,N]):1][*;$(c1aaaaa1),$(c1aaaa1);!$(C=*);!$(C(A=A));!$([C-]);!$(C~[O,P,S]):2]>>[N:1][*].[#6:2][*]',\n'[C:3]([C:1]([#8:5][#6:6])=[O:2])[#6:7]=[O:8]>>[#6:6][#8:5][C:1](=[O:2])[C!H0:3][*].[#6:7](=[O:8])[*]',\n'[N+:1]([#6:2])([#6:4])([#6:5])[#6:3]>>[N;!$(N=*);!$([N-]);!$(N~[O,P,S,N]):1]([#6:2])([#6:3])([*])[#6:4].[*][#6:5]',\n# '[c:1][C,N,S,O:2]>>[c:1][*].[*:2]'\n]" }, { "identifier": "CELECOXIB", "path": "tests/chemistry/fixtures/test_data.py", "snippet": "CELECOXIB = 'O=S(=O)(c3ccc(n1nc(cc1c2ccc(cc2)C)C(F)(F)F)cc3)N'" } ]
import unittest from reinvent.chemistry import Conversions from reinvent.chemistry.library_design import ( FragmentReactionSliceEnumerator, BondMaker, AttachmentPoints, ) from reinvent.chemistry.library_design.dtos import FilteringConditionDTO from reinvent.chemistry.library_design.enums import MolecularDescriptorsEnum from reinvent.chemistry.library_design.fragment_reactions import FragmentReactions from tests.chemistry.library_design.fixtures import FRAGMENT_REACTION_SUZUKI, FRAGMENT_REACTIONS from tests.chemistry.fixtures.test_data import CELECOXIB
9,975
class TestSingleFragmentReactionsSliceEnumerator(unittest.TestCase): def setUp(self): self.chemistry = Conversions() self.reactions = FragmentReactions() self._bond_maker = BondMaker() self._attachment_points = AttachmentPoints() self._suzuki_reaction_dto_list = self.reactions.create_reactions_from_smirks( FRAGMENT_REACTION_SUZUKI ) self.suzuki_positive_smile = CELECOXIB self.suzuki_positive_molecule = self.chemistry.smile_to_mol(self.suzuki_positive_smile) scaffold_conditions = [] decoration_conditions = []
class TestSingleFragmentReactionsSliceEnumerator(unittest.TestCase): def setUp(self): self.chemistry = Conversions() self.reactions = FragmentReactions() self._bond_maker = BondMaker() self._attachment_points = AttachmentPoints() self._suzuki_reaction_dto_list = self.reactions.create_reactions_from_smirks( FRAGMENT_REACTION_SUZUKI ) self.suzuki_positive_smile = CELECOXIB self.suzuki_positive_molecule = self.chemistry.smile_to_mol(self.suzuki_positive_smile) scaffold_conditions = [] decoration_conditions = []
self._slice_enumerator = FragmentReactionSliceEnumerator(
3
2023-10-20 06:43:16+00:00
12k
jhejna/cpl
research/algs/off_policy_algorithm.py
[ { "identifier": "ReplayBuffer", "path": "research/datasets/replay_buffer/buffer.py", "snippet": "class ReplayBuffer(torch.utils.data.IterableDataset):\n \"\"\"\n Generic Replay Buffer Class.\n\n This class adheres to the following conventions to support multiprocessing:\n 1. Variables/functions starting with \"_\", like \"_help\" are to be used only by the replay buffer internaly. They\n are carefully setup for multiprocesing.\n 2. variables/functions named regularly without a leading \"_\" are to be used by the main thread. This includes\n standard functions like \"add\".\n\n There are a few critical setup options.\n 1. Capacity: determines if the buffer is setup upon creation. If it is set to a known value, then we can add data\n online with `add`, or by pulling more data from disk. If is set to None, the dataset is initialized to the full\n size of the offline dataset.\n 2. path: path to offline data that will be loaded\n 3. _data_generator\n\n Some options are mutually exclusive. For example, it is bad to use a non-distributed layout with\n workers and online data. This will generate a bunch of copy on writes.\n\n Data is expected to be stored in a \"next\" format. This means that data is stored like this:\n s_0, dummy, dummy, dummy\n s_1, a_0 , r_0 , d_0\n s_2, a_1 , r_1 , d_1\n s_3, a_2 , r_2 , d_2 ... End of episode!\n s_0, dummy, dummy, dummy\n s_1, a_0 , r_0 , d_0\n s_2, a_1 , r_1 , d_1\n\n This format is expected from the load(path) funciton.\n\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.Space,\n action_space: gym.Space,\n sample_fn: Union[str, Callable] = \"sample\",\n sample_kwargs: Optional[Dict] = None,\n epoch_ratio: float = 1.0,\n path: Optional[str] = None,\n capacity: Optional[int] = None,\n exclude_keys: Optional[List[str]] = None,\n include_keys: Optional[Dict] = None,\n stacked_obs: bool = False,\n stacked_action: bool = False,\n distributed: bool = False,\n fetch_every: int = 1000,\n cleanup: bool = True,\n ) -> None:\n # Remove stacking if present.\n self.stacked_obs = stacked_obs\n if self.stacked_obs:\n observation_space = remove_stack_dim(observation_space)\n self.stacked_action = stacked_action\n if self.stacked_action:\n action_space = remove_stack_dim(action_space)\n\n self.observation_space = observation_space\n self.action_space = action_space\n\n # Construct the space for the buffer\n self.exclude_keys = [] if exclude_keys is None else exclude_keys # keys to exclude in the storage buffer\n buffer_space = {\n \"obs\": self.observation_space,\n \"action\": self.action_space,\n \"reward\": 0.0,\n \"done\": False,\n \"discount\": 1.0,\n }\n flattened_buffer_space = utils.flatten_dict(buffer_space)\n if include_keys is not None:\n flattened_buffer_space.update(include_keys)\n print(\"FLATTENED BUFFER SPACE\", flattened_buffer_space)\n for k in self.exclude_keys:\n if k in flattened_buffer_space:\n del flattened_buffer_space[k]\n self.buffer_space = utils.nest_dict(flattened_buffer_space)\n\n self.dummy_action = self.action_space.sample()\n self.capacity = capacity\n\n # Setup the sampler\n if isinstance(sample_fn, str):\n sample_fn = vars(sampling)[sample_fn]\n # Use functools partial to override the default args.\n sample_kwargs = {} if sample_kwargs is None else sample_kwargs\n self.sample_fn = functools.partial(sample_fn, **sample_kwargs)\n # Add sampling parameters\n self.epoch_ratio = epoch_ratio\n\n # Path for preloaded data\n self.path = path\n\n # Setup based on distributed value\n self.distributed = distributed\n if self.distributed:\n 
self.cleanup = cleanup\n self.fetch_every = fetch_every\n if self.capacity is not None:\n self.storage_path = tempfile.mkdtemp(prefix=\"replay_buffer_\")\n print(\"[research] Replay Buffer Storage Path\", self.storage_path)\n self.current_ep = utils.nest_dict({k: list() for k in flattened_buffer_space.keys()})\n self.num_episodes = 0\n else:\n self._alloc(self.capacity) # Alloc immediately\n\n def _alloc(self, capacity):\n # Create the data generator\n self._current_data_generator = self._data_generator()\n\n if capacity is None:\n # Allocte the entire dataset\n data = utils.concatenate(*list(self._current_data_generator), dim=0)\n self._storage = storage.FixedStorage(data)\n else:\n # Construct the buffer space. Remember to exclude any exclude keys\n self._storage = storage.CircularStorage(self.buffer_space, capacity)\n # Fill the storage.\n # if self.path is not None:\n for data in self._current_data_generator:\n self._storage.extend(data)\n if self._storage.size >= self._storage.capacity:\n break\n\n print(\"[ReplayBuffer] Allocated {:.2f} GB\".format(self._storage.bytes / 1024**3))\n\n def _data_generator(self):\n \"\"\"\n Can be overridden in order to load the initial data differently.\n By default assumes the data to be the standard format, and returned as a data dictionary.\n or\n None\n\n This function can be overriden by sub-classes in order to produce data batches.\n It should do the following:\n 1. split data across torch data workers\n 2. randomize the order of data\n 3. yield data of the form dicts\n \"\"\"\n if self.path is None:\n return\n\n # By default get all of the file names that are distributed at the correct index\n worker_info = torch.utils.data.get_worker_info()\n num_workers = 1 if worker_info is None else worker_info.num_workers\n worker_id = 0 if worker_info is None else worker_info.id\n\n ep_filenames = [os.path.join(self.path, f) for f in os.listdir(self.path) if f.endswith(\".npz\")]\n random.shuffle(ep_filenames) # Shuffle all the filenames\n\n if num_workers > 1 and len(ep_filenames) == 1:\n print(\n \"[ReplayBuffer] Warning: using multiple workers but single replay file. 
Reduce memory usage by sharding\"\n \" data with `save` instead of `save_flat`.\"\n )\n elif num_workers > 1 and len(ep_filenames) < num_workers:\n print(\"[ReplayBuffer] Warning: using more workers than dataset files.\")\n\n for ep_filename in ep_filenames:\n ep_idx, _ = [int(x) for x in os.path.splitext(ep_filename)[0].split(\"_\")[-2:]]\n # Spread loaded data across workers if we have multiple workers and files.\n if ep_idx % num_workers != worker_id and len(ep_filenames) > 1:\n continue # Only yield the files belonging to this worker.\n data = storage.load_data(ep_filename, exclude_keys=self.exclude_keys)\n yield data\n\n def _fetch_offline(self) -> int:\n \"\"\"\n This simple function fetches a new episode from the offline dataset and adds it to the buffer.\n This is done for each worker.\n \"\"\"\n try:\n data = next(self._current_data_generator)\n except StopIteration:\n self._current_data_generator = self._data_generator()\n data = next(self._current_data_generator)\n self._storage.extend(data)\n # Return the fetched size\n return len(data[\"done\"]) # data must have the done key for storage\n\n def _fetch_online(self) -> int:\n worker_info = torch.utils.data.get_worker_info()\n assert worker_info is not None, \"Must use distributed buffer for online fetching.\"\n\n ep_filenames = sorted([os.path.join(self.storage_path, f) for f in os.listdir(self.storage_path)], reverse=True)\n fetched_size = 0\n for ep_filename in ep_filenames:\n ep_idx, ep_len = [int(x) for x in os.path.splitext(ep_filename)[0].split(\"_\")[-2:]]\n if ep_idx % worker_info.num_workers != worker_info.id:\n continue\n if ep_filename in self._episode_filenames:\n break # We found something we have already loaded\n if fetched_size + ep_len > self._storage.capacity:\n break # do not fetch more than the size of the replay buffer\n\n data = storage.load_data(ep_filename, exclude_keys=self.exclude_keys)\n self._storage.extend(data)\n self._episode_filenames.add(ep_filename)\n if self.cleanup:\n try:\n os.remove(ep_filename)\n except OSError:\n pass\n\n return fetched_size\n\n def _get_dummy_transition(self, obs):\n flattened_buffer_space = utils.flatten_dict(self.buffer_space)\n dummy_transition = {\n k: v.sample() if isinstance(v, gym.Space) else v\n for k, v in flattened_buffer_space.items()\n if not k.startswith(\"obs\") and not k.startswith(\"action\")\n }\n dummy_transition = utils.nest_dict(dummy_transition)\n dummy_transition[\"obs\"] = obs\n dummy_transition[\"action\"] = self.dummy_action\n return dummy_transition\n\n def _reset_current_ep(self):\n ep_idx = self.num_episodes\n ep_len = len(self.current_ep[\"done\"])\n self.num_episodes += 1\n ts = datetime.datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n ep_filename = f\"{ts}_{ep_idx}_{ep_len}.npz\"\n storage.save_data(self.current_ep, os.path.join(self.storage_path, ep_filename))\n\n flattened_buffer_space = utils.flatten_dict(self.buffer_space)\n ep = {k: list() for k in flattened_buffer_space.keys()}\n self.current_ep = utils.nest_dict(ep)\n\n def add(self, **kwargs):\n assert self.capacity is not None, \"Tried to extend to a static size buffer.\"\n # Preprocess here before adding to storage\n if len(kwargs) == 1:\n assert \"obs\" in kwargs\n kwargs = self._get_dummy_transition(kwargs[\"obs\"])\n if self.stacked_obs:\n kwargs[\"obs\"] = utils.get_from_batch(kwargs[\"obs\"], -1)\n else:\n # We have a full transitions\n if self.stacked_obs:\n kwargs[\"obs\"] = utils.get_from_batch(kwargs[\"obs\"], -1)\n if self.stacked_action:\n kwargs[\"action\"] = 
utils.get_from_batch(kwargs[\"action\"], -1)\n\n assert \"done\" in kwargs, \"Need done key for ReplayBuffer\"\n\n # This function is overwritten for distributed / local buffers\n if self.distributed:\n # Add to the current thread, and dump to disk\n utils.append(self.current_ep, kwargs)\n if kwargs[\"done\"]:\n self._reset_current_ep()\n else:\n # Add directly\n self._learning_online = True\n self._storage.add(kwargs)\n\n def extend(self, **kwargs):\n assert \"done\" in kwargs, \"Need done key for ReplayBuffer\"\n assert self.capacity is not None, \"Tried to extend to a static size buffer.\"\n # TODO: There is a chance that if we add a full sequence we will end up with (B, T, stack, ...)\n # which is not what we want. We could compare the shapes of the observation space to fix it\n # but this code might be unnecesary, as this class shouldn't really be used like that anyways.\n if self.distributed:\n # Add to the current thread, and dump to disk\n utils.extend(self.current_ep, kwargs)\n if kwargs[\"done\"][-1]:\n self._reset_current_ep()\n else:\n # Add directly\n self._learning_online = True\n self._storage.extend(kwargs)\n\n def save(self, path):\n os.makedirs(path, exist_ok=True)\n if self.distributed:\n if self.cleanup:\n print(\"[research] Warning, attempting to save a cleaned up replay buffer. There are likely no files\")\n srcs = os.listdir(self.storage_path)\n for src in srcs:\n shutil.move(os.path.join(self.storage_path, src), os.path.join(path, src))\n print(\"Successfully saved\", len(srcs), \"episodes.\")\n else:\n ep_len = self._storage.size\n ep_idx = 0\n ts = datetime.datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n ep_filename = f\"{ts}_{ep_idx}_{ep_len}.npz\"\n save_path = os.path.join(path, ep_filename)\n self._storage.save(save_path)\n\n def sample(self, *args, **kwargs):\n return self.sample_fn(self._storage, *args, **kwargs)\n\n def __iter__(self):\n assert not hasattr(self, \"_iterated\"), \"__iter__ called twice!\"\n self._iterated = True\n worker_info = torch.utils.data.get_worker_info()\n assert (worker_info is not None) == self.distributed, \"ReplayBuffer.distributed not set correctly!\"\n\n # allocate the buffer with the given capacity\n if self.distributed:\n self._alloc(None if self.capacity is None else self.capacity // worker_info.num_workers)\n self._episode_filenames = set()\n\n self._learning_online = False\n\n samples_since_last_offline_fetch = 0\n samples_since_last_online_fetch = 0\n last_offline_fetch_size = 0\n\n batch_size = self.sample_fn.keywords.get(\"batch_size\", 1)\n stack_size = self.sample_fn.keywords.get(\"stack\", 1)\n seq_size = self.sample_fn.keywords.get(\"seq_length\", 1)\n\n while True:\n if self._storage.size < seq_size * stack_size + 1:\n yield {} # If the buffer is too small for sampling, continue.\n else:\n sample = self.sample_fn(self._storage)\n if batch_size == 1:\n sample = utils.squeeze(sample, 0)\n yield sample\n\n # Fetch new data if we have a circular buffer.\n if isinstance(self._storage, storage.CircularStorage):\n if self.distributed: # Always check for online data\n # We fetch from the online buffer\n samples_since_last_online_fetch += 1\n if samples_since_last_online_fetch >= self.fetch_every:\n fetch_size = self._fetch_online()\n self._learning_online = self._learning_online or (fetch_size > 0)\n samples_since_last_online_fetch = 0\n\n if not self._learning_online and self.path is not None:\n # We fetch from the offline buffer\n samples_since_last_offline_fetch += 1\n data_pts_since_last_offline_fetch = (\n 
samples_since_last_offline_fetch * batch_size * seq_size * stack_size\n )\n if data_pts_since_last_offline_fetch >= last_offline_fetch_size * self.epoch_ratio:\n last_offline_fetch_size = self._fetch_offline()\n samples_since_last_offline_fetch = 0\n\n def __del__(self):\n if not self.distributed:\n return\n if self.cleanup:\n return\n else:\n paths = [os.path.join(self.storage_path, f) for f in os.listdir(self.storage_path)]\n for path in paths:\n try:\n os.remove(path)\n except OSError:\n pass\n try:\n os.rmdir(self.storage_path)\n except OSError:\n pass" }, { "identifier": "storage", "path": "research/datasets/replay_buffer/storage.py", "snippet": "def load_data(path: str, exclude_keys: Optional[List[str]]) -> Dict:\ndef save_data(data: Dict, path: str) -> None:\ndef get_bytes(buffer: Union[Dict, np.ndarray]) -> int:\n def capacity(self):\n def size(self):\n def starts(self):\n def ends(self):\n def lengths(self):\n def bytes(self):\n def save(self, path):\n def __getitem__(self, key):\n def __getattr__(self, name):\n def __contains__(self, key):\n def add(self, data):\n def extend(self, data):\n def __init__(self, buffers: Dict) -> None:\n def add(self, data):\n def extend(self, data):\n def __init__(self, initial_capacity: int = 100, dtype=np.int64):\n def _reset(self):\n def append(self, value):\n def pop(self):\n def popleft(self):\n def view(self):\n def __len__(self):\n def first(self):\n def last(self):\n def __str__(self):\n def __init__(self, buffer_space: Union[Dict, gym.spaces.Dict], capacity: Optional[int] = None) -> None:\n def _update_markers(self, new_ends: Iterable = ()):\n def add(self, data):\n def extend(self, data):\nclass Storage(abc.ABC):\nclass FixedStorage(Storage):\nclass NPQueue(object):\nclass CircularStorage(Storage):" }, { "identifier": "EmptyEnv", "path": "research/envs/base.py", "snippet": "class EmptyEnv(gym.Env):\n\n \"\"\"\n An empty holder for defining supervised learning problems\n It works by specifying the ranges and shapes.\n \"\"\"\n\n def __init__(\n self,\n observation_low=None,\n observation_high=None,\n observation_shape=None,\n observation_dtype=np.float32,\n observation_space=None,\n action_low=None,\n action_high=None,\n action_shape=None,\n action_dtype=np.float32,\n action_space=None,\n ):\n if observation_space is not None:\n self.observation_space = observation_space\n else:\n self.observation_space = _get_space(observation_low, observation_high, observation_shape, observation_dtype)\n if action_space is not None:\n self.action_space = action_space\n else:\n self.action_space = _get_space(action_low, action_high, action_shape, action_dtype)\n\n def step(self, action):\n raise NotImplementedError(\"Empty Env does not have step\")\n\n def reset(self, **kwargs):\n raise NotImplementedError(\"Empty Env does not have reset\")" }, { "identifier": "ModuleContainer", "path": "research/networks/base.py", "snippet": "class ModuleContainer(torch.nn.Module):\n CONTAINERS = []\n\n def __init__(self, observation_space: gym.Space, action_space: gym.Space, **kwargs) -> None:\n super().__init__()\n # save the classes and containers\n base_kwargs = {k: v for k, v in kwargs.items() if not k.endswith(\"_class\") and not k.endswith(\"_kwargs\")}\n\n output_space = observation_space\n for container in self.CONTAINERS:\n module_class = kwargs.get(container + \"_class\", torch.nn.Identity)\n module_class = vars(research.networks)[module_class] if isinstance(module_class, str) else module_class\n if module_class is torch.nn.Identity:\n module_kwargs = dict()\n 
else:\n module_kwargs = base_kwargs.copy()\n module_kwargs.update(kwargs.get(container + \"_kwargs\", dict()))\n # Create the module, and attach it to self\n module = module_class(output_space, action_space, **module_kwargs)\n setattr(self, container, module)\n\n # Set a reset function\n setattr(self, \"reset_\" + container, partial(self._reset, container))\n\n if hasattr(getattr(self, container), \"output_space\"):\n # update the output space\n output_space = getattr(self, container).output_space\n\n # Done creating all sub-modules.\n\n @classmethod\n def create_subset(cls, containers):\n assert all([container in cls.CONTAINERS for container in containers])\n name = \"\".join([container.capitalize() for container in containers]) + \"Subset\"\n return type(name, (ModuleContainer,), {\"CONTAINERS\": containers})\n\n def _reset(self, container: str) -> None:\n module = getattr(self, container)\n with torch.no_grad():\n module.apply(reset)\n\n def compile(self, **kwargs):\n for container in self.CONTAINERS:\n attr = getattr(self, container)\n if type(attr).forward == torch.nn.Module.forward:\n assert hasattr(attr, \"compile\"), (\n \"container \" + container + \" is nn.Module without forward() but didn't define `compile`.\"\n )\n attr.compile(**kwargs)\n else:\n setattr(self, container, torch.compile(attr, **kwargs))\n\n def forward(self, x):\n # Use all of the modules in order\n for container in self.CONTAINERS:\n x = getattr(self, container)(x)\n return x" }, { "identifier": "runners", "path": "research/utils/runners.py", "snippet": "class CloudpickleWrapper:\nclass AsyncState(Enum):\nclass AsyncEnv(gym.Env):\nclass MPRunner(object):\n def __init__(self, fn: Callable):\n def __getstate__(self):\n def __setstate__(self, ob):\n def __call__(self):\ndef alloc_shared_buffer(space: Any):\ndef read_shared_buffer(shared_buffer: Any, space: gym.Space):\ndef write_shared_buffer(shared_buffer: Any, space: gym.Space, value: Any):\n def __init__(\n self, env_fn: Callable, observation_space: Optional[gym.Space] = None, action_space: Optional[gym.Space] = None\n ):\n def step_send(self, action):\n def step_recv(self):\n def step(self, action):\n def reset_send(self):\n def reset_recv(self):\n def reset(self):\n def close(self):\ndef _async_env_worker(env_fn, pipe, parent_pipe, obs_buffer, action_buffer):\n def __init__(\n self,\n env_fn,\n fn: Optional[Callable] = None,\n observation_space: Optional[gym.Space] = None,\n action_space: Optional[gym.Space] = None,\n **kwargs,\n ):\n def start(self, fn: Optional[Callable] = None, **kwargs):\n def started(self):\n def __call__(self, block=False):\n def step(self, *args, **kwargs):\n def reset(self, *args, **kwargs):\n def close(self):\n DEFAULT = \"default\"\n WAITING_RESET = \"reset\"\n WAITING_STEP = \"step\"" }, { "identifier": "utils", "path": "research/utils/utils.py", "snippet": "def to_device(batch: Any, device: torch.device) -> Any:\ndef to_tensor(batch: Any) -> Any:\ndef to_np(batch: Any) -> Any:\ndef remove_float64(batch: Any):\ndef unsqueeze(batch: Any, dim: int) -> Any:\ndef squeeze(batch: Any, dim: int) -> Any:\ndef get_from_batch(batch: Any, start: Union[int, np.ndarray, torch.Tensor], end: Optional[int] = None) -> Any:\ndef set_in_batch(batch: Any, value: Any, start: int, end: Optional[int] = None) -> None:\ndef batch_copy(batch: Any) -> Any:\ndef space_copy(space: gym.Space):\ndef contains_tensors(batch: Any) -> bool:\ndef get_device(batch: Any) -> Optional[torch.device]:\ndef concatenate(*args, dim: int = 0):\ndef append(lst, item):\ndef 
extend(lst1, lst2):\n def __init__(self, name: str = \"\"):\n def forward(self, x: Any) -> Any:\ndef np_dataset_alloc(\n space: gym.Space, capacity: int, begin_pad: Tuple[int] = tuple(), end_pad: Tuple[int] = tuple()\n) -> np.ndarray:\ndef np_bytes_per_instance(space: gym.Space) -> int:\ndef _flatten_dict_helper(flat_dict: Dict, value: Any, prefix: str, separator: str = \".\") -> None:\ndef flatten_dict(d: Dict, separator: str = \".\") -> Dict:\ndef nest_dict(d: Dict, separator: str = \".\") -> Dict:\ndef fetch_from_dict(d: Dict, keys: Union[str, List, Tuple], separator=\".\") -> List[Any]:\ndef create_optim_groups(params, kwargs):\nclass PrintNode(torch.nn.Module):" }, { "identifier": "Algorithm", "path": "research/algs/base.py", "snippet": "class Algorithm(ABC):\n _save_keys: Set[str]\n _compiled: bool\n\n def __init__(\n self,\n observation_space: gym.Space,\n action_space: gym.Space,\n network_class: Type[torch.nn.Module],\n dataset_class: Union[Type[torch.utils.data.IterableDataset], Type[torch.utils.data.Dataset]],\n network_kwargs: Optional[Dict] = None,\n dataset_kwargs: Optional[Dict] = None,\n validation_dataset_class: Optional[\n Union[Type[torch.utils.data.IterableDataset], Type[torch.utils.data.Dataset]]\n ] = None,\n validation_dataset_kwargs: Optional[Dict] = None,\n optim_class: Type[torch.optim.Optimizer] = torch.optim.Adam,\n optim_kwargs: Optional[Dict] = None,\n schedulers_class: Optional[Dict] = None,\n schedulers_kwargs: Optional[Dict[str, Dict]] = None,\n processor_class: Optional[Type[Processor]] = None,\n processor_kwargs: Optional[Dict] = None,\n checkpoint: Optional[str] = None,\n device: Union[str, torch.device] = \"auto\",\n ):\n # Initialize the _save_keys attribute using the superclass.\n # These are used for automatically identifying keys for saving/loading.\n super().__setattr__(\"_save_keys\", set())\n super().__setattr__(\"_module_keys\", set())\n super().__setattr__(\"_compiled\", False)\n\n # Save relevant values\n self.observation_space = observation_space\n self.action_space = action_space\n self.optim = {}\n\n # setup devices\n if device == \"auto\":\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self._device = torch.device(device)\n\n # Setup the data preprocessor first. 
Thus, if we need to reference it in network setup we can.\n # Everything here is saved in self.processor\n self.setup_processor(processor_class, {} if processor_kwargs is None else processor_kwargs)\n\n # Create the network.\n network_kwargs = {} if network_kwargs is None else network_kwargs\n self.setup_network(network_class, network_kwargs)\n\n # Save values for optimizers, which will be lazily initialized later\n self.optim = {}\n self.optim_class = optim_class\n self.optim_kwargs = {\"lr\": 0.0001} if optim_kwargs is None else optim_kwargs\n\n # Save values for schedulers, which will be lazily initialized later\n self.schedulers = {}\n self.schedulers_class = {} if schedulers_class is None else schedulers_class\n self.schedulers_kwargs = {} if schedulers_kwargs is None else schedulers_kwargs\n\n # Save values for datasets, which will be lazily initialized later\n self.dataset_class = dataset_class\n self.dataset_kwargs = {} if dataset_kwargs is None else dataset_kwargs\n self.validation_dataset_class = validation_dataset_class\n self.validation_dataset_kwargs = validation_dataset_kwargs\n\n self._training = False\n\n # Load a check point if we have one -- using non-strict enforcement.\n # NOTE: this only loads the network and will _not_ load the optimizer checkpoint.\n if checkpoint is not None:\n self.load(checkpoint, strict=False)\n\n @property\n def device(self):\n return self._device\n\n @property\n def training(self) -> bool:\n return self._training\n\n def __setattr__(self, name: str, value: Any) -> None:\n # Check to see if the value is a module etc.\n if (hasattr(self, \"_save_keys\") and name in self._save_keys) or (\n hasattr(self, \"_module_keys\") and name in self._module_keys\n ):\n pass\n elif isinstance(value, torch.nn.Parameter):\n self._save_keys.add(name)\n elif isinstance(value, torch.nn.Module):\n self._module_keys.add(name)\n if sum(p.numel() for p in value.parameters()) > 0:\n self._save_keys.add(name) # store if we have a module with more than zero parameters.\n return super().__setattr__(name, value)\n\n @property\n def save_keys(self) -> List[str]:\n return self._save_keys\n\n @property\n def module_keys(self) -> List[str]:\n return self._module_keys\n\n @property\n def compiled(self) -> bool:\n return self._compiled\n\n def to(self, device) -> \"Algorithm\":\n for k in self.save_keys:\n if k == \"processor\" and not self.processor.supports_gpu:\n continue\n else:\n setattr(self, k, getattr(self, k).to(device))\n return self\n\n def compile(self, **kwargs):\n for k in self.save_keys:\n attr = getattr(self, k)\n if isinstance(attr, torch.nn.Module):\n if type(attr).forward == torch.nn.Module.forward:\n # In this case, the forward method hasn't been overriden.\n # Thus we assume there is a compile argument.\n assert hasattr(attr, \"compile\"), (\n \"save key \" + k + \" is nn.Module without forward() but didn't define `compile`.\"\n )\n attr.compile(**kwargs)\n else:\n setattr(self, k, torch.compile(attr, **kwargs))\n # indicate that we have compiled the models.\n self._compiled = True\n\n def train(self) -> None:\n for k in self._module_keys:\n getattr(self, k).train()\n self._training = True\n\n def eval(self) -> None:\n for k in self._module_keys:\n getattr(self, k).eval()\n self._training = False\n\n @property\n def num_params(self):\n _num_params = 0\n for k in self.save_keys:\n attr = getattr(self, k)\n if hasattr(attr, \"parameters\"):\n _num_params += sum(p.numel() for p in attr.parameters() if p.requires_grad)\n else:\n assert isinstance(attr, 
torch.nn.Parameter), \"Can only save Modules or Parameters.\"\n if attr.requires_grad:\n _num_params += attr.numel()\n return _num_params\n\n @property\n def nbytes(self):\n # Returns the size of all the parameters in bytes\n _bytes = 0\n for k in self.save_keys:\n attr = getattr(self, k)\n if hasattr(attr, \"parameters\"):\n for p in attr.parameters():\n _bytes += p.nelement() * p.element_size()\n if hasattr(attr, \"buffers\"):\n for b in attr.buffers():\n _bytes += b.nelement() * b.element_size()\n return _bytes\n\n def setup_processor(self, processor_class: Optional[Type[Processor]], processor_kwargs: Dict) -> None:\n if processor_class is None:\n processor = Identity(self.observation_space, self.action_space)\n else:\n processor = processor_class(self.observation_space, self.action_space, **processor_kwargs)\n\n if processor.supports_gpu: # move it to device if it supports GPU computation.\n self.processor = processor.to(self.device)\n else:\n self.processor = processor\n\n def setup_network(self, network_class: Type[torch.nn.Module], network_kwargs: Dict) -> None:\n self.network = network_class(\n self.processor.observation_space, self.processor.action_space, **network_kwargs\n ).to(self.device)\n\n def setup_optimizers(self) -> None:\n \"\"\"\n This is only called by the Trainer, and not called when we load the model.\n This is done so that inference jobs don't load the optimizer state.\n \"\"\"\n # Setup Optimizers\n assert len(self.optim) == 0, \"setup_optimizers called twice!\"\n for k in self.save_keys:\n attr = getattr(self, k)\n if hasattr(attr, \"parameters\"):\n parameters = attr.parameters()\n else:\n assert isinstance(attr, torch.nn.Parameter), \"Can only save Modules or Parameters.\"\n parameters = [attr]\n # Constrcut the optimizer\n self.optim[k] = self.optim_class(parameters, **self.optim_kwargs)\n\n def setup_schedulers(self):\n assert len(self.schedulers) == 0, \"setup_schedulers called twice!\"\n for k in self.schedulers_class.keys():\n if self.schedulers_class[k] is not None:\n assert k in self.optim, \"Did not find schedule key in optimizers dict.\"\n self.schedulers[k] = self.schedulers_class[k](self.optim[k], **self.schedulers_kwargs.get(k, dict()))\n\n def setup_datasets(self, env: gym.Env, total_steps: int):\n \"\"\"\n Called after everything else has been setup, right before training starts\n This is _only_ called by the trainer and is not called by default.\n This function is responsible for creating the following attributes:\n self.dataset (required)\n self.validation_dataset\n \"\"\"\n assert not hasattr(self, \"dataset\"), \"setup_datasets called twice!\"\n assert not hasattr(self, \"validation_dataset\"), \"setup_datasets called twice!\"\n # Setup the train dataset\n self.dataset = self.dataset_class(self.observation_space, self.action_space, **self.dataset_kwargs)\n # Setup the validation dataset\n if self.validation_dataset_class is not None:\n self.validation_dataset = self.validation_dataset_class(\n self.observation_space, self.action_space, **self.validation_dataset_kwargs\n )\n elif self.validation_dataset_kwargs is not None:\n validation_dataset_kwargs = copy.deepcopy(self.dataset_kwargs)\n validation_dataset_kwargs.update(self.validation_dataset_kwargs)\n self.validation_dataset = self.dataset_class(\n self.observation_space, self.action_space, **validation_dataset_kwargs\n )\n else:\n self.validation_dataset = None\n\n def save(self, path: str, extension: str, metadata: Optional[Dict] = None) -> None:\n \"\"\"\n Saves a checkpoint of the model 
and the optimizers\n \"\"\"\n save_dict = {}\n if len(self.optim) > 0:\n save_dict[\"optim\"] = {k: v.state_dict() for k, v in self.optim.items()}\n if len(self.schedulers) > 0:\n save_dict[\"schedulers\"] = {k: v.state_dict() for k, v in self.schedulers.items()}\n for k in self._save_keys:\n attr = getattr(self, k)\n if hasattr(attr, \"state_dict\"):\n save_dict[k] = attr.state_dict()\n else:\n assert isinstance(attr, torch.nn.Parameter), \"Can only save Modules or Parameters.\"\n save_dict[k] = attr\n\n # Add the metadata\n save_dict[\"metadata\"] = {} if metadata is None else metadata\n save_path = os.path.join(path, extension)\n if not save_path.endswith(\".pt\"):\n save_path += \".pt\"\n torch.save(save_dict, save_path)\n\n def load(self, checkpoint: str, strict: bool = True) -> Dict:\n \"\"\"\n Loads the model and its associated checkpoints.\n If we haven't created the optimizers and schedulers, do not load those.\n \"\"\"\n print(\"[research] loading checkpoint:\", checkpoint)\n checkpoint = torch.load(checkpoint, map_location=self.device)\n remaining_checkpoint_keys = set(checkpoint.keys())\n\n # First load everything except for the optim\n for k in self.save_keys: # Loop through keys in the Algorithm.\n if k not in checkpoint:\n if strict:\n raise ValueError(\"Checkpoint did not have key \" + str(k))\n else:\n print(\"[research] Warning: Checkpoint did not have key\", k)\n continue\n\n if isinstance(getattr(self, k), torch.nn.Parameter):\n # directly set the data, this is for nn.Parameters\n getattr(self, k).data = checkpoint[k].data\n else:\n # Otherwise, load via state dict\n getattr(self, k).load_state_dict(checkpoint[k], strict=strict)\n remaining_checkpoint_keys.remove(k)\n\n # Now load the optimizer and its associated keys\n for k in self.optim.keys():\n if strict and k not in checkpoint[\"optim\"]:\n raise ValueError(\"Strict mode was enabled, but couldn't find optimizer key\")\n elif k not in checkpoint[\"optim\"]:\n print(\"[research] Warning: Checkpoint did not have optimizer key\", k)\n continue\n self.optim[k].load_state_dict(checkpoint[\"optim\"][k])\n if \"optim\" in checkpoint:\n remaining_checkpoint_keys.remove(\"optim\")\n\n # Now load the schedulers\n for k in self.schedulers.keys():\n if strict and k not in checkpoint[\"schedulers\"]:\n raise ValueError(\"Strict mode was enabled, but couldn't find scheduler key\")\n elif k not in checkpoint[\"schedulers\"]:\n print(\"[research] Warning: Checkpoint did not have scheduler key\", k)\n continue\n self.schedulers[k].load_state_dict(checkpoint[\"schedulers\"][k])\n if \"schedulers\" in checkpoint:\n remaining_checkpoint_keys.remove(\"schedulers\")\n\n remaining_checkpoint_keys.remove(\"metadata\") # Do not count metadata key, which is always addded.\n if strict and len(remaining_checkpoint_keys) > 0:\n raise ValueError(\"Algorithm did not have keys \", +str(remaining_checkpoint_keys))\n elif len(remaining_checkpoint_keys) > 0:\n print(\"[research] Warning: Checkpoint keys\", remaining_checkpoint_keys, \"were not loaded.\")\n\n return checkpoint[\"metadata\"]\n\n def format_batch(self, batch: Any) -> Any:\n # Convert items to tensor if they are not.\n # Checking first makes sure we do not distrub memory pinning\n if not utils.contains_tensors(batch):\n batch = utils.to_tensor(batch)\n if self.processor.supports_gpu:\n # Move to CUDA first.\n batch = utils.to_device(batch, self.device)\n batch = self.processor(batch)\n else:\n batch = self.processor(batch)\n batch = utils.to_device(batch, self.device)\n return batch\n\n 
@abstractmethod\n def train_step(self, batch: Any, step: int, total_steps: int) -> Dict:\n \"\"\"\n Train the model. Should return a dict of loggable values\n \"\"\"\n return {}\n\n def validation_step(self, batch: Any) -> Dict:\n \"\"\"\n perform a validation step. Should return a dict of loggable values.\n \"\"\"\n raise NotImplementedError\n\n def env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict:\n \"\"\"\n Perform any extra training operations. This is done before the train step is called.\n A common use case for this would be stepping the environment etc.\n \"\"\"\n return {}\n\n def validation_extras(self, path: str, step: int) -> Dict:\n \"\"\"\n Perform any extra validation operations.\n A common usecase for this is saving visualizations etc.\n \"\"\"\n return {}\n\n def _predict(self, batch: Any, **kwargs) -> Any:\n \"\"\"\n Internal prediction function, can be overridden\n By default, we call torch.no_grad(). If this behavior isn't desired,\n override the _predict funciton in your algorithm.\n \"\"\"\n with torch.no_grad():\n if len(kwargs) > 0:\n raise ValueError(\"Default predict method does not accept key word args, but they were provided.\")\n pred = self.network(batch)\n return pred\n\n def predict(self, batch: Any, is_batched: bool = False, **kwargs) -> Any:\n is_np = not utils.contains_tensors(batch)\n if not is_batched:\n # Unsqeeuze everything\n batch = utils.unsqueeze(batch, 0)\n batch = self.format_batch(batch)\n pred = self._predict(batch, **kwargs)\n if not is_batched:\n pred = utils.get_from_batch(pred, 0)\n if is_np:\n pred = utils.to_np(pred)\n return pred" } ]
import datetime
import functools
import os
import sys
import tempfile
import gym
import numpy as np
import torch
from abc import abstractmethod
from typing import Any, Dict, Optional, Union
from research.datasets import ReplayBuffer
from research.datasets.replay_buffer import storage
from research.envs.base import EmptyEnv
from research.networks.base import ModuleContainer
from research.utils import runners, utils
from .base import Algorithm
from research.utils.config import Config
9,788
class OffPolicyAlgorithm(Algorithm):
    def __init__(
        self,
        *args,
        offline_steps: int = 0,  # Run fully offline by setting to -1
        random_steps: int = 1000,
        async_runner_ep_lag: int = 1,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.offline_steps = offline_steps
        self.random_steps = random_steps
        self.async_runner_ep_lag = async_runner_ep_lag

    def setup_datasets(self, env: gym.Env, total_steps: int):
        super().setup_datasets(env, total_steps)
        # Assign the correct update function based on what is passed in.
        if env is None or isinstance(env, EmptyEnv) or self.offline_steps < 0:
            self.env_step = self._empty_step
class OffPolicyAlgorithm(Algorithm):
    def __init__(
        self,
        *args,
        offline_steps: int = 0,  # Run fully offline by setting to -1
        random_steps: int = 1000,
        async_runner_ep_lag: int = 1,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.offline_steps = offline_steps
        self.random_steps = random_steps
        self.async_runner_ep_lag = async_runner_ep_lag

    def setup_datasets(self, env: gym.Env, total_steps: int):
        super().setup_datasets(env, total_steps)
        # Assign the correct update function based on what is passed in.
        if env is None or isinstance(env, EmptyEnv) or self.offline_steps < 0:
            self.env_step = self._empty_step
elif isinstance(env, runners.AsyncEnv):
4
2023-10-19 17:25:45+00:00
12k
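The record above turns on how OffPolicyAlgorithm.setup_datasets rebinds env_step once, up front, depending on the environment type and the offline_steps setting. What follows is a minimal, self-contained sketch of that dispatch pattern only; the names _SketchAlgorithm, _OfflineOnly and _online_step are invented stand-ins for illustration and are not part of the research package.

class _OfflineOnly:
    """Stand-in for an EmptyEnv-like placeholder environment (no interaction possible)."""

class _SketchAlgorithm:
    def __init__(self, offline_steps: int = -1):
        # offline_steps < 0 means: run fully offline, never touch the environment.
        self.offline_steps = offline_steps

    def _empty_step(self, env, step, total_steps):
        # No-op environment step used when training purely from a static dataset.
        return {}

    def _online_step(self, env, step, total_steps):
        # Placeholder for real collection (reset/step/replay-buffer writes would go here).
        return {"env_steps": 1}

    def setup_datasets(self, env):
        # Same idea as the record above: pick the step function once,
        # based on the environment type and the offline_steps setting.
        if env is None or isinstance(env, _OfflineOnly) or self.offline_steps < 0:
            self.env_step = self._empty_step
        else:
            self.env_step = self._online_step

algo = _SketchAlgorithm(offline_steps=-1)
algo.setup_datasets(env=None)
print(algo.env_step(None, step=0, total_steps=0))  # -> {} (offline no-op)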
nbasyl/LLM-FP4
configs/FPQ_baseline_config_llama.py
[ { "identifier": "FPPTQSLBatchingQuantLinear_fpq_baseline", "path": "quant_layers/fp_linear.py", "snippet": "class FPPTQSLBatchingQuantLinear_fpq_baseline(FPPTQSLQuantLinear):\n def __init__(self, \n in_features: int,\n out_features: int,\n bias: bool = True,\n mode = \"raw\",\n w_bit = 8,\n a_bit = 8,\n w_exponent_bit = 4, a_exponent_bit = 4,\n bias_bit = None,\n bias_correction = False,\n metric=\"L2_norm\", search_round=1, eq_alpha=0, eq_beta=1, eq_n=100, parallel_eq_n=10, n_H=1, n_V=1, n_a=1):\n super().__init__(in_features, out_features, bias=bias, mode=mode, w_bit=w_bit, a_bit=a_bit, w_exponent_bit= w_exponent_bit, a_exponent_bit=a_exponent_bit, bias_bit=bias_bit, bias_correction=bias_correction, metric=metric, search_round=search_round, eq_alpha=eq_alpha, eq_beta=eq_beta, eq_n=eq_n, parallel_eq_n=parallel_eq_n, n_H=n_H, n_V=n_V, n_a=n_a)\n self.calib_size = None\n self.calib_batch_size = None\n self.calib_need_batching = False\n self.w_maxval = None\n self.w_intervals = None\n self.a_maxval = None\n self.a_intervals = None\n\n def _initialize_calib_parameters(self):\n \"\"\" \n set parameters for feeding calibration data\n \"\"\"\n self.calib_size = int(self.raw_input.shape[0])\n self.calib_batch_size = int(self.raw_input.shape[0])\n i = 0\n while True:\n numel = (2*(self.raw_input.numel()+self.raw_out.numel())/self.calib_size*self.calib_batch_size) # number of parameters on GPU\n self.parallel_eq_n = int((3*1024*1024*1024/4)//numel)\n if self.parallel_eq_n <= 1:\n self.calib_need_batching = True\n self.calib_batch_size //= 2\n else:\n break\n \n def _initialize_intervals(self):\n # weight intervals \n print(\"channel-wise weight\")\n self.n_V = self.out_features\n self.crb_rows = self.out_features // self.n_V\n w_maxval = self.weight.view(self.n_V, self.crb_rows,self.n_H,self.crb_cols).abs().amax([1,3],keepdim=True)\n self.w_maxval = w_maxval\n self.w_interval=(2**self.w_exponent_bit - torch.log2(w_maxval) + math.log2(2 - 2 ** (-self.w_mantissa_bit)) - 1)\n self.w_intervals = []\n if self.w_bit == 8:\n for i in range(self.w_bit-3):\n M = i + 2\n E = self.w_bit - 1 - M\n self.w_intervals.append(2**E - torch.log2(self.w_maxval) + math.log2(2 - 2 ** (-M)) - 1)\n\n else:\n for i in range(self.w_bit-1):\n M = i\n E = self.w_bit - 1 - M\n self.w_intervals.append(2**E - torch.log2(self.w_maxval) + math.log2(2 - 2 ** (-M)) - 1)\n\n # activation intervals\n tmp_a_maxvals = []\n for b_st in range(0,self.calib_size,self.calib_batch_size):\n b_ed = min(self.calib_size, b_st+self.calib_batch_size)\n x_ = self.raw_input[b_st:b_ed].to(self.weight.device)\n x_maxval = x_.abs().max()\n tmp_a_maxvals.append(x_maxval)\n \n # print(f'tmp_a_intervals[0] {tmp_a_intervals[0].shape}')\n tmp_a_maxvals = torch.tensor(tmp_a_maxvals).to(x_.device)\n # print(f'tmp_a_maxvals {tmp_a_maxvals.shape}')\n self.a_maxval = tmp_a_maxvals.amax(dim=0, keepdim=True)\n self.a_interval = (2**self.a_exponent_bit - torch.log2(self.a_maxval) + math.log2(2 - 2 ** (-self.a_mantissa_bit)) - 1).detach().view(1,1).repeat(self.n_a,1)\n\n self.a_intervals = []\n if self.a_bit == 8:\n for i in range(self.a_bit-3):\n M = i + 2\n E = self.a_bit - 1 - M\n a_interval_=(2**E - torch.log2(self.a_maxval) + math.log2(2 - 2 ** (-M)) - 1).detach().view(1,1).repeat(self.n_a,1)\n\n self.a_intervals.append(a_interval_.clone())\n else:\n for i in range(self.a_bit-1):\n M = i\n E = self.a_bit - 1 - M\n a_interval_=(2**E - torch.log2(self.a_maxval) + math.log2(2 - 2 ** (-M)) - 1).detach().view(1,1).repeat(self.n_a,1)\n 
self.a_intervals.append(a_interval_.clone())\n\n def _initialize_intervals_eval(self):\n self._initialize_calib_parameters()\n print(\"channel-wise weight\")\n self.n_V = self.out_features\n self.crb_rows = self.out_features // self.n_V\n w_maxval = self.weight.view(self.n_V, self.crb_rows,self.n_H,self.crb_cols).abs().amax([1,3],keepdim=True)\n self.w_maxval = w_maxval\n self.w_interval=(2**self.w_exponent_bit - torch.log2(w_maxval) + math.log2(2 - 2 ** (-self.w_mantissa_bit)) - 1)\n\n # activation intervals\n tmp_a_maxvals = []\n for b_st in range(0,self.calib_size,self.calib_batch_size):\n b_ed = min(self.calib_size, b_st+self.calib_batch_size)\n x_ = self.raw_input[b_st:b_ed].to(self.weight.device)\n x_maxval = x_.abs().max()\n tmp_a_maxvals.append(x_maxval)\n \n tmp_a_maxvals = torch.tensor(tmp_a_maxvals).to(x_.device)\n self.a_maxval = tmp_a_maxvals.amax(dim=0, keepdim=True)\n self.a_interval = (2**self.a_exponent_bit - torch.log2(self.a_maxval) + math.log2(2 - 2 ** (-self.a_mantissa_bit)) - 1).detach().view(1,1).repeat(self.n_a,1)\n\n self.calibrated = True\n\n def _get_similarity(self, tensor_raw, tensor_sim, metric=None, raw_grad=None):\n \"\"\"\n tensor_raw: *, features\n tensor_sim: *, features\n similarity: *\n It's your job to calculate mean on * dims!\n \"\"\"\n if metric == \"cosine\":\n similarity = F.cosine_similarity(tensor_raw, tensor_sim, dim=-1)\n else:\n if metric == \"L1_norm\":\n similarity = -torch.abs(tensor_raw - tensor_sim)\n elif metric == \"linear_weighted_L2_norm\":\n similarity = -tensor_raw.abs() * (tensor_raw - tensor_sim) ** 2\n elif metric == \"square_weighted_L2_norm\":\n similarity = -(tensor_raw * (tensor_raw - tensor_sim)) ** 2\n elif metric == \"L2_norm\":\n similarity = -(tensor_raw - tensor_sim) ** 2\n else:\n raise NotImplementedError(f\"metric {metric} not implemented!\")\n similarity = torch.mean(similarity, dim=-1)\n return similarity\n\n def _get_pearson_w(self, tensor_raw, tensor_sim):\n \"\"\"\n Quick implementation of similarity-aware linear quantization\n tensor_sim: b,*,parallel_eq_n,n_V,crb_rows\n tensor_raw: b,*,1,n_V,crb_rows\n \"\"\"\n b, parallel_eq_n, n_V = tensor_sim.shape[0],tensor_sim.shape[-3],tensor_sim.shape[-2]\n tensor_sim = tensor_sim.transpose(-1,-3).contiguous_().view(b,-1,n_V,parallel_eq_n)\n tensor_raw = tensor_raw.transpose(-1,-3).view(b,-1,n_V,1)\n tensor_sim_mean = tensor_sim.mean(dim=[0,1],keepdim=True)\n tensor_raw_mean = tensor_raw.mean(dim=[0,1],keepdim=True)\n similarity = torch.cosine_similarity(tensor_raw-tensor_raw_mean, tensor_sim-tensor_sim_mean, dim=1) # shape: b,n_V,parallel_eq_n\n similarity = similarity.permute(0,2,1).contiguous_()\n return similarity\n \n def _get_pearson_a(self, tensor_raw, tensor_sim):\n \"\"\"\n Quick implementation of similarity-aware linear quantization\n tensor_sim: b,*,parallel_eq_n,oc\n tensor_raw: b,*,1,oc\n \"\"\"\n b, parallel_eq_n = tensor_sim.shape[0],tensor_sim.shape[-2]\n tensor_sim = tensor_sim.transpose(-1,-2).contiguous_().view(b,-1,parallel_eq_n)\n tensor_raw = tensor_raw.transpose(-1,-2).view(b,-1,1)\n tensor_sim_mean = tensor_sim.mean(dim=[0,1],keepdim=True)\n tensor_raw_mean = tensor_raw.mean(dim=[0,1],keepdim=True)\n similarity = torch.cosine_similarity(tensor_raw-tensor_raw_mean, tensor_sim-tensor_sim_mean, dim=1) # shape: b,parallel_eq_n\n return similarity\n\n def _search_best_w_interval(self, weight_interval_candidates):\n \n # tmp_w_interval = self.w_interval.unsqueeze(0) # shape: 1,n_V,1,n_H,1\n # print(f\"weight_interval_candidates shape 
{weight_interval_candidates.shape}\")\n for man in range(weight_interval_candidates.shape[0]):\n tmp_w_interval = self.w_intervals[man].unsqueeze(0) # shape: 1,n_V,1,n_H,1\n for h in range(self.n_H):\n batch_similarities = [] # similarities, need to concatenate and calculate sum (equivalent to mean with argmax)\n # print(f\"before search E{self.w_bit-1-man}M{man} self.w_intervals[man] {self.w_intervals[man][0][0]}\")\n for b_st in range(0, self.calib_size, self.calib_batch_size):\n b_ed = min(self.calib_size, b_st + self.calib_batch_size)\n x = self.raw_input[b_st:b_ed].to(self.weight.device)\n raw_out_expanded = self.raw_out[b_st:b_ed].to(self.weight.device).unsqueeze(-2) # shape: b,*,1,oc\n raw_out_expanded = torch.cat(torch.chunk(raw_out_expanded.unsqueeze(-2), chunks=self.n_V, dim=-1), dim=-2) # shape: b,*,1,n_V,crb_rows\n raw_grad = self.raw_grad\n similarities = []\n for p_st in range(0,self.eq_n,self.parallel_eq_n):\n p_ed = min(self.eq_n, p_st+self.parallel_eq_n)\n cur_w_interval = tmp_w_interval.repeat(p_ed-p_st,1,1,1,1)\n # print(f\"cur_w_interval {cur_w_interval.shape}\")\n cur_w_interval[:,:,:,h:h+1,:] = weight_interval_candidates[man][p_st:p_ed,:,:,h:h+1,:]\n # quantize weight and bias \n w_sim = self.weight.view(self.n_V,self.crb_rows,self.n_H,self.crb_cols).unsqueeze(0) # shape: 1,n_V,crb_rows,n_H,crb_cols\n\n if self.w_bit == 8:\n w, cur_w_scale = self.get_scale(w_sim, bits = self.w_bit, mantissa_bit= man+2, bias= cur_w_interval)\n else:\n w, cur_w_scale = self.get_scale(w_sim, bits = self.w_bit, mantissa_bit= man, bias= cur_w_interval)\n \n \n w_sim = (w/cur_w_scale).round_().mul_(cur_w_scale) # shape: parallel_eq_n,n_V,crb_rows,n_H,crb_cols\n w_sim = w_sim.view(-1,self.in_features) # shape: parallel_eq_n*oc,ic\n bias_sim = self.bias.repeat(p_ed-p_st) if self.bias is not None else None\n # quantize input\n x_sim = self.quant_input(x)\n # calculate similarity and store them\n out_sim = F.linear(x_sim, w_sim, bias_sim) # shape: b,*,parallel_eq_n*oc\n out_sim = torch.cat(torch.chunk(out_sim.unsqueeze(-2), chunks=p_ed-p_st, dim=-1), dim=-2) # shape: b,*,parallel_eq_n,oc\n out_sim = torch.cat(torch.chunk(out_sim.unsqueeze(-2), chunks=self.n_V, dim=-1), dim=-2) # shape: b,*,parallel_eq_n,n_V,crb_rows\n if self.metric != \"pearson\":\n similarity = self._get_similarity(raw_out_expanded, out_sim, self.metric, raw_grad) # shape: b,*,parallel_eq_n,n_V\n if len(similarity.shape) > 3:\n similarity = torch.mean(similarity, dim=list(range(1,len(similarity.shape)-2))) # shape: b, parallel_eq_n, n_V\n else:\n similarity = self._get_pearson_w(raw_out_expanded, out_sim)\n similarity = similarity.sum(dim=0, keepdim=True) # shape: 1, parallel_eq_n, n_V\n similarities.append(similarity)\n # store best weight interval of h into tmp_w_interval\n similarities = torch.cat(similarities, dim=1) # shape: 1, eq_n, n_V\n batch_similarities.append(similarities)\n batch_similarities = torch.cat(batch_similarities, dim=0).sum(dim=0, keepdim=False) # shape: eq_n, n_V\n h_best_index = batch_similarities.argmax(dim=0).reshape(1,-1,1,1,1) # shape: 1,n_V,1,1,1\n tmp_w_interval[:,:,:,h:h+1,:] = torch.gather(weight_interval_candidates[man][:,:,:,h:h+1,:],dim=0,index=h_best_index)\n self.w_intervals[man] = tmp_w_interval.squeeze(dim=0)\n\n def _search_best_w_format(self):\n \n # print(f\"before search linear weight E{self.w_exponent_bit}M{self.w_mantissa_bit}\")\n \n # format candidate\n w_mantissa_bits_candidate = [i for i in range(self.w_bit-1)]\n \n batch_similarities = [] # similarities, need to concatenate 
and calculate sum (equivalent to mean with argmax)\n for b_st in range(0, self.calib_size, self.calib_batch_size):\n b_ed = min(self.calib_size, b_st + self.calib_batch_size)\n x = self.raw_input[b_st:b_ed].to(self.weight.device)\n raw_out = self.raw_out[b_st:b_ed].to(self.weight.device) # shape: b,*,1,oc\n raw_grad = self.raw_grad\n similarities = []\n # quantize input\n x_sim = self.quant_input(x)\n for w_mantissa_bit in w_mantissa_bits_candidate:\n if self.w_bit == 8:\n w_mantissa_bit = w_mantissa_bit + 2\n else:\n w_mantissa_bit = w_mantissa_bit\n w_sim = self.weight.view(self.n_V,self.crb_rows,self.n_H,self.crb_cols)\n w,cur_w_scale = self.get_scale(w_sim, bits = self.w_bit, mantissa_bit= w_mantissa_bit, bias= self.w_intervals[w_mantissa_bit])\n w_sim = (w/cur_w_scale).round_().mul_(cur_w_scale)\n w_sim = w_sim.view(-1,self.in_features)\n bias_sim = self.bias if self.bias is not None else None\n out_sim = F.linear(x_sim, w_sim, bias_sim) # shape: B,*,oc\n similarity = self._get_similarity(raw_out, out_sim, self.metric, raw_grad) #B,*,oc\n # print(f\"weight similarity shape {similarity.shape}\")\n similarity = torch.mean(similarity) # shape: 1\n similarities.append(similarity)\n similarities = torch.tensor(similarities)\n # print(f\"weight similarities {similarities}\")\n batch_similarities.append(similarities)\n batch_similarities = torch.vstack(batch_similarities)\n # print(f\"weight batch_similarities {batch_similarities}\")\n best_mantissa_bit = batch_similarities.sum(dim=0, keepdim=True).argmax(dim=1).item()\n \n if self.w_bit == 8:\n self.w_mantissa_bit = torch.tensor(best_mantissa_bit + 2).to(self.weight.device)\n self.w_exponent_bit = torch.tensor(self.w_bit - 1 - best_mantissa_bit).to(self.weight.device) \n \n else:\n self.w_mantissa_bit = torch.tensor(best_mantissa_bit).to(self.weight.device)\n self.w_exponent_bit = torch.tensor(self.w_bit - 1 - self.w_mantissa_bit).to(self.weight.device) \n \n self.w_interval = self.w_intervals[self.w_mantissa_bit]\n # print(f\"search result E{self.w_exponent_bit}M{self.w_mantissa_bit}\")\n # print(f\"after calibrate bias {self.w_interval[[0,10,30,40,50]]}\")\n # print(\"finish searching fp format for linear weight\")\n\n def _search_best_a_interval(self, input_interval_candidates):\n \n # print(f\"input_interval_candidates shape {input_interval_candidates.shape}\")\n for man in range(input_interval_candidates.shape[0]):\n tmp_a_interval = self.a_intervals[man].unsqueeze(-1) # shape: n_a,1,1\n # print(f\"tmp_a_interval.shape {tmp_a_interval.shape}\")\n for a in range(self.n_a):\n batch_similarities = [] # similarities, need to concatenate and calculate sum (equivalent to mean with argmax)\n for b_st in range(0, self.calib_size, self.calib_batch_size):\n b_ed = min(self.calib_size, b_st + self.calib_batch_size)\n x = self.raw_input[b_st:b_ed].to(self.weight.device)\n raw_out_expanded = self.raw_out[b_st:b_ed].to(self.weight.device).unsqueeze(-2) # shape: b,*,1,oc\n raw_grad = self.raw_grad\n similarities = []\n for p_st in range(0,self.eq_n,self.parallel_eq_n):\n p_ed = min(self.eq_n, p_st+self.parallel_eq_n)\n cur_a_interval = tmp_a_interval.repeat(1,1,p_ed-p_st) # shape: n_a,1,parallel_eq_n\n cur_a_interval[a:a+1,:,:] = input_interval_candidates[man][a:a+1,:,p_st:p_ed]\n # quantize weight and bias \n w_sim, bias_sim = self.quant_weight_bias()\n # quantize input\n x_sim=torch.cat(torch.chunk(x.unsqueeze(-2), chunks=self.n_a, dim=-1), dim=-2).unsqueeze(-1)\n \n if self.a_bit == 8:\n # print(f\"CUR a E{self.a_bit - 1 - man -2}M{man+2}\")\n 
cur_a, cur_a_scale = self.get_scale(x_sim, bits = self.a_bit, mantissa_bit= man+2, bias= cur_a_interval)\n else:\n cur_a, cur_a_scale = self.get_scale(x_sim, bits = self.a_bit, mantissa_bit= man, bias= cur_a_interval)\n\n x_sim=(cur_a/(cur_a_scale)).round_()*(cur_a_scale) # shape: b,*,n_a,crb_acts,parallel_eq_n\n x_sim = x_sim.permute(*list(range(len(x_sim.shape)-3)),-1,-3,-2).reshape(*x.shape[:-1],p_ed-p_st,x.shape[-1]) # shape: b,*,parallel_eq_n,ic\n # calculate similarity and store them\n out_sim = F.linear(x_sim, w_sim, bias_sim) # shape: b,*,parallel_eq_n,oc\n if self.metric != \"pearson\":\n similarity = self._get_similarity(raw_out_expanded, out_sim, self.metric, raw_grad) # shape: b,*,parallel_eq_n\n if len(similarity.shape) > 2:\n similarity = torch.mean(similarity, dim=list(range(1,len(similarity.shape)-1))) # shape: b, parallel_eq_n\n else:\n similarity = self._get_pearson_a(raw_out_expanded, out_sim)\n similarity = torch.sum(similarity, dim=0, keepdim=True) # shape: 1, parallel_eq_n\n similarities.append(similarity)\n # store best input interval and store in tmp_a_interval\n similarities = torch.cat(similarities, dim=1) # shape: 1, eq_n\n batch_similarities.append(similarities)\n batch_similarities = torch.cat(batch_similarities, dim=0).sum(dim=0, keepdim=False) # shape: eq_n\n a_best_index = batch_similarities.argmax(dim=0, keepdim=True).reshape(1,1,-1)\n tmp_a_interval[a:a+1,:,:] = torch.gather(input_interval_candidates[man][a:a+1,:,:],dim=2,index=a_best_index)\n self.a_intervals[man] = tmp_a_interval.squeeze(-1)\n\n def _search_best_a_format(self):\n \n batch_similarities = [] # similarities, need to concatenate and calculate sum (equivalent to mean with argmax)\n\n # format candidate\n if self.a_bit == 8:\n a_mantissa_bits_candidate = [i for i in range(self.a_bit-3)]\n else:\n a_mantissa_bits_candidate = [i for i in range(self.a_bit-1)]\n # quantize input\n w_sim, bias_sim = self.quant_weight_bias()\n # print(f\"before search linear activation E{self.a_exponent_bit}M{self.a_mantissa_bit}\")\n for b_st in range(0, self.calib_size, self.calib_batch_size):\n b_ed = min(self.calib_size, b_st + self.calib_batch_size)\n x = self.raw_input[b_st:b_ed].to(self.weight.device)\n raw_out = self.raw_out[b_st:b_ed].to(self.weight.device) # shape: b,*,oc\n raw_grad = self.raw_grad\n similarities = []\n \n for a_mantissa_bit in a_mantissa_bits_candidate:\n if self.a_bit == 8:\n a_mantissa_bit = a_mantissa_bit + 2\n \n x_sim = torch.cat(torch.chunk(x.unsqueeze(-2), chunks=self.n_a, dim=-1), dim=-2)\n cur_a, cur_a_scale = self.get_scale(x_sim, bits = self.a_bit, mantissa_bit= a_mantissa_bit, bias= self.a_intervals[a_mantissa_bit])\n x_sim=(cur_a/(cur_a_scale)).round_()*(cur_a_scale) # shape: B,*,n_a,crb_acts\n # print(f\"x_sim shape {x_sim.shape}\")\n if len(x.shape) == 3:\n x_sim = x_sim.view(x.shape[0],x.shape[1],x.shape[2])\n else:\n # print(f\"x {x.shape}\")\n # print(f\"raw_out {raw_out.shape}\")\n x_sim = x_sim.view(x.shape[0],1,x.shape[1])\n out_sim = F.linear(x_sim, w_sim, bias_sim) # shape: B,*,oc \n # print(f\"E{self.a_bit - 1 - a_mantissa_bit}M{a_mantissa_bit}\")\n # print(f\"search act out_sim {out_sim.shape}\")\n # print(f\"search act out_sim {out_sim[0][2][0:10]}\")\n # print(f\"raw_out {raw_out[0][2][0:10]}\")\n similarity = self._get_similarity(raw_out, out_sim, self.metric, raw_grad) #B,*,oc\n # print(f\"activation similarity shape {similarity.shape}\")\n similarity = torch.mean(similarity)\n # print(f\"similarity: {similarity}\")\n similarities.append(similarity)\n 
similarities = torch.tensor(similarities)\n batch_similarities.append(similarities)\n \n batch_similarities = torch.vstack(batch_similarities)\n best_mantissa_bit = batch_similarities.sum(dim=0, keepdim=True).argmax(dim=1).item()\n \n if self.a_bit == 8:\n self.a_mantissa_bit = torch.tensor(best_mantissa_bit + 2).to(self.weight.device)\n self.a_exponent_bit = torch.tensor(self.a_bit - 1 - best_mantissa_bit).to(self.weight.device) \n \n else:\n self.a_mantissa_bit = torch.tensor(best_mantissa_bit).to(self.weight.device)\n self.a_exponent_bit = torch.tensor(self.a_bit - 1 - best_mantissa_bit).to(self.weight.device) \n \n self.a_interval = self.a_intervals[self.a_mantissa_bit]\n # print(f\"search result linear activation E{self.a_exponent_bit}M{self.a_mantissa_bit}\")\n # print(f\"after calibrate bias {self.w_interval[[0,10,30,40,50]]}\")\n # print(\"finish searching fp format for linear activation\")\n\n def calibration_step2(self):\n \"\"\"\n Only use cached raw inputs/outs/grads\n \"\"\"\n self._initialize_calib_parameters()\n self._initialize_intervals()\n\n # prepare weight intervals and similarities\n weight_interval_candidates = []\n if self.w_bit == 8:\n for m in range(self.w_bit-3):\n weight_interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(-1,1,1,1,1) * self.w_intervals[m].unsqueeze(0)\n weight_interval_candidates.append(weight_interval_candidate.unsqueeze(0)) # shape: num_man_options,eq_n,n_V,1,n_H,1\n else:\n for m in range(self.w_bit-1):\n weight_interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(-1,1,1,1,1) * self.w_intervals[m].unsqueeze(0)\n weight_interval_candidates.append(weight_interval_candidate.unsqueeze(0)) # shape: num_man_options,eq_n,n_V,1,n_H,1\n weight_interval_candidates = torch.vstack(weight_interval_candidates)\n\n input_interval_candidates = []\n if self.a_bit == 8:\n for m in range(self.a_bit-3): \n input_interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(1,1,-1) * self.a_intervals[m].unsqueeze(-1)\n input_interval_candidates.append(input_interval_candidate.unsqueeze(0)) # shape: n_a,1,eq_n\n else:\n for m in range(self.a_bit-1): \n input_interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(1,1,-1) * self.a_intervals[m].unsqueeze(-1)\n input_interval_candidates.append(input_interval_candidate.unsqueeze(0)) # shape: n_a,1,eq_n\n input_interval_candidates = torch.vstack(input_interval_candidates)\n \n for e in range(self.search_round):\n # search for best weight interval\n self._search_best_w_interval(weight_interval_candidates)\n # search for best weight format\n self._search_best_w_format()\n # search for best input interval\n self._search_best_a_interval(input_interval_candidates)\n # search for best input format\n self._search_best_a_format()\n\n\n self.calibrated = True\n del self.raw_input, self.raw_out, self.raw_grad\n return None" }, { "identifier": "FPPTQSLQuantEmbedding_fpq_baseline", "path": "quant_layers/fp_embed.py", "snippet": "class FPPTQSLQuantEmbedding_fpq_baseline(FPPTQSLQuantEmbedding):\n def __init__(self, \n num_embeddings: int,\n embedding_dim: int,\n padding_idx: int,\n mode = \"raw\",\n bit = 8,\n exponent_bit = 4,\n bias_bit = None,\n 
bias_correction = False,\n metric=\"L2_norm\", search_round=1, eq_alpha=0, eq_beta=1, eq_n=100, parallel_eq_n=1, n_H=1, n_V=1):\n super().__init__(num_embeddings, embedding_dim, padding_idx, mode=mode, bit=bit, exponent_bit= exponent_bit, bias_bit=bias_bit, bias_correction=bias_correction, metric=metric, search_round=search_round, eq_alpha=eq_alpha, eq_beta=eq_beta, eq_n=eq_n, parallel_eq_n=parallel_eq_n, n_H=n_H, n_V=n_V)\n self.maxval = None\n self.intervals = None\n\n def _initialize_intervals_eval(self):\n\n self.n_V = self.num_embeddings\n self.crb_rows = self.num_embeddings // self.n_V\n maxval = self.weight.view(self.n_V, self.crb_rows,self.n_H,self.crb_cols).abs().amax([1,3],keepdim=True)\n self.maxval = maxval\n self.interval=(2**self.exponent_bit - torch.log2(maxval) + math.log2(2 - 2 ** (-self.mantissa_bit)) - 1)\n self.calibrated = True\n\n def _initialize_intervals(self):\n\n self.n_V = self.num_embeddings\n self.crb_rows = self.num_embeddings // self.n_V\n maxval = self.weight.view(self.n_V, self.crb_rows,self.n_H,self.crb_cols).abs().amax([1,3],keepdim=True)\n self.maxval = maxval\n self.interval=(2**self.exponent_bit - torch.log2(maxval) + math.log2(2 - 2 ** (-self.mantissa_bit)) - 1)\n self.intervals = []\n if self.bit == 8: ## need to constrain the exponent as too big exponent bits will result in overflow\n # E7M0, E6M1, E5M2, E4M3, E3M4, E2M5, E1M6, start with E5M2 as E7M0 and E6M1 usually performs quite bad and results in overflow\n for i in range(self.bit-3):\n M = i + 2\n E = self.bit - 1 - M\n self.intervals.append(2**E - torch.log2(self.maxval) + math.log2(2 - 2 ** (-M)) - 1)\n\n else:\n for i in range(self.bit-1):\n M = i\n E = self.bit - 1 - M\n self.intervals.append(2**E - torch.log2(self.maxval) + math.log2(2 - 2 ** (-M)) - 1)\n\n def _get_similarity(self, tensor_raw, tensor_sim, metric=None):\n \"\"\"\n tensor_raw: *, features\n tensor_sim: *, features\n similarity: *\n It's your job to calculate mean on * dims!\n \"\"\"\n if metric == \"cosine\":\n similarity = F.cosine_similarity(tensor_raw, tensor_sim, dim=-1)\n else:\n if metric == \"L1_norm\":\n similarity = -torch.abs(tensor_raw - tensor_sim)\n elif metric == \"L2_norm\":\n similarity = -(tensor_raw - tensor_sim) ** 2\n elif metric == \"linear_weighted_L2_norm\":\n similarity = -tensor_raw.abs() * (tensor_raw - tensor_sim) ** 2\n elif metric == \"square_weighted_L2_norm\":\n similarity = -(tensor_raw * (tensor_raw - tensor_sim)) ** 2\n else:\n raise NotImplementedError(f\"metric {metric} not implemented!\")\n similarity = torch.mean(similarity, dim=-1)\n return similarity\n\n def _search_best_interval(self, interval_candidates):\n \n # print(f\"interval_candidates shape {interval_candidates.shape}\")\n for man in range(interval_candidates.shape[0]):\n tmp_interval = self.intervals[man].unsqueeze(0) # shape: 1,n_V,1,n_H,1\n for h in range(self.n_H):\n similarities = []\n for p_st in range(0,self.eq_n,self.parallel_eq_n):\n p_ed = min(self.eq_n, p_st+self.parallel_eq_n)\n cur_w_interval = tmp_interval.repeat(p_ed-p_st,1,1,1,1)\n cur_w_interval[:,:,:,h:h+1,:] = interval_candidates[man][p_st:p_ed,:,:,h:h+1,:]\n # quantize weight and bias \n w_sim = self.weight.view(self.n_V,self.crb_rows,self.n_H,self.crb_cols).unsqueeze(0) # shape: 1,n_V,crb_rows,n_H,crb_cols\n \n if self.bit >= 8:\n w, cur_w_scale = self.get_scale(w_sim, bits = self.bit, mantissa_bit= man+2, bias= cur_w_interval)\n else:\n w, cur_w_scale = self.get_scale(w_sim, bits = self.bit, mantissa_bit= man, bias= cur_w_interval)\n\n w_sim = 
(w/cur_w_scale).round_().mul_(cur_w_scale) # shape: parallel_eq_n,n_V,crb_rows,n_H,crb_cols\n w_sim = w_sim.view(-1,self.num_embeddings,self.embedding_dim) # shape: parallel_eq_n*oc,ic\n \n\n similarity = self._get_similarity(self.weight.unsqueeze(0), w_sim, self.metric) # shape: B,*,parallel_eq_n,n_V\n if self.n_V == 1:\n similarity = similarity.sum(dim=1, keepdim=True)\n \n similarities.append(similarity)\n # store best weight interval of h into tmp_interval\n similarities = torch.cat(similarities, dim=0) # shape: eq_n, n_V\n h_best_index = similarities.argmax(dim=0).reshape(1,-1,1,1,1) # shape: 1,n_V,1,1,1\n tmp_interval[:,:,:,h:h+1,:] = torch.gather(interval_candidates[man][:,:,:,h:h+1,:],dim=0,index=h_best_index)\n self.intervals[man] = tmp_interval.squeeze(dim=0)\n\n def _search_best_format(self):\n \n # print(f\"before search linear weight E{self.w_exponent_bit}M{self.w_mantissa_bit}\")\n \n # format candidate\n if self.bit >= 8:\n mantissa_bits_candidate = [i for i in range(self.bit-3)]\n else:\n mantissa_bits_candidate = [i for i in range(self.bit-1)]\n \n similarities = []\n for mantissa_bit in mantissa_bits_candidate:\n if self.bit >= 8:\n shift_mantissa_bit = mantissa_bit + 2\n else:\n shift_mantissa_bit = mantissa_bit\n \n w_sim = self.weight.view(self.n_V,self.crb_rows,self.n_H,self.crb_cols)\n w, cur_w_scale = self.get_scale(w_sim, bits = self.bit, mantissa_bit= shift_mantissa_bit, bias= self.intervals[mantissa_bit])\n \n w_sim = (w/cur_w_scale)\n \n w_sim = w_sim.round_().mul_(cur_w_scale)\n\n \n w_sim = w_sim.view(-1,self.num_embeddings,self.embedding_dim)\n\n similarity = self._get_similarity(self.weight.unsqueeze(0), w_sim, self.metric) #B,*,oc\n similarity = torch.mean(similarity) # shape: 1\n similarities.append(similarity)\n similarities = torch.tensor(similarities)\n best_mantissa_bit = similarities.argmax(dim=0).item()\n \n if self.bit >= 8:\n self.mantissa_bit = torch.tensor(best_mantissa_bit + 2).to(self.weight.device)\n self.exponent_bit = torch.tensor(self.bit - 1 - best_mantissa_bit).to(self.weight.device) \n \n else:\n self.mantissa_bit = torch.tensor(best_mantissa_bit).to(self.weight.device) \n self.exponent_bit = torch.tensor(self.bit - 1 - best_mantissa_bit).to(self.weight.device) \n \n self.interval = self.intervals[best_mantissa_bit]\n\n def calibration_step2(self):\n\n self._initialize_intervals()\n\n # prepare intervals and similarities\n interval_candidates = []\n if self.bit >=8:\n for m in range(self.bit-3): #m 2 ~ 6\n interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(-1,1,1,1,1) * self.intervals[m].unsqueeze(0)\n interval_candidates.append(interval_candidate.unsqueeze(0)) # shape: num_man_options,eq_n,n_V,1,n_H,1\n \n else:\n for m in range(self.bit-1): #m 0 ~ 6\n interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(-1,1,1,1,1) * self.intervals[m].unsqueeze(0)\n interval_candidates.append(interval_candidate.unsqueeze(0)) # shape: num_man_options,eq_n,n_V,1,n_H,1\n interval_candidates = torch.vstack(interval_candidates)\n\n for e in range(self.search_round):\n # search for best weight interval\n self._search_best_interval(interval_candidates)\n # search for best weight format\n self._search_best_format()\n\n print(f\"search format E{self.exponent_bit}M{self.mantissa_bit}\")\n\n self.calibrated = True\n return None" } ]
from quant_layers.fp_linear import FPPTQSLBatchingQuantLinear_fpq_baseline
from quant_layers.fp_embed import FPPTQSLQuantEmbedding_fpq_baseline
9,686
bit = 8
exp_bit = 4

embed_name_list = ["qembedding"]
fc_name_list = [ "qlinear_query", "qlinear_key", "qlinear_value", "qlinear_o","qlinear_gate","qlinear_down","qlinear_up","qlinear_score"]
matmul_name_list = [ "qmatmul_qk", "qmatmul_scorev"]

w_bit = {name: bit for name in fc_name_list}
a_bit = {name: bit for name in fc_name_list}
embed_bit = {name: bit for name in embed_name_list}
A_bit = {name: bit for name in matmul_name_list}
B_bit = {name: bit for name in matmul_name_list}
w_exp_bit = {name: exp_bit for name in fc_name_list}
a_exp_bit = {name: exp_bit for name in fc_name_list}
embed_exp_bit = {name: exp_bit for name in embed_name_list}
A_exp_bit = {name: exp_bit for name in matmul_name_list}
B_exp_bit = {name: exp_bit for name in matmul_name_list}

ptqsl_embedding_kwargs = {
    "metric": "L2_norm",
    "eq_alpha": 0.01,
    "eq_beta": 1.2,
    "eq_n": 100,
    'search_round': 3,
    "n_V": 1,
    "n_H": 1
}

ptqsl_linear_kwargs = {
    "metric": "L2_norm",
    "eq_alpha": 0.01,
    "eq_beta": 1.2,
    "eq_n": 100,
    'search_round': 3,
    "n_V": 1,
    "n_H": 1,
    "n_a": 1,
    "bias_correction":True # Conventionally I'll not add an actual bias correction in linear
}

def get_module(module_type, *args, **kwargs):
    if "embedding" in module_type:
        kwargs.update(ptqsl_embedding_kwargs)
        module= FPPTQSLQuantEmbedding_fpq_baseline(*args,**kwargs,bit= embed_bit[module_type], exponent_bit=embed_exp_bit[module_type], padding_idx=0)
    elif "qlinear" in module_type:
        kwargs.update(ptqsl_linear_kwargs)
        if module_type == "qlinear_score":
            kwargs["n_V"] = 1
bit = 8
exp_bit = 4

embed_name_list = ["qembedding"]
fc_name_list = [ "qlinear_query", "qlinear_key", "qlinear_value", "qlinear_o","qlinear_gate","qlinear_down","qlinear_up","qlinear_score"]
matmul_name_list = [ "qmatmul_qk", "qmatmul_scorev"]

w_bit = {name: bit for name in fc_name_list}
a_bit = {name: bit for name in fc_name_list}
embed_bit = {name: bit for name in embed_name_list}
A_bit = {name: bit for name in matmul_name_list}
B_bit = {name: bit for name in matmul_name_list}
w_exp_bit = {name: exp_bit for name in fc_name_list}
a_exp_bit = {name: exp_bit for name in fc_name_list}
embed_exp_bit = {name: exp_bit for name in embed_name_list}
A_exp_bit = {name: exp_bit for name in matmul_name_list}
B_exp_bit = {name: exp_bit for name in matmul_name_list}

ptqsl_embedding_kwargs = {
    "metric": "L2_norm",
    "eq_alpha": 0.01,
    "eq_beta": 1.2,
    "eq_n": 100,
    'search_round': 3,
    "n_V": 1,
    "n_H": 1
}

ptqsl_linear_kwargs = {
    "metric": "L2_norm",
    "eq_alpha": 0.01,
    "eq_beta": 1.2,
    "eq_n": 100,
    'search_round': 3,
    "n_V": 1,
    "n_H": 1,
    "n_a": 1,
    "bias_correction":True # Conventionally I'll not add an actual bias correction in linear
}

def get_module(module_type, *args, **kwargs):
    if "embedding" in module_type:
        kwargs.update(ptqsl_embedding_kwargs)
        module= FPPTQSLQuantEmbedding_fpq_baseline(*args,**kwargs,bit= embed_bit[module_type], exponent_bit=embed_exp_bit[module_type], padding_idx=0)
    elif "qlinear" in module_type:
        kwargs.update(ptqsl_linear_kwargs)
        if module_type == "qlinear_score":
            kwargs["n_V"] = 1
module= FPPTQSLBatchingQuantLinear_fpq_baseline(*args,**kwargs,w_bit=w_bit[module_type],a_bit=a_bit[module_type],w_exponent_bit=w_exp_bit[module_type],a_exponent_bit=a_exp_bit[module_type])
0
2023-10-15 06:05:13+00:00
12k
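The configuration in this record defines per-layer bit-width dictionaries and a get_module factory but shows no call site. Below is a small usage sketch of the same kwarg-merging pattern; FakeQuantLinear and shared_linear_kwargs are invented stand-ins (the real class is FPPTQSLBatchingQuantLinear_fpq_baseline from quant_layers), so only the configuration flow, not an actual quantized layer, is demonstrated.

bit, exp_bit = 8, 4
fc_name_list = ["qlinear_query", "qlinear_key", "qlinear_value", "qlinear_o"]
w_bit = {name: bit for name in fc_name_list}
w_exp_bit = {name: exp_bit for name in fc_name_list}

# Shared search settings applied to every quantized linear layer.
shared_linear_kwargs = {"metric": "L2_norm", "eq_n": 100, "search_round": 3}

class FakeQuantLinear:
    """Stand-in for the real FP quantized linear; records what it was built with."""
    def __init__(self, in_features, out_features, **kwargs):
        self.in_features, self.out_features, self.kwargs = in_features, out_features, kwargs

def get_module(module_type, *args, **kwargs):
    # Merge the shared settings, then attach the per-layer bit widths by name,
    # mirroring the "qlinear" branch of the config's factory.
    kwargs.update(shared_linear_kwargs)
    return FakeQuantLinear(*args, **kwargs,
                           w_bit=w_bit[module_type],
                           w_exponent_bit=w_exp_bit[module_type])

layer = get_module("qlinear_query", 4096, 4096)
print(layer.kwargs["w_bit"], layer.kwargs["w_exponent_bit"])  # -> 8 4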
bcmi/libcom
libcom/controllable_composition/source/ControlCom/ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "libcom/controllable_composition/source/ControlCom/ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "libcom/controllable_composition/source/ControlCom/ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "libcom/controllable_composition/source/ControlCom/ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "libcom/controllable_composition/source/ControlCom/ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "libcom/controllable_composition/source/ControlCom/ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "libcom/controllable_composition/source/ControlCom/ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "libcom/controllable_composition/source/ControlCom/ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "libcom/controllable_composition/source/ControlCom/ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "libcom/controllable_composition/source/ControlCom/ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n 
self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "libcom/controllable_composition/source/ControlCom/ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "libcom/controllable_composition/source/ControlCom/ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "libcom/controllable_composition/source/ControlCom/ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "libcom/controllable_composition/source/ControlCom/ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "libcom/controllable_composition/source/ControlCom/ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = 
instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n 
@torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "libcom/controllable_composition/source/ControlCom/ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "libcom/controllable_composition/source/ControlCom/ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "libcom/controllable_composition/source/ControlCom/ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "libcom/controllable_composition/source/ControlCom/ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n 
self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n # if conditioning is not None:\n # if isinstance(conditioning, dict):\n # cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n # if cbs != batch_size:\n # print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n # else:\n # if conditioning.shape[0] != batch_size:\n # print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n # if unconditional_guidance_scale > 1 and unconditional_conditioning != None: \n # print('using classifier-free guidance for sampling')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,**kwargs):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n \n if 'test_model_kwargs' in kwargs:\n inputs = kwargs['test_model_kwargs']\n elif 'rest' in kwargs:\n inputs = kwargs['rest']\n else:\n raise Exception(\"kwargs must contain either 'test_model_kwargs' or 'rest' key\")\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n # intermediates = {'x_inter': [img], 'mask_inter': [mask], 'pred_x0': []}\n intermediates = {'x_inter': [img], 'pred_x0': [], 'attn': []}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n # iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(time_range):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None and i > 0.25*len(time_range):\n print('using blended diffusiong')\n img_orig = self.model.q_sample(inputs['bg_latent'], ts) # TODO: deterministic forward pass?\n img = img_orig * (1 - mask) + mask * img\n \n outs = self.p_sample_ddim(img, cond, ts, index=index, mask=mask, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n 
unconditional_conditioning=unconditional_conditioning,**kwargs)\n # img, pred_mask, pred_x0 = outs\n img, pred_x0, attn_t = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n # intermediates['mask_inter'].append(pred_mask)\n intermediates['pred_x0'].append(pred_x0)\n if len(attn_t) > 0:\n intermediates['attn'].extend([a[0:1] for a in attn_t])\n\n # return torch.cat([img, pred_mask], dim=1), intermediates\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, mask=None, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,**kwargs):\n b, *_, device = *x.shape, x.device\n if 'test_model_kwargs' in kwargs:\n inputs = kwargs['test_model_kwargs']\n elif 'rest' in kwargs:\n inputs = kwargs['rest']\n else:\n raise Exception(\"kwargs must contain either 'test_model_kwargs' or 'rest' key\")\n # x_start = inputs['latent']\n bg_latent = inputs['bg_latent']\n m = inputs['bg_mask']\n bbox = inputs['bbox']\n x_noisy = x\n x_input = torch.cat([x_noisy, bg_latent, 1-m], dim=1)\n\n if unconditional_conditioning is None or unconditional_guidance_scale <= 1:\n e_t, attn_t = self.model.apply_model(x_input, bbox, t, c)\n else:\n x_in = torch.cat([x_input] * 2)\n bbox_in = torch.cat([bbox] * 2) \n t_in = torch.cat([t] * 2)\n c_in = [torch.cat([uc_item, c_item]) for uc_item,c_item in zip(unconditional_conditioning, c)]\n e_t_double, attn_t = self.model.apply_model(x_in, bbox_in, t_in, c_in)\n e_t_uncond, e_t = e_t_double.chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if x.shape[1]!=4:\n pred_x0 = (x[:,:4,:,:] - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(dir_xt.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0, attn_t\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n return x_dec" } ]
from genericpath import samefile from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.rank_zero import rank_zero_only from libcom.controllable_composition.source.ControlCom.ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from libcom.controllable_composition.source.ControlCom.ldm.modules.ema import LitEma from libcom.controllable_composition.source.ControlCom.ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from libcom.controllable_composition.source.ControlCom.ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL from libcom.controllable_composition.source.ControlCom.ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from libcom.controllable_composition.source.ControlCom.ldm.models.diffusion.ddim import DDIMSampler from torchvision.transforms import Resize, Normalize from torch.autograd import Variable from omegaconf import OmegaConf from ldm.util import instantiate_from_config from PIL import Image from torch.utils.data import DataLoader import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import torch.nn.functional as F import math import time import random import os, torchvision import shutil
token_num: 9,468
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., u_cond_percent=0, ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization # print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size self.channels = channels self.u_cond_percent=u_cond_percent self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) # count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) # print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else:
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., u_cond_percent=0, ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization # print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size self.channels = channels self.u_cond_percent=u_cond_percent self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) # count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) # print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else:
next_line: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
gold_snippet_index: 14
created_at: 2023-10-19 05:08:12+00:00
level: 12k
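The p_sample_ddim method in the DDIMSampler snippet of the record above amounts to classifier-free guidance on the predicted noise followed by the standard DDIM update. Below is a minimal sketch of that update, not the repository's exact helper: the function name ddim_step is illustrative, and a_t / a_prev are assumed to be precomputed cumulative-alpha tensors broadcastable against the latent.

import torch

@torch.no_grad()
def ddim_step(x_t, e_t, a_t, a_prev, sigma_t):
    # Estimate x_0 from the current latent and the predicted noise.
    pred_x0 = (x_t - (1. - a_t).sqrt() * e_t) / a_t.sqrt()
    # Direction pointing back towards x_t, as in p_sample_ddim.
    dir_xt = (1. - a_prev - sigma_t ** 2).sqrt() * e_t
    # Stochastic term; sigma_t == 0 gives deterministic DDIM sampling.
    noise = sigma_t * torch.randn_like(x_t)
    return a_prev.sqrt() * pred_x0 + dir_xt + noise

# Classifier-free guidance as applied in the snippet before the update:
# e_t = e_t_uncond + unconditional_guidance_scale * (e_t_cond - e_t_uncond)

With eta = 0 (so sigma_t = 0) this is the deterministic DDIM step; a nonzero eta interpolates towards ancestral DDPM-style sampling.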
repo_name: e4s2023/E4S2023
file_path: img_recon.py
[ { "identifier": "CelebAHQDataset", "path": "datasets/dataset.py", "snippet": "class CelebAHQDataset(Dataset):\n \"\"\"\n CelebA-HQ数据集,具体数据来自于 https://github.com/ZPdesu/SEAN\n \"\"\"\n def __init__(self, dataset_root, mode=\"test\",\n img_transform=TO_TENSOR, label_transform=TO_TENSOR,\n load_vis_img=False, fraction=1.0,\n flip_p=-1, # negative means not flipping\n specific_ids: Union[list, tuple] = None,\n paired: bool = False,\n shuffle: bool = False,\n ):\n assert mode in (\"train\", \"test\", \"all\"), \"CelebAHQDataset mode type unsupported!\"\n self.mode = mode\n if mode in (\"all\",):\n self.roots = [osp.join(dataset_root, \"train\"), osp.join(dataset_root, \"test\")]\n else:\n self.roots = [osp.join(dataset_root, self.mode)]\n self.img_transform = img_transform\n self.label_transform = label_transform\n self.load_vis_img = load_vis_img\n self.fraction = fraction\n self.flip_p = flip_p\n self.paired = paired\n\n self.imgs = []\n self.labels = []\n self.labels_vis = []\n for root in self.roots:\n imgs = sorted(make_dataset(osp.join(root, \"images\")))\n imgs = imgs[:int(len(imgs)*self.fraction)]\n\n labels = sorted(make_dataset(osp.join(root, \"labels\")))\n labels = labels[:int(len(labels)*self.fraction)]\n\n labels_vis = sorted(make_dataset(osp.join(root, \"vis\"))) if self.load_vis_img else None\n labels_vis = labels_vis[:int(len(labels_vis)*self.fraction)] if self.load_vis_img else []\n\n self.imgs.extend(imgs)\n self.labels.extend(labels)\n self.labels_vis.extend(labels_vis)\n\n self.imgs, self.labels, self.labels_vis = self._filter_specific_ids(specific_ids)\n\n if self.load_vis_img:\n assert len(self.imgs) == len(self.labels) == len(self.labels_vis)\n else:\n assert len(self.imgs) == len(self.labels)\n\n print(f\"[CelebAHQDataset] files loaded. mode={self.mode}, #imgs={len(self.imgs)}, \"\n f\"#labels={len(self.labels)}, #vis={len(self.labels_vis)}\")\n\n # # 优化 600 个iteration 的style code保存路径\n # self.optim_codes_dir = \"/apdcephfs/share_1290939/zhianliu/py_projects/pytorch-DDP-demo/work_dirs/v0_8_stage2_entypeSEAN/optim_Results\"\n \n # image pairs indices\n self.indices = np.arange(len(self.imgs))\n\n # TODO: shuffle the indices\n if shuffle:\n np.random.shuffle(self.indices)\n\n self.pair_indices = self.indices.reshape(-1, 2)\n\n def __len__(self):\n if not self.paired:\n return len(self.indices)\n else:\n return len(self.pair_indices)\n\n def _filter_specific_ids(self, specific_ids: tuple):\n \"\"\" filter the images according to the specific_ids\n \"\"\"\n if specific_ids is None:\n return self.imgs, self.labels, self.labels_vis\n elif self.fraction < 1.0:\n raise ValueError(\"[CelebAHQDataset] specific_ids and fraction cannot be set simultaneously!\")\n\n # parse the tuple into two lists, e.g. 
((\"train\",\"12\"), (\"test\",\"45\")) -> (\"train\",\"train\") and (\"12\",\"45\")\n spec_modes, spec_ids = [], []\n id_order_dict = {}\n for idx, spec_id in enumerate(specific_ids):\n one_mode, one_id = spec_id[0], spec_id[1]\n spec_modes.append(one_mode)\n spec_ids.append(one_id)\n id_order_dict[one_id] = {\n \"mode\": one_mode, \"order\": idx,\n }\n\n # filter and re-order\n ret_imgs = [\"\"] * len(specific_ids)\n ret_labels = [\"\"] * len(specific_ids)\n ret_labels_vis = [\"\"] * len(specific_ids)\n found_cnt = 0\n for k in range(len(spec_ids)): # target specific ids\n one_spec_mode = spec_modes[k]\n one_spec_id = spec_ids[k]\n for idx in range(len(self.imgs)): # full dataset\n one_img = self.imgs[idx]\n one_label = self.labels[idx]\n one_label_vis = self.labels_vis[idx] if self.load_vis_img else None\n if one_spec_mode in one_img and one_spec_id == osp.basename(one_img): # found one\n found_cnt += 1\n one_spec_order = id_order_dict[one_spec_id][\"order\"]\n ret_imgs[one_spec_order] = one_img\n ret_labels[one_spec_order] = one_label\n ret_labels_vis[one_spec_order] = one_label_vis\n break\n\n if found_cnt < len(specific_ids):\n print(f\"[[Warning]][CelebAHQDataset] not enough images found (={found_cnt}) for \"\n f\"specific ids (={len(specific_ids)})!\")\n\n ret_imgs = list(filter(None, ret_imgs))\n ret_labels = list(filter(None, ret_labels))\n ret_labels_vis = list(filter(None, ret_labels_vis))\n return ret_imgs, ret_labels, ret_labels_vis\n\n def load_single_image(self, index):\n \"\"\"把一张图片的 原图, seg mask, 以及mask对应可视化的图都加载进来\n Args:\n index (int): 图片的索引\n Return:\n img: RGB图\n label: seg mask\n label_vis: seg mask的可视化图\n \"\"\"\n img_path = self.imgs[index]\n img = Image.open(img_path).convert('RGB')\n if self.img_transform is not None:\n img = self.img_transform(img)\n\n label = self.labels[index]\n # label = osp.join(\"/apdcephfs/share_1290939/zhianliu/py_projects/our_editing/ui_results\",\"%s_mask.png\"%osp.basename(label)[:-4])\n label = Image.open(label).convert('L')\n if self.label_transform is not None:\n label = self.label_transform(label)\n\n if self.load_vis_img:\n label_vis = self.labels_vis[index]\n label_vis = Image.open(label_vis).convert('RGB')\n label_vis = TO_TENSOR(label_vis)\n else:\n label_vis = -1 # unified interface\n return img, label, label_vis, img_path\n\n def _output_item(self, idx):\n if not self.paired:\n index = self.indices[idx]\n img, label, label_vis, img_path = self.load_single_image(index)\n if self.flip_p > 0:\n if random.random() < self.flip_p:\n img = TF.hflip(img)\n label = TF.hflip(label)\n return img, label, label_vis, img_path\n else:\n index1 = self.indices[idx * 2]\n index2 = self.indices[idx * 2 + 1]\n img1, label1, label_vis1, img_path1 = self.load_single_image(index1)\n img2, label2, label_vis2, img_path2 = self.load_single_image(index2)\n if self.flip_p > 0:\n if random.random() < self.flip_p:\n img1 = TF.hflip(img1)\n label1 = TF.hflip(label1)\n if random.random() < self.flip_p:\n img2 = TF.hflip(img2)\n label2 = TF.hflip(label2)\n return {\n \"bag1\": (img1, label1, label_vis1, img_path1),\n \"bag2\": (img2, label2, label_vis2, img_path2)\n }\n\n def __getitem__(self, idx):\n return self._output_item(idx)\n \n # # 1阶段重建的图片\n # img_name = osp.basename(self.imgs[index])[:-4]\n # recon_img = Image.open(osp.join(self.optim_codes_dir,img_name,\"%s_recon.png\"%img_name)).convert('RGB')\n # if self.img_transform is not None:\n # recon_img = self.img_transform(recon_img)\n \n # # 优化后的code\n # optim_code_path = 
osp.join(self.optim_codes_dir,img_name,\"%s_0600.npy\"%img_name)\n # assert osp.exists(optim_code_path), \"%s 文件不存在!\"%optim_code_path\n # optimed_style_code = np.load(optim_code_path)[0]\n \n # return img, recon_img, optimed_style_code, label, label_vis\n \n # pair_indices = self.pair_indices[idx, :]\n\n # img1, label1, label_vis1 = self.load_single_image(pair_indices[0])\n # img2, label2, label_vis2 = self.load_single_image(pair_indices[1])\n\n # return (img1, img2), (label1, label2), (label_vis1, label_vis2)" }, { "identifier": "get_transforms", "path": "datasets/dataset.py", "snippet": "def get_transforms(normalize=True, toTensor=True):\n transform_list = []\n if toTensor:\n transform_list += [transforms.ToTensor()]\n\n if normalize:\n transform_list += [transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5))]\n return transforms.Compose(transform_list)" }, { "identifier": "TO_TENSOR", "path": "datasets/dataset.py", "snippet": "TO_TENSOR = transforms.ToTensor()" }, { "identifier": "NORMALIZE", "path": "datasets/dataset.py", "snippet": "NORMALIZE = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))" }, { "identifier": "MASK_CONVERT_TF", "path": "datasets/dataset.py", "snippet": "MASK_CONVERT_TF = transforms.Lambda(\n lambda celebAHQ_mask: __celebAHQ_masks_to_faceParser_mask(celebAHQ_mask))" }, { "identifier": "MASK_CONVERT_TF_DETAILED", "path": "datasets/dataset.py", "snippet": "MASK_CONVERT_TF_DETAILED = transforms.Lambda(\n lambda celebAHQ_mask: __celebAHQ_masks_to_faceParser_mask_detailed(celebAHQ_mask))" }, { "identifier": "Net3", "path": "models/networks.py", "snippet": "class Net3(nn.Module):\n \"\"\" FSEncoder + styleGAN2 \"\"\"\n\n def __init__(self,opts,):\n super(Net3, self).__init__()\n self.opts=opts\n assert self.opts.fsencoder_type in [\"psp\",\"sean\"]\n if self.opts.fsencoder_type==\"psp\":\n self.encoder = FSEncoder_PSP(mode='ir_se', opts=self.opts)\n dim_s_code = 256 + 512 + 512\n else:\n self.encoder = FSEncoder_SEAN(input_nc=3, output_nc=512,in_size = 256)\n dim_s_code = 512\n \n self.split_layer_idx = 5\n self.remaining_layer_idx = self.opts.remaining_layer_idx\n \n # 区分component 的 W+ space 的 MLPs\n self.MLPs = nn.ModuleList()\n for i in range(self.opts.num_seg_cls):\n self.MLPs.append(\n LocalMLP(\n dim_component=dim_s_code,\n dim_style=512,\n num_w_layers= self.remaining_layer_idx if self.remaining_layer_idx != 17 else 18\n )\n )\n \n self.G = Generator(size=self.opts.out_size, style_dim=512, n_mlp=8, split_layer_idx = self.split_layer_idx, remaining_layer_idx = self.remaining_layer_idx)\n\n # styleGAN的参数是否更新\n if not self.opts.train_G:\n for param in self.G.parameters():\n param.requires_grad = False\n # 注意,styleGAN的8层FC是永远不更新的\n else:\n for param in self.G.style.parameters():\n param.requires_grad = False\n \n # styleGAN的倒数几层不更新 (包括convs 和 ToRGBs)\n if self.remaining_layer_idx != 17:\n for param in self.G.convs[-(17-self.remaining_layer_idx):].parameters():\n param.requires_grad = False\n for param in self.G.to_rgbs[-(17-self.remaining_layer_idx)//2 - 1:].parameters():\n param.requires_grad = False\n \n \n def forward(self, img,mask, resize=False, randomize_noise=True,return_latents=False):\n \"\"\"输入一张RGB图和对应的mask,\n (1) encoder 得到对应的F/S空间的特征,\n (2) 再送到styleGAN得到一张输出的图片\n\n Args:\n img (Tensor): 一对RGB图, each with shape [bs,3,1024,1024]\n mask ([type]): 一对RGB图对应的mask图, each with shape [bs,#seg_cls,1024,1024]\n resize (bool, optional): G生成的图片是否 resize. Defaults to True.\n randomize_noise (bool, optional): 是否加入随机噪声. 
Defaults to True.\n return_latents (bool, optional): 是否返回style codes. Defaults to False.\n\n Returns:\n [type]: [description]\n \"\"\"\n if self.opts.fsencoder_type==\"psp\":\n codes_vector, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n else:\n codes_vector, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n codes=[]\n bs, num_comp = codes_vector.size(0), codes_vector.size(1)\n for i in range(num_comp):\n codes.append(self.MLPs[i](codes_vector[:,i,:])) \n codes=torch.stack(codes,dim=1) # [bs, #seg_cls, 13, 512]\n \n \n # # 剩下的几层不用分component\n # remaining_codes=[]\n # for i in range(len(self.remain_MLPs)):\n # remaining_codes.append(self.remain_MLPs[i](codes_vector.view(bs, -1)))\n # remaining_codes = torch.stack(remaining_codes,dim=1) # [bs,5,512]\n\n # normalize with respect to the center of an average face\n if self.opts.start_from_latent_avg:\n if self.opts.learn_in_w:\n # 为了保持接口统一,将后3层的 style code 也扩展出一个 #seg_cls 维度\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1) \n codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n if self.remaining_layer_idx != 17:\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1, 1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1, 1) \n codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n codes = codes + self.latent_avg.repeat(codes.shape[0],codes.shape[1],1, 1)\n \n # 1. 完全使用 style code i.e., G(w)\n images1, result_latent, structure_feats_GT = self.G([codes], structure_feats, mask, input_is_latent=True,\n randomize_noise=randomize_noise,return_latents=return_latents,\n use_structure_code=False)\n \n \n # # 2. 使用 style code 和 strcture code i.e., G(w,F)\n # images2, _ , _ = self.G([codes], structure_feats, mask, input_is_latent=True,\n # randomize_noise=randomize_noise,return_latents=return_latents,\n # use_structure_code=True)\n \n if return_latents:\n return images1, structure_feats_GT, result_latent\n else:\n return images1, structure_feats_GT\n\n def get_style(self, img, mask):\n \"\"\"输入一张RGB图和对应的mask, 得到各个component 对应的style codes\n \n Args:\n img (Tensor): RGB图, each with shape [bs,3,1024,1024]\n mask (Tensor): RGB图对应的mask图, each with shape [bs,#seg_cls,1024,1024]\n \n Returns:\n structure_feats(Tensor): 图片的structure code, with shape [bs,512,32,32], 注意,这里其实是相对于StyleGAN第层输出的残差\n all_codes(Tensor): 各个component 对应的style codes, with shape [bs,#comp,18,512]。\n !!! 
注意,前7层的各个compnent其实没有意义,只是为了统一接口让shape保持一致,用的时候只用第1个即可 !!!\n \"\"\"\n if self.opts.fsencoder_type==\"psp\":\n codes_vector, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n else:\n codes_vector, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n codes=[]\n bs, num_comp = codes_vector.size(0), codes_vector.size(1)\n for i in range(num_comp):\n codes.append(self.MLPs[i](codes_vector[:,i,:])) \n codes=torch.stack(codes,dim=1) # [bs, #seg_cls, 11,512]\n\n # # 剩下的几层不用分component\n # remaining_codes=[]\n # for i in range(len(self.remain_MLPs)):\n # remaining_codes.append(self.remain_MLPs[i](codes_vector.view(bs, -1)))\n # remaining_codes = torch.stack(remaining_codes,dim=1) # [bs,5,512]\n\n # normalize with respect to the center of an average face\n if self.opts.start_from_latent_avg:\n if self.opts.learn_in_w:\n # 为了保持接口统一,将后3层的 style code 也扩展出一个 #seg_cls 维度\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1) \n style_codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n if self.remaining_layer_idx != 17:\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1, 1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1, 1) \n style_codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n style_codes = codes + self.latent_avg.repeat(codes.shape[0],codes.shape[1],1, 1)\n \n return structure_feats, style_codes\n\n def get_style_vectors(self, img, mask):\n \"\"\"输入一张RGB图和对应的mask, 得到各个component 对应的style vectors\n \n Args:\n img (Tensor): RGB图, each with shape [bs,3,1024,1024]\n mask (Tensor): RGB图对应的mask图, each with shape [bs,#seg_cls,1024,1024]\n \n Returns:\n style_vectors(Tensor): with shape [bs,#seg_cls,512]\n \"\"\"\n if self.opts.fsencoder_type==\"psp\":\n style_vectors, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n else:\n style_vectors, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n \n return style_vectors, structure_feats\n \n def cal_style_codes(self,style_vectors):\n \"\"\"根据每个compnent的 style vector转到styleGAN的style code\"\"\"\n \n codes=[]\n bs, num_comp = style_vectors.size(0), style_vectors.size(1)\n for i in range(num_comp):\n codes.append(self.MLPs[i](style_vectors[:,i,:])) \n codes=torch.stack(codes,dim=1) # [bs, #seg_cls, 11,512]\n\n # # 剩下的几层不用分component\n # remaining_codes=[]\n # for i in range(len(self.remain_MLPs)):\n # remaining_codes.append(self.remain_MLPs[i](style_vectors.view(bs, -1)))\n # remaining_codes = torch.stack(remaining_codes,dim=1) # [bs,5,512]\n\n # normalize with respect to the center of an average face\n if self.opts.start_from_latent_avg:\n if self.opts.learn_in_w:\n # 为了保持接口统一,将后3层的 style code 也扩展出一个 #seg_cls 维度\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1) \n style_codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n if self.remaining_layer_idx != 17:\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1, 1)\n remaining_codes = 
self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1, 1) \n style_codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n style_codes = codes + self.latent_avg.repeat(codes.shape[0],codes.shape[1],1, 1)\n \n return style_codes\n\n def gen_img(self, struc_codes, style_codes, mask, randomize_noise=True, noise=None, return_latents=False):\n \"\"\"输入一张mask 和 对应各components的style codes,以及这张图片的structure code, 生成一张图片\n \n Args:\n style_codes (Tensor): 各个component 对应的style codes, with shape [bs,#comp,18,512]\n struc_codes (Tensor)\n mask (Tensor): mask图, with shape [bs,#seg_cls,1024,1024]\n \n randomize_noise (bool, optional): 是否加入随机噪声. Defaults to True.\n return_latents (bool, optional): 是否返回style codes. Defaults to False.\n\n Returns:\n [type]: [description]\n \"\"\"\n \n images, result_latent, structure_feats = self.G([style_codes], struc_codes, mask, input_is_latent=True,\n randomize_noise=randomize_noise,noise=noise,return_latents=return_latents,\n use_structure_code=False)\n\n if return_latents:\n return images, result_latent, structure_feats\n else:\n return images,-1, structure_feats" }, { "identifier": "TestOptions", "path": "options/test_options.py", "snippet": "class TestOptions:\n\n\tdef __init__(self):\n\t\tself.parser = ArgumentParser()\n\t\tself.initialize()\n\n\tdef initialize(self):\n\t\tself.parser.add_argument('--exp_dir', type=str, default=\"/apdcephfs/share_1290939/zhianliu/running_results/our_editing/work_dirs/dummy\",help='Path to experiment output directory')\n\t\tself.parser.add_argument('--num_seg_cls', type=int, default=12,help='Segmentation mask class number')\n\t\tself.parser.add_argument('--remaining_layer_idx', type=int, default=13, help='剩余的几层不用mask')\n # ================= 模型设置 相关 =====================\n\t\tself.parser.add_argument('--out_size', type=int, default=1024,help='output image size') \n\t\tself.parser.add_argument('--n_styles', default=11, type=int, help='StyleGAN层数')\n\t\tself.parser.add_argument('--fsencoder_type', type=str, default=\"psp\", help='FS Encode网络类型')\n\t\tself.parser.add_argument('--extra_encoder_input', type=str, default=\"diff_map\", help='额外的style code补偿Encode网络输入类型') \n # ================= 数据集 相关 =====================\n\t\tself.parser.add_argument('--dataset_root', default='/apdcephfs/share_1290939/zhianliu/datasets/CelebA-HQ', type=str, help='dataset root path')\n\t\tself.parser.add_argument('--ds_frac', default=1.0, type=float, help='dataset fraction')\n\t\tself.parser.add_argument('--test_batch_size', default=1, type=int, help='Batch size for testing and inference')\n\t\tself.parser.add_argument('--test_workers', default=4, type=int, help='Number of test/inference dataloader workers')\n\t\tself.parser.add_argument('--train_G', default=False, type=bool, help='Whether to train the styleGAN model')\n \n\t\tself.parser.add_argument('--output_size', default=1024, type=int, help='Output size of generator')\n\t\tself.parser.add_argument('--checkpoint_path', default=\"/apdcephfs/share_1290939/zhianliu/running_results/our_editing/work_dirs/exp_recon_id0.1_StyleGANWithMask_withoutNorm/checkpoints/iteration_30000.pt\", type=str, help='Path to model checkpoint')\n\t\tself.parser.add_argument('--save_dir', default=\"/apdcephfs/share_1290939/zhianliu/running_results/our_editing/work_dirs/exp_recon_StyleGANWithMask_wNorm/testResults\", type=str, help='Path to save dir') \n\t\tself.parser.add_argument('--device', default='cuda:0', type=str, help='Which GPU(s) to use')\n\n\t\tself.parser.add_argument('--start_from_latent_avg', 
action='store_true',default=True, help='Whether to add average latent vector to generate codes from encoder.')\n\t\tself.parser.add_argument('--learn_in_w', action='store_true', help='Whether to learn in w space instead of w+')\n \n\tdef parse(self):\n\t\topts = self.parser.parse_args()\n\t\treturn opts" }, { "identifier": "torch_utils", "path": "utils/torch_utils.py", "snippet": "def saveTensorToFile(tensor, save_path):\ndef interpolate(img, size):\ndef readImgAsTensor(img_path, gray=False, to_tensor=True, size=1024):\ndef featMap2im(var):\ndef tensor2im(var, is_zero_center: bool = True, ):\ndef im2tensor(var, add_c_dim: bool = False, norm: bool = True, std: bool = False):\ndef tensor2map(var,shown_mask_indices=None):\ndef vis_mask_in_color(mask):\ndef get_colors():\ndef vis_faces(log_hooks1):\ndef vis_faces_no_id(hooks_dict1, fig, gs, i):\ndef aggregate_loss_dict(agg_loss_dict):\ndef labelMap2OneHot(label, num_cls):\ndef remove_module_prefix(state_dict,prefix):\ndef requires_grad(model, flag=True):\ndef accumulate(model1, model2, decay=0.999):\n C, H, W = tensor.size()" }, { "identifier": "init_faceParsing_pretrained_model", "path": "swap_face_fine/face_parsing/face_parsing_demo.py", "snippet": "def init_faceParsing_pretrained_model(ckpt_path):\n parser = FaceParser(seg_ckpt=ckpt_path)\n\n print(\"Load faceParsing pre-traiend model success!\")\n\n return parser" }, { "identifier": "faceParsing_demo", "path": "swap_face_fine/face_parsing/face_parsing_demo.py", "snippet": "def faceParsing_demo(model, img: Image, convert_to_seg12=True):\n \"\"\"\n 提取 img 的face segmentation map\n \n args:\n model (Object): 加载好的预训练模型\n img (PIL.Image): [0, 255]范围的 PIL.Image 格式图片\n \"\"\"\n with torch.no_grad():\n seg = model(img).cpu().numpy().astype(np.uint8)\n \n if convert_to_seg12:\n seg = __ffhq_masks_to_faceParser_mask_detailed(seg)\n return seg" }, { "identifier": "vis_parsing_maps", "path": "swap_face_fine/face_parsing/face_parsing_demo.py", "snippet": "def vis_parsing_maps(image, parsing_anno, stride=1):\n \"\"\" 将原图 和 seg map 放到一起可视化\n \n args:\n img (PIL.Image): [0, 255]范围的 PIL.Image 格式图片\n parsing_anno (np.array): parsing之后的seg map, size为 [512, 512]\n return:\n vis_im (np.array): 可视化图片, 用cv2保存\n \"\"\"\n # Colors for all 20 parts\n part_colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0],\n [255, 0, 85], [255, 0, 170],\n [0, 255, 0], [85, 255, 0], [170, 255, 0],\n [0, 255, 85], [0, 255, 170],\n [0, 0, 255], [85, 0, 255], [170, 0, 255],\n [0, 85, 255], [0, 170, 255],\n [255, 255, 0], [255, 255, 85], [255, 255, 170],\n [255, 0, 255], [255, 85, 255], [255, 170, 255],\n [0, 255, 255], [85, 255, 255], [170, 255, 255]]\n\n im = image.resize((parsing_anno.shape[0], parsing_anno.shape[1]), Image.BILINEAR)\n im = np.array(im)\n vis_im = im.copy().astype(np.uint8)\n vis_parsing_anno = parsing_anno.copy().astype(np.uint8)\n vis_parsing_anno = cv2.resize(vis_parsing_anno, None, fx=stride, fy=stride, interpolation=cv2.INTER_NEAREST)\n vis_parsing_anno_color = np.zeros((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3)) + 255\n\n num_of_class = np.max(vis_parsing_anno)\n\n for pi in range(1, num_of_class + 1):\n index = np.where(vis_parsing_anno == pi)\n vis_parsing_anno_color[index[0], index[1], :] = part_colors[pi]\n\n vis_parsing_anno_color = vis_parsing_anno_color.astype(np.uint8)\n # print(vis_parsing_anno_color.shape, vis_im.shape)\n vis_im = cv2.addWeighted(cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR), 0.4, vis_parsing_anno_color, 0.6, 0)\n\n return vis_im" } ]
from torch.utils.data import DataLoader from datasets.dataset import CelebAHQDataset, get_transforms, TO_TENSOR, NORMALIZE, MASK_CONVERT_TF, MASK_CONVERT_TF_DETAILED from models.networks import Net3 from options.test_options import TestOptions from utils import torch_utils from tqdm import tqdm from PIL import Image from options.swap_face_options import SwapFaceOptions from swap_face_fine.face_parsing.face_parsing_demo import init_faceParsing_pretrained_model, faceParsing_demo, vis_parsing_maps import torchvision.transforms as transforms import glob import os import json import sys import pprint import torch import numpy as np
token_num: 7,957
""" This file runs the main training/val loop """ sys.path.append(".") sys.path.append("..") # 重建一张/几张图片 @torch.no_grad() def recon_imgs(opts, imgs_path, out_dir="./tmp"): net = Net3(opts).eval().to(opts.device) ckpt_dict=torch.load("/apdcephfs/share_1290939/zhianliu/running_results/our_editing/work_dirs/ablation_study/v_15_baseline_seg12_finetuneGD_8A100_remainLyrIdx13_flip_FFHQ_300KIters/checkpoints/iteration_300000.pt") net.latent_avg = ckpt_dict['latent_avg'].to(opts.device) net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["state_dict"],prefix="module.")) print("Load pre-trained weights.") # face parsing 模型 faceParsing_ckpt = "./pretrained/faceseg/79999_iter.pth" faceParsing_model = init_faceParsing_pretrained_model(faceParsing_ckpt) for idx, img_path in enumerate(tqdm(imgs_path)): img_pil = Image.open(img_path).convert("RGB") sample_name = os.path.basename(img_path)[:-4] mask = faceParsing_demo(faceParsing_model, img_pil, convert_to_seg12=True) # wrap data
""" This file runs the main training/val loop """ sys.path.append(".") sys.path.append("..") # 重建一张/几张图片 @torch.no_grad() def recon_imgs(opts, imgs_path, out_dir="./tmp"): net = Net3(opts).eval().to(opts.device) ckpt_dict=torch.load("/apdcephfs/share_1290939/zhianliu/running_results/our_editing/work_dirs/ablation_study/v_15_baseline_seg12_finetuneGD_8A100_remainLyrIdx13_flip_FFHQ_300KIters/checkpoints/iteration_300000.pt") net.latent_avg = ckpt_dict['latent_avg'].to(opts.device) net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["state_dict"],prefix="module.")) print("Load pre-trained weights.") # face parsing 模型 faceParsing_ckpt = "./pretrained/faceseg/79999_iter.pth" faceParsing_model = init_faceParsing_pretrained_model(faceParsing_ckpt) for idx, img_path in enumerate(tqdm(imgs_path)): img_pil = Image.open(img_path).convert("RGB") sample_name = os.path.basename(img_path)[:-4] mask = faceParsing_demo(faceParsing_model, img_pil, convert_to_seg12=True) # wrap data
next_line: img = transforms.Compose([TO_TENSOR, NORMALIZE])(img_pil)
gold_snippet_index: 3
created_at: 2023-10-15 12:15:01+00:00
level: 12k
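The img_recon.py record above stops just before the target completion, which wraps the loaded PIL image with the TO_TENSOR and NORMALIZE transforms defined in datasets/dataset.py. A minimal sketch of that preprocessing step using standard torchvision calls (the input file path here is hypothetical):

import torchvision.transforms as transforms
from PIL import Image

# TO_TENSOR maps a PIL RGB image to a float tensor in [0, 1];
# NORMALIZE with mean/std (0.5, 0.5, 0.5) rescales it to [-1, 1].
preprocess = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

img_pil = Image.open("example_face.png").convert("RGB")  # hypothetical input
img = preprocess(img_pil)   # shape [3, H, W], values in [-1, 1]
img = img.unsqueeze(0)      # add the batch dimension expected by the network

Net3.forward expects the segmentation mask as a [bs, #seg_cls, H, W] tensor, so the 12-class map returned by faceParsing_demo is presumably one-hot encoded first (the torch_utils snippet lists a labelMap2OneHot helper for exactly that).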
repo_name: sotopia-lab/sotopia
file_path: sotopia-chat/chat_server.py
[ { "identifier": "redis_agent", "path": "sotopia/agents/redis_agent.py", "snippet": "class RedisAgent(BaseAgent[Observation, AgentAction]):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n session_id: str | None = None,\n agent_profile: AgentProfile | None = None,\n ) -> None:\n def act(\n self,\n obs: Observation,\n ) -> AgentAction:\n async def aact(\n self,\n obs: Observation,\n ) -> AgentAction:\n def reset(\n self,\n reset_reason: str = \"\",\n ) -> None:" }, { "identifier": "LLMAgent", "path": "sotopia/agents/llm_agent.py", "snippet": "class LLMAgent(BaseAgent[Observation, AgentAction]):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n agent_profile: AgentProfile | None = None,\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n script_like: bool = False,\n ) -> None:\n super().__init__(\n agent_name=agent_name,\n uuid_str=uuid_str,\n agent_profile=agent_profile,\n )\n self.model_name = model_name\n self.script_like = script_like\n\n @property\n def goal(self) -> str:\n if self._goal is not None:\n return self._goal\n assert (\n len(self.inbox) > 0\n ), \"attribute goal has to be called after at least one step\"\n goal = generate_goal(\n self.model_name,\n background=self.inbox[0][\n 1\n ].to_natural_language(), # Only consider the first message for now\n )\n return goal\n\n @goal.setter\n def goal(self, goal: str) -> None:\n self._goal = goal\n\n def act(\n self,\n obs: Observation,\n gen_func: Callable[..., AgentAction] = generate_action,\n ) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n if len(obs.available_actions) == 1 and \"none\" in obs.available_actions:\n return AgentAction(action_type=\"none\", argument=\"\")\n else:\n action = gen_func(\n self.model_name,\n history=\"\\n\".join(\n f\"{y.to_natural_language()}\" for x, y in self.inbox\n ),\n turn_number=obs.turn_number,\n action_types=obs.available_actions,\n agent=self.agent_name,\n goal=self.goal,\n )\n return action\n\n async def aact(self, obs: Observation) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n if len(obs.available_actions) == 1 and \"none\" in obs.available_actions:\n return AgentAction(action_type=\"none\", argument=\"\")\n else:\n action, prompt = await agenerate_action(\n self.model_name,\n history=\"\\n\".join(\n f\"{y.to_natural_language()}\" for x, y in self.inbox\n ),\n turn_number=obs.turn_number,\n action_types=obs.available_actions,\n agent=self.agent_name,\n goal=self.goal,\n script_like=self.script_like,\n )\n return action" }, { "identifier": "EnvAgentComboStorage", "path": "sotopia/database/env_agent_combo_storage.py", "snippet": "class EnvAgentComboStorage(JsonModel):\n env_id: str = Field(default_factory=lambda: \"\", index=True)\n agent_ids: list[str] = Field(default_factory=lambda: [], index=True)" }, { "identifier": "AgentProfile", "path": "sotopia/database/persistent_profile.py", "snippet": "class AgentProfile(JsonModel):\n first_name: str = Field(index=True)\n last_name: str = Field(index=True)\n age: int = Field(index=True, default_factory=lambda: 0)\n occupation: str = Field(index=True, default_factory=lambda: \"\")\n gender: str = Field(index=True, default_factory=lambda: \"\")\n gender_pronoun: str = Field(index=True, default_factory=lambda: \"\")\n public_info: str = Field(index=True, default_factory=lambda: \"\")\n big_five: str = Field(index=True, default_factory=lambda: \"\")\n moral_values: list[str] = Field(index=False, default_factory=lambda: [])\n 
schwartz_personal_values: list[str] = Field(\n index=False, default_factory=lambda: []\n )\n personality_and_values: str = Field(index=True, default_factory=lambda: \"\")\n decision_making_style: str = Field(index=True, default_factory=lambda: \"\")\n secret: str = Field(default_factory=lambda: \"\")\n model_id: str = Field(default_factory=lambda: \"\")" }, { "identifier": "EnvironmentList", "path": "sotopia/database/persistent_profile.py", "snippet": "class EnvironmentList(JsonModel):\n name: str = Field(index=True)\n environments: list[str] = Field(default_factory=lambda: [])\n agent_index: list[str] | None = Field(default_factory=lambda: None)\n\n # validate the length of agent_index should be same as environments\n @root_validator\n def the_length_agent_index_matches_environments(cls, values: Any) -> Any:\n environments, agent_index = (\n values.get(\"environments\"),\n values.get(\"agent_index\"),\n )\n if agent_index is None:\n return values\n assert len(environments) == len(\n agent_index\n ), f\"Number of environments {len(environments)} and agent_index {len(agent_index)} do not match\"\n return values" }, { "identifier": "EnvironmentProfile", "path": "sotopia/database/persistent_profile.py", "snippet": "class EnvironmentProfile(JsonModel):\n codename: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"The codename of the environment\",\n )\n source: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"The source of the environment\",\n )\n scenario: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"A concrete scenario of where the social interaction takes place, the scenario should have two agents (agent1 and agent2), and you should illustrate the relationship between the two agents, and for what purpose agent1 is interacting with agent2. Please avoid mentioning specific names and occupations in the scenario and keep all the mentions gender-neutral. Also avoid generating scenarios that requires childrend (below 18) or elderly (above 70) to be involved.\",\n )\n agent_goals: list[str] = Field(\n default_factory=lambda: [],\n description=\"The social goals of each agent, which could include <extra_info>...</extra_info>, <clarification_hint>...</clarification_hint>, and <strategy_hint>...</strategy_hint> to help the agent achieve the goal. Avoid providing too specific strategy hint, try to be as abstract as possible. For example, use 'you can provide financial benefits to achieve your goal' instead of 'you can buy him a boba tea to achieve your goal.'\",\n )\n relationship: RelationshipType = Field(\n index=True,\n default_factory=lambda: RelationshipType.stranger,\n description=\"The relationship between the two agents, choose from: stranger, know_by_name, acquaintance, friend, romantic_relationship, family_member. 
Do not make up a relationship, but choose from the list, 0 means stranger, 1 means know_by_name, 2 means acquaintance, 3 means friend, 4 means romantic_relationship, 5 means family_member\",\n )\n age_constraint: str | None = Field(\n default_factory=lambda: None,\n description=\"The age constraint of the environment, a list of tuples, each tuple is a range of age, e.g., '[(18, 25), (30, 40)]' means the environment is only available to agent one between 18 and 25, and agent two between 30 and 40\",\n )\n occupation_constraint: str | None = Field(\n default_factory=lambda: None,\n description=\"The occupation constraint of the environment, a list of lists, each list is a list of occupations, e.g., '[['student', 'teacher'], ['doctor', 'nurse']]' means the environment is only available to agent one if agent one is a student or a teacher, and agent two is a doctor or a nurse\",\n )\n agent_constraint: list[list[str]] | None = Field(\n default_factory=lambda: None,\n )" }, { "identifier": "ReachGoalLLMEvaluator", "path": "sotopia/envs/evaluators.py", "snippet": "class ReachGoalLLMEvaluator(Evaluator):\n @beartype\n def __init__(\n self, model_name: LLM_Name, response_format: str = \"basic\"\n ) -> None:\n self.model_name = model_name\n self.prompt = \"\"\n self.response_format = response_format\n\n def __call__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n raise NotImplementedError(\n \"ReachGoalLLMEvaluator is not implemented for synchronous evaluation\"\n )\n\n @gin.configurable\n @beartype\n async def __acall__(\n self,\n turn_number: int,\n messages: list[tuple[str, Message]] | None,\n history: str = \"\",\n temperature: float = 0.7,\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n # filter did nothing\n if not history and messages:\n messages_filtered = [\n (x, y)\n for x, y in messages\n if \"did nothing\" not in y.to_natural_language()\n ]\n history = \"\\n\".join(\n [\n f\"{x} {y.to_natural_language()}\"\n if x != \"Environment\"\n else y.to_natural_language()\n for x, y in messages_filtered\n ]\n )\n response_format_class = (\n EnvResponsePlus if self.response_format == \"plus\" else EnvResponse\n )\n\n try:\n response: EnvResponsePlus | EnvResponse # fix type error from langchain 0.0.264. we don't need this line for langchain 0.0.263\n response, prompt = await agenerate(\n model_name=self.model_name,\n template=\"\"\"{history},\n Based on previous interactions, evaluate how well participants achieve their goals.\n Please following the format:\n {format_instructions}\n \"\"\",\n input_values=dict(history=history),\n output_parser=PydanticOutputParser[\n EnvResponsePlus | EnvResponse\n ](pydantic_object=response_format_class),\n temperature=temperature,\n )\n self.prompt = prompt\n response_list = []\n # TODO: multiple agents\n for dimension in response.agent_1_evaluation.dict().keys():\n response_list.append(\n (\n \"agent_1\",\n (\n (\n dimension,\n response.agent_1_evaluation.dict()[dimension][\n 1\n ],\n ),\n response.agent_1_evaluation.dict()[dimension][0],\n ),\n )\n )\n response_list.append(\n (\n \"agent_2\",\n (\n (\n dimension,\n response.agent_2_evaluation.dict()[dimension][\n 1\n ],\n ),\n response.agent_2_evaluation.dict()[dimension][0],\n ),\n )\n )\n return response_list\n except Exception as e:\n log.debug(f\"[red] Failed to generate environment response. 
{e}\")\n return []" }, { "identifier": "RuleBasedTerminatedEvaluator", "path": "sotopia/envs/evaluators.py", "snippet": "class RuleBasedTerminatedEvaluator(Evaluator):\n def __init__(\n self, max_turn_number: int = 20, max_stale_turn: int = 2\n ) -> None:\n self.max_turn_number = max_turn_number\n self.max_stale_turn = max_stale_turn\n\n def __call__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n # Rule 1: If the conversation is too long, terminate the conversation\n conversation_too_long = turn_number > self.max_turn_number\n # Rule 2: If one of the players leaves, terminate the conversation\n p1_leaving = (\n len(messages) > 1\n and isinstance(messages[-2][1], AgentAction)\n and messages[-2][1].action_type == \"leave\"\n )\n p2_leaving = (\n bool(len(messages))\n and isinstance(messages[-1][1], AgentAction)\n and messages[-1][1].action_type == \"leave\"\n )\n # Rule 3: If the conversation is stale for too long, terminate the conversation\n stale_count = 0\n for message in messages[::-1]:\n if message[0] == \"Environment\":\n continue\n assert isinstance(message[1], AgentAction)\n if message[1].action_type == \"none\":\n stale_count += 1\n else:\n break\n if stale_count > self.max_stale_turn:\n break\n stale_too_long = stale_count > self.max_stale_turn\n terminated = (\n conversation_too_long or p1_leaving or p2_leaving or stale_too_long\n )\n reasons_for_termination = (\n f\"{'The conversation is too long; ' if conversation_too_long else ''}\"\n f\"{'Agent 1 is leaving; ' if p1_leaving else ''}\"\n f\"{'Agent 2 is leaving; ' if p2_leaving else ''}\"\n f\"{'The conversation stales for too long; ' if stale_too_long else ''}\"\n )\n return [\n (\n \"environment\",\n ((\"terminated\", terminated), reasons_for_termination),\n )\n ]\n\n async def __acall__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n return self(turn_number, messages)" }, { "identifier": "ParallelSotopiaEnv", "path": "sotopia/envs/parallel.py", "snippet": "class ParallelSotopiaEnv(\n ParallelEnv[str, Observation, AgentAction], MessengerMixin\n):\n def __init__(\n self,\n available_action_types: set[ActionType] = set(\n [\"none\", \"speak\", \"non-verbal communication\", \"action\", \"leave\"]\n ),\n action_order: Literal[\n \"simutaneous\", \"round-robin\", \"random\"\n ] = \"simutaneous\",\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n evaluators: list[Evaluator] = [],\n terminal_evaluators: list[Evaluator] = [],\n uuid_str: str | None = None,\n env_profile: EnvironmentProfile | None = None,\n ) -> None:\n \"\"\"A sotopia environment for parallel agents.\n\n Args:\n available_action_types (set[ActionType], optional): The action types that are available to the agents. Defaults to set([\"none\", \"speak\", \"non-verbal communication\", \"action\"]).\n action_order (Literal[\"simutaneous\", \"round-robin\", \"random\"], optional): The order in which the agents take actions. Defaults to \"simutaneous\".\n model_name (LLM_Name, optional): The name of the language model to use. 
Defaults to \"gpt-3.5-turbo\".\n \"\"\"\n super().__init__()\n self.model_name = model_name\n self.background = ScriptBackground(\n scenario=\"\",\n p1_background=\"\",\n p2_background=\"\",\n p1_goal=\"\",\n p2_goal=\"\",\n p1_name=\"\",\n p2_name=\"\",\n )\n\n self.agents = []\n self.action_spaces = {}\n self.available_action_types = list(available_action_types)\n self.action_order = action_order\n self.action_mask: list[bool] = []\n self.evaluators = evaluators\n self.terminal_evaluators = terminal_evaluators\n\n # if an environment profile is provided, use it\n assert (\n env_profile or uuid_str\n ), \"Either env_profile or uuid_str must be provided\"\n if env_profile is not None:\n self.profile = env_profile\n # if a uuid is provided, try to load the environment profile from the database\n elif uuid_str is not None:\n # try retrieving profile from database\n try:\n self.profile = EnvironmentProfile.get(pk=uuid_str)\n except NotFoundError:\n raise ValueError(\n f\"Agent with uuid {uuid_str} not found in database\"\n )\n\n @configurable\n def reset(\n self,\n seed: int | None = None,\n options: dict[str, str] | None = None,\n agents: Agents | None = None,\n omniscient: bool = False,\n lite: bool = False,\n ) -> dict[str, Observation]:\n \"\"\"Starting a new episode. Must be called before step().\n\n Args:\n seed (int, optional): Seed for the environment. Defaults to None. Not used right now.\n options (dict, optional): Options for the environment. Defaults to None.\n \"partial_background_file\" (str): Path to a json file which need to contain a ScriptBackground object. The backgound can be incompleted (\"unknown\" for missing parts), and the missing parts will be filled in by the environment.\n \"full_background_file\" (str): Path to a json file which need to contain a ScriptBackground object. The backgound must be completed (no \"unknown\" for missing parts).\n omniscient (bool, optional): Whether the agents know the other agent's goal. 
Defaults to False.\n \"\"\"\n super().__init__()\n MessengerMixin.reset_inbox(self)\n assert (\n not options\n or not (\"partial_background_file\" in options)\n and not (\"full_background_file\" in options)\n ), \"partial_background_file and full_background_file are not supported anymore\"\n if agents is not None:\n assert agents, \"agents must be provided\"\n assert len(agents) == 2, \"Only supporting two agents right now\"\n agent_names = list(agents.keys())\n agent_goals = self.profile.agent_goals\n assert (\n len(agent_goals) == 2\n ), \"Only supporting two agents right now\"\n\n raw_background = ScriptBackground(\n scenario=self.profile.scenario,\n p1_background=get_bio(\n self.profile.relationship,\n agents[agent_names[0]].profile,\n agent_id=0,\n ),\n p2_background=get_bio(\n self.profile.relationship,\n agents[agent_names[1]].profile,\n agent_id=1,\n ),\n p1_goal=f\"<root viewer='agent_0'>{agent_goals[0]}</root>\",\n p2_goal=f\"<root viewer='agent_1'>{agent_goals[1]}</root>\",\n p1_name=agent_names[0],\n p2_name=agent_names[1],\n )\n\n if lite:\n raw_background.p1_background = \"\"\n raw_background.p2_background = \"\"\n\n self.background = ScriptBackground(\n scenario=render_text_for_environment(raw_background.scenario),\n p1_background=render_text_for_environment(\n raw_background.p1_background\n ),\n p2_background=render_text_for_environment(\n raw_background.p2_background\n ),\n p1_goal=render_text_for_environment(raw_background.p1_goal),\n p2_goal=render_text_for_environment(raw_background.p2_goal),\n p1_name=raw_background.p1_name,\n p2_name=raw_background.p2_name,\n )\n else:\n raise ValueError(\"agents must be provided\")\n\n self.agents = [self.background.p1_name, self.background.p2_name]\n agent_backgrounds: list[ScriptBackground] = []\n if omniscient:\n for i in range(self.num_agents):\n agent_backgrounds.append(copy.deepcopy(self.background))\n else:\n for i in range(self.num_agents):\n agent_backgrounds.append(\n ScriptBackground(\n scenario=render_text_for_agent(\n raw_background.scenario, i\n ),\n p1_background=render_text_for_agent(\n raw_background.p1_background, i\n ),\n p2_background=render_text_for_agent(\n raw_background.p2_background, i\n ),\n p1_goal=render_text_for_agent(\n raw_background.p1_goal, i\n ),\n p2_goal=render_text_for_agent(\n raw_background.p2_goal, i\n ),\n p1_name=raw_background.p1_name,\n p2_name=raw_background.p2_name,\n )\n )\n background_for_a = agent_backgrounds[0]\n background_for_b = agent_backgrounds[1]\n\n print(\"Is the agent omniscient?\", omniscient)\n if not omniscient:\n background_for_a.p2_goal = \"Unknown\"\n background_for_b.p1_goal = \"Unknown\"\n\n self.action_spaces = {\n agent: Dict(\n dict(\n action_type=Discrete(len(self.available_action_types)),\n argument=Text(256),\n )\n )\n for agent in self.agents\n }\n self.turn_number = 0\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[0] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n\n self.recv_message(\"Environment\", self.background)\n\n return {\n self.background.p1_name: Observation(\n last_turn=background_for_a.to_natural_language(),\n turn_number=0,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=background_for_b.to_natural_language(),\n turn_number=0,\n 
available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n }\n\n @beartype\n def step(\n self, actions: dict[str, AgentAction] | dict[str, dict[str, int | str]]\n ) -> tuple[\n dict[str, Observation],\n dict[str, float],\n dict[str, bool],\n dict[str, bool],\n dict[str, dict[Any, Any]],\n ]:\n # Time step ++\n self.turn_number += 1\n\n # For action sampled from action space, it needs to be converted into AgentAction\n complied_actions: dict[str, AgentAction] = {}\n for key in actions.keys():\n action = actions[key]\n if isinstance(action, AgentAction):\n complied_actions[key] = action\n else:\n action[\"action_type\"] = self.available_action_types[\n int(action[\"action_type\"])\n ]\n complied_actions[key] = AgentAction.parse_obj(action)\n\n # Masking actions from agent that are in turn\n for idx, agent in enumerate(self.agents):\n if not self.action_mask[idx]:\n complied_actions[agent] = AgentAction(\n action_type=\"none\", argument=\"\"\n )\n\n self.recv_message(\n \"Environment\", SimpleMessage(message=f\"Turn #{self.turn_number}\")\n )\n for agent, action in complied_actions.items():\n self.recv_message(agent, action)\n\n response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *(\n evaluator(\n turn_number=self.turn_number, messages=self.inbox\n )\n for evaluator in self.evaluators\n )\n )\n )\n )\n\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[self.turn_number % len(self.action_mask)] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n obs = _actions_to_natural_language(complied_actions)\n return (\n {\n self.background.p1_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=0),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=1),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n },\n {\n self.background.p1_name: (\n response.p1_rate\n if isinstance(response.p1_rate, float)\n else response.p1_rate[0]\n )\n if response.p1_rate\n else 0,\n self.background.p2_name: (\n response.p2_rate\n if isinstance(response.p2_rate, float)\n else response.p2_rate[0]\n )\n if response.p2_rate\n else 0,\n },\n {\n self.background.p1_name: response.terminated,\n self.background.p2_name: response.terminated,\n },\n {\n self.background.p1_name: False,\n self.background.p2_name: False,\n },\n {\n self.background.p1_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p1_rate or 0,\n },\n self.background.p2_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p2_rate or 0,\n },\n },\n )\n\n @beartype\n async def astep(\n self, actions: dict[str, AgentAction] | dict[str, dict[str, int | str]]\n ) -> tuple[\n dict[str, Observation],\n dict[str, float],\n dict[str, bool],\n dict[str, bool],\n dict[str, dict[Any, Any]],\n ]:\n # Time step ++\n self.turn_number += 1\n\n # For action sampled from action space, it needs to be converted into AgentAction\n complied_actions: dict[str, AgentAction] = {}\n for key in actions.keys():\n action = actions[key]\n if isinstance(action, AgentAction):\n complied_actions[key] = action\n else:\n 
action[\"action_type\"] = self.available_action_types[\n int(action[\"action_type\"])\n ]\n complied_actions[key] = AgentAction.parse_obj(action)\n\n # Masking actions from agent that are in turn\n for idx, agent in enumerate(self.agents):\n if not self.action_mask[idx]:\n complied_actions[agent] = AgentAction(\n action_type=\"none\", argument=\"\"\n )\n\n self.recv_message(\n \"Environment\", SimpleMessage(message=f\"Turn #{self.turn_number}\")\n )\n for agent, action in complied_actions.items():\n self.recv_message(agent, action)\n\n response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *await asyncio.gather(\n *[\n evaluator.__acall__(\n turn_number=self.turn_number,\n messages=self.inbox,\n )\n for evaluator in self.evaluators\n ]\n )\n )\n )\n )\n\n if response.terminated:\n terminal_response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *await asyncio.gather(\n *[\n evaluator.__acall__(\n turn_number=self.turn_number,\n messages=self.inbox,\n )\n for evaluator in self.terminal_evaluators\n ]\n )\n )\n )\n )\n # incorporate terminal response into response\n response.p1_rate = response.p1_rate or terminal_response.p1_rate\n response.p2_rate = response.p2_rate or terminal_response.p2_rate\n if response.comments and terminal_response.comments:\n response.comments += terminal_response.comments\n elif terminal_response.comments:\n response.comments = terminal_response.comments\n\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[self.turn_number % len(self.action_mask)] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n obs = _actions_to_natural_language(complied_actions)\n info = {\n self.background.p1_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p1_rate or 0,\n },\n self.background.p2_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p2_rate or 0,\n },\n }\n if response.terminated:\n info[\"rewards_prompt\"] = {\"overall_prompt\": self.terminal_evaluators[0].prompt} # type: ignore\n\n return (\n {\n self.background.p1_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=0),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=1),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n },\n {\n self.background.p1_name: (\n response.p1_rate\n if isinstance(response.p1_rate, float)\n else response.p1_rate[0]\n )\n if response.p1_rate\n else 0,\n self.background.p2_name: (\n response.p2_rate\n if isinstance(response.p2_rate, float)\n else response.p2_rate[0]\n )\n if response.p2_rate\n else 0,\n },\n {\n self.background.p1_name: response.terminated,\n self.background.p2_name: response.terminated,\n },\n {\n self.background.p1_name: False,\n self.background.p2_name: False,\n },\n info,\n )\n\n def render(self, mode: str = \"human\") -> None:\n pass\n\n def close(self) -> None:\n pass" }, { "identifier": "arun_one_episode", "path": "sotopia/server.py", "snippet": "@gin.configurable\nasync def arun_one_episode(\n env: ParallelSotopiaEnv,\n agent_list: Sequence[BaseAgent[Observation, AgentAction]],\n model_dict: dict[str, LLM_Name],\n 
omniscient: bool = False,\n script_like: bool = False,\n json_in_script: bool = False,\n tag: str | None = None,\n push_to_db: bool = False,\n) -> list[tuple[str, str, Message]]:\n agents = Agents({agent.agent_name: agent for agent in agent_list})\n environment_messages = env.reset(agents=agents, omniscient=omniscient)\n agents_model_names = [model_dict[\"agent1\"], model_dict[\"agent2\"]]\n for agent_name, agent_model in zip(env.agents, agents_model_names):\n if agent_model == \"human\":\n agents[agent_name] = HumanAgent(agent_name)\n elif agent_model == \"redis\":\n agents[agent_name] = RedisAgent(agent_name)\n elif script_like and not json_in_script:\n agents[agent_name] = ScriptWritingAgent(\n agent_name,\n model_name=agent_model,\n background=env.background,\n agent_names=env.agents,\n )\n else:\n agents[agent_name] = LLMAgent(\n agent_name, model_name=agent_model, script_like=script_like\n )\n agents.reset()\n\n messages: list[list[tuple[str, str, Message]]] = []\n\n # Main Event Loop\n done = False\n messages.append(\n [\n (\"Environment\", agent_name, environment_messages[agent_name])\n for agent_name in env.agents\n ]\n )\n # set goal for agents\n for index, agent_name in enumerate(env.agents):\n agents[agent_name].goal = env.profile.agent_goals[index]\n rewards: list[list[float]] = []\n reasons: list[str] = []\n while not done:\n # gather agent messages\n agent_messages: dict[str, AgentAction] = dict()\n actions = await asyncio.gather(\n *[\n agents[agent_name].aact(environment_messages[agent_name])\n for agent_name in env.agents\n ]\n )\n if script_like:\n # manually mask one message\n agent_mask = env.action_mask\n for idx in range(len(agent_mask)):\n print(\"Current mask: \", agent_mask)\n if agent_mask[idx] == 0:\n print(\"Action not taken: \", actions[idx])\n actions[idx] = AgentAction(action_type=\"none\", argument=\"\")\n else:\n print(\"Current action taken: \", actions[idx])\n\n # actions = cast(list[AgentAction], actions)\n for idx, agent_name in enumerate(env.agents):\n agent_messages[agent_name] = actions[idx]\n\n messages[-1].append(\n (agent_name, \"Environment\", agent_messages[agent_name])\n )\n\n # send agent messages to environment\n (\n environment_messages,\n rewards_in_turn,\n terminated,\n ___,\n info,\n ) = await env.astep(agent_messages)\n messages.append(\n [\n (\"Environment\", agent_name, environment_messages[agent_name])\n for agent_name in env.agents\n ]\n )\n # print(\"Environment message: \", environment_messages)\n # exit(0)\n rewards.append(\n [rewards_in_turn[agent_name] for agent_name in env.agents]\n )\n reasons.append(\n \" \".join(info[agent_name][\"comments\"] for agent_name in env.agents)\n )\n done = all(terminated.values())\n\n # TODO: clean up this part\n epilog = EpisodeLog(\n environment=env.profile.pk,\n agents=[agent.profile.pk for agent in agent_list],\n tag=tag,\n models=[model_dict[\"env\"], model_dict[\"agent1\"], model_dict[\"agent2\"]],\n messages=[\n [\n (m[0], m[1], m[2].to_natural_language())\n for m in messages_in_turn\n ]\n for messages_in_turn in messages\n ],\n reasoning=info[env.agents[0]][\"comments\"],\n rewards=[\n info[agent_name][\"complete_rating\"] for agent_name in env.agents\n ],\n rewards_prompt=info[\"rewards_prompt\"][\"overall_prompt\"],\n )\n rich.print(epilog.rewards_prompt)\n agent_profiles, conversation = epilog.render_for_humans()\n for agent_profile in agent_profiles:\n rich.print(agent_profile)\n for message in conversation:\n rich.print(message)\n\n if push_to_db:\n try:\n epilog.save()\n except 
Exception as e:\n logging.error(f\"Failed to save episode log: {e}\")\n # flatten nested list messages\n return list(itertools.chain(*messages))" } ]
import asyncio import logging import os import random import subprocess import redis.asyncio as redis import typer from asyncio import gather from asyncio import run as aiorun from datetime import datetime from logging import FileHandler from typing import Literal, cast from rich.logging import RichHandler from sotopia.agents import redis_agent from sotopia.agents.llm_agent import LLMAgent from sotopia.database import EnvAgentComboStorage from sotopia.database.persistent_profile import ( AgentProfile, EnvironmentList, EnvironmentProfile, ) from sotopia.envs.evaluators import ( ReachGoalLLMEvaluator, RuleBasedTerminatedEvaluator, ) from sotopia.envs.parallel import ParallelSotopiaEnv from sotopia.server import arun_one_episode
8,780
process = subprocess.Popen( ["git", "rev-parse", "HEAD"], shell=False, stdout=subprocess.PIPE ) git_head_hash = process.communicate()[0].strip() FORMAT = "%(asctime)s - %(levelname)s - %(name)s - %(message)s" logging.basicConfig( level=15, format=FORMAT, datefmt="[%X]", handlers=[ RichHandler(), FileHandler( datetime.now().strftime( f"./logs/%H_%M_%d_%m_%Y_{str(git_head_hash.decode('utf-8'))}.log" ) ), ], ) app = typer.Typer() async def _start_server_with_two_session_ids_and_agent_env_combo( session_ids: list[str], agent_env_combo_pk: str ) -> None: env_agent_combo_storage = EnvAgentComboStorage.get(agent_env_combo_pk)
process = subprocess.Popen( ["git", "rev-parse", "HEAD"], shell=False, stdout=subprocess.PIPE ) git_head_hash = process.communicate()[0].strip() FORMAT = "%(asctime)s - %(levelname)s - %(name)s - %(message)s" logging.basicConfig( level=15, format=FORMAT, datefmt="[%X]", handlers=[ RichHandler(), FileHandler( datetime.now().strftime( f"./logs/%H_%M_%d_%m_%Y_{str(git_head_hash.decode('utf-8'))}.log" ) ), ], ) app = typer.Typer() async def _start_server_with_two_session_ids_and_agent_env_combo( session_ids: list[str], agent_env_combo_pk: str ) -> None: env_agent_combo_storage = EnvAgentComboStorage.get(agent_env_combo_pk)
env = ParallelSotopiaEnv(
8
2023-10-23 19:47:26+00:00
12k
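The record above pairs a truncated source file with the single reference line "env = ParallelSotopiaEnv(" that a completion model is expected to produce next. Below is a minimal sketch of how such a record might be turned into a prompt and scored; the field names used here (context, import_statement, cropped_code, next_line) and the exact-match criterion are assumptions made for illustration, not something the record itself specifies.

def build_prompt(record: dict, max_context_snippets: int = 3) -> str:
    # Concatenate a few retrieved context snippets, the import block, and the
    # truncated file body into one completion prompt (assumed record layout).
    snippets = [entry["snippet"] for entry in record.get("context", [])[:max_context_snippets]]
    return "\n\n".join(snippets + [record["import_statement"], record["cropped_code"]])

def next_line_exact_match(prediction: str, record: dict) -> bool:
    # Compare only the first non-empty predicted line against the reference next line.
    predicted_lines = [line.strip() for line in prediction.splitlines() if line.strip()]
    first_line = predicted_lines[0] if predicted_lines else ""
    return first_line == record["next_line"].strip()

# Hypothetical usage with some model client (not part of the dataset):
#   prompt = build_prompt(record)
#   hit = next_line_exact_match(model.complete(prompt), record)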
qualabs/video-headline
player/tests.py
[ { "identifier": "EmbedView", "path": "player/views.py", "snippet": "class EmbedView(TemplateView):\n template_name = \"player/index.html\"\n\n def validate_domain(self, channel_allowed_domains, referer_domain):\n allowed_domains = settings.ALLOWED_DOMAINS + channel_allowed_domains\n\n if len(channel_allowed_domains) == 0:\n return True\n\n for allowed_domain in allowed_domains:\n secondary = allowed_domain\n allowed_domain = re.escape(allowed_domain).replace('\\\\*', '[a-zA-Z0-9_-]+')\n allowed_domain = re.compile(allowed_domain)\n if allowed_domain.match(str(referer_domain)):\n return True\n\n return False\n\n def get_context_data(self, **kwargs):\n context = super(EmbedView, self).get_context_data(**kwargs)\n\n poster_url, video, video_url, mime_type = self.get_video_data(kwargs.get('video_id'))\n channel = video.channel\n organization = video.organization\n\n if not organization.traffic_enabled:\n context['error'] = True\n context['message'] = 'The content is not available.'\n return context\n\n referer = self.request.META.get('HTTP_REFERER')\n\n referer_domain = None\n if referer:\n regex_domain = r'^(?:https?:\\/\\/)?(?:[^@\\/\\n]+@)?([^:\\/?\\n]+)'\n referer_domain = re.match(regex_domain, referer).group(1)\n\n adTagUrl = mark_safe(\n video.ads_vast_url or channel.ads_vast_url or ''\n ) if video.enable_ads else mark_safe('')\n\n if video.autoplay == 'c':\n autoplay = channel.autoplay\n else:\n autoplay = video.autoplay == 'y'\n\n if not autoplay:\n autoplay = ''\n\n if self.validate_domain(channel.allowed_domains, referer_domain):\n if video.state not in [LiveVideo.State.ON, Media.State.FINISHED]:\n context['error'] = True\n context['message'] = 'The content is not available.'\n\n return context\n\n else:\n video_data = {\n 'error': False,\n 'url': video_url,\n 'type': mime_type,\n 'laUrl': '',\n 'laType': '',\n 'certUrl': '',\n 'adTagUrl': adTagUrl,\n 'posterUrl': poster_url,\n 'autoplay': autoplay,\n 'tracking_api_url': '',\n 'player_api_key': '',\n 'qhub_analytics_enabled': '',\n 'qhub_analytics_plugin_url': '',\n 'channel': channel,\n 'organization': organization,\n 'video': video,\n 'playerCustomCss': ''\n }\n if self.request.GET.get('token'):\n video_data['token'] = b64decode(self.request.GET.get('token').encode('utf-8')).decode('utf-8')\n\n # Traking\n org_qtracking_config = organization.config.get('qtracking')\n\n if org_qtracking_config:\n player_api_key = org_qtracking_config.get('player_api_key')\n tracking_api_url = org_qtracking_config.get('tracking_api_url')\n tracking_enabled = org_qtracking_config.get('enabled')\n\n if player_api_key and tracking_enabled and tracking_api_url:\n video_data['player_api_key'] = player_api_key\n video_data['tracking_api_url'] = f'{tracking_api_url}/api/v1/tracking/'\n\n custom_player_css = organization.config.get('playerCustomCss')\n\n if custom_player_css: video_data['playerCustomCss'] = custom_player_css\n\n # Qhub analytics\n org_qhub_analytics_config = organization.config.get('qhub_analytics')\n\n if org_qhub_analytics_config:\n video_data['qhub_analytics_enabled'] = org_qhub_analytics_config.get('enabled',\n '')\n video_data['qhub_analytics_plugin_url'] = org_qhub_analytics_config.get(\n 'plugin_url', '')\n\n context.update(video_data)\n else:\n context['error'] = True\n context['message'] = 'Content is not available on this site.'\n\n return context\n\n @staticmethod\n def get_video_data(video_id):\n live_video = LiveVideo.objects.filter(video_id=video_id).first()\n\n if live_video:\n video_url = 
f'https://{live_video.cf_domain}/output.m3u8'\n poster_url = ''\n mime_type = 'application/x-mpegURL'\n\n return poster_url, live_video, video_url, mime_type\n\n media = Media.objects.filter(video_id=video_id).first()\n\n if media:\n poster_url, media_url, mime_type = media.get_urls()\n\n return poster_url, media, media_url, mime_type\n\n raise Http404('Video not found')" }, { "identifier": "Organization", "path": "organization/models/organization.py", "snippet": "class Organization(models.Model):\n \"\"\" Organization is the top most level entity \"\"\"\n\n name = models.CharField(max_length=100,\n unique=True,\n verbose_name='Name')\n contact_email = models.CharField(max_length=254,\n default='',\n blank=True,\n verbose_name='Contact Email')\n bucket_name = models.CharField(max_length=100,\n editable=False,\n default='',\n verbose_name='Bucket')\n cf_id = models.CharField(max_length=100,\n editable=False,\n default='',\n verbose_name='Cf_id')\n cf_domain = models.CharField(max_length=100,\n editable=False,\n default='',\n verbose_name='Cf_domain')\n plan = models.ForeignKey(Plan,\n models.PROTECT,\n null=True,\n default=1,\n related_name='organizations',\n verbose_name='Plan')\n config = JSONField(blank=True,\n default=dict,\n verbose_name='Configuration')\n aws_account = models.ForeignKey(AWSAccount,\n null=True,\n related_name='organizations',\n on_delete=models.PROTECT,\n verbose_name='AWS_account')\n upload_enabled = models.BooleanField(\n default=True,\n verbose_name='Enabled to upload videos'\n )\n traffic_enabled = models.BooleanField(\n default=True,\n verbose_name='Data traffic enabled'\n )\n security_enabled = models.BooleanField(\n default=False,\n verbose_name='URL security enabled'\n )\n\n def __str__(self):\n return self.name\n\n @property\n def cf_distribution_ids(self):\n dists = list(self.channels.values_list('cf_id', flat=True))\n dists += list(self.live_videos.values_list('cf_id', flat=True))\n\n if self.cf_id:\n dists.append(self.cf_id)\n\n return dists\n\n class Meta:\n verbose_name = 'Organization'\n verbose_name_plural = 'Organizations'" }, { "identifier": "Channel", "path": "organization/models/channel.py", "snippet": "class Channel(models.Model):\n \"\"\"\n Channels is the second level of organization and contains videos.\n \"\"\"\n channel_id = models.CharField(max_length=36, default=get_channel_id, unique=True,\n db_index=True,\n editable=False, verbose_name='channel_id')\n organization = models.ForeignKey(Organization, related_name='channels',\n on_delete=models.CASCADE,\n verbose_name='Organization')\n name = models.CharField(max_length=100, verbose_name='Name')\n allowed_domains = ArrayField(models.CharField(max_length=254), blank=True, null=True,\n verbose_name='Allowed domains', default=list)\n ads_vast_url = models.URLField(blank=True,\n null=True,\n max_length=1024,\n verbose_name='VAST URL (ads)')\n detect_adblock = models.BooleanField(default=False, verbose_name='Detect AdBlock?')\n autoplay = models.BooleanField(default=False, verbose_name='Autoplay?')\n cf_id = models.CharField(max_length=100,\n editable=False,\n default='',\n verbose_name='Cf_id')\n cf_domain = models.CharField(max_length=100,\n editable=False,\n default='',\n verbose_name='Cf_domain')\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Channel'\n verbose_name_plural = 'Channels'\n unique_together = ('organization', 'name')" }, { "identifier": "create_organizations", "path": "test_utils.py", "snippet": "def create_organizations(name, org_quantity, 
bucket_name='', contact_email='', cf_id='',\n cf_domain='', plan=None, config=None):\n organizations = []\n for number in range(1, org_quantity + 1):\n org = Organization.objects.create(\n name=f'{name} {number}',\n bucket_name=bucket_name,\n contact_email=contact_email,\n cf_id=cf_id,\n cf_domain=cf_domain,\n plan=plan,\n config=config if config else {}\n )\n\n organizations.append(org)\n\n return organizations" }, { "identifier": "create_user", "path": "test_utils.py", "snippet": "def create_user(username, password, organization):\n user = Account.objects.create_user(\n username=username,\n password=password,\n organization=organization,\n email=f'{username}@admin.com'\n )\n\n return user" }, { "identifier": "create_channels", "path": "test_utils.py", "snippet": "def create_channels(name, organization, quantity, allowed_domains=[],\n ads_vast_url=None, detect_adblock=False, autoplay=False,\n cf_domain='domain.cloudfront.com'):\n channels = []\n for number in range(1, quantity + 1):\n channel = Channel.objects.create(\n name=f'{name} {number}',\n organization=organization,\n allowed_domains=allowed_domains,\n ads_vast_url=ads_vast_url,\n detect_adblock=detect_adblock,\n autoplay=autoplay,\n cf_domain=f'{number}.{cf_domain}',\n cf_id=f'my_cf_id_{number}'\n )\n\n channels.append(channel)\n\n return channels" }, { "identifier": "create_videos", "path": "test_utils.py", "snippet": "def create_videos(name, created_by, organization, quantity, state=Media.State.WAITING_FILE,\n metadata=None, ads_vast_url=None, enable_ads=True, autoplay='c',\n created_at=None, media_type='video'):\n videos = []\n for number in range(1, quantity + 1):\n video = create_video(name, created_by, organization, number, state, metadata, ads_vast_url,\n enable_ads, autoplay, created_at, media_type)\n videos.append(video)\n\n return videos" }, { "identifier": "add_channel_to_video", "path": "test_utils.py", "snippet": "def add_channel_to_video(channel, video):\n video.channel = channel\n video.save()" }, { "identifier": "create_live_videos", "path": "test_utils.py", "snippet": "def create_live_videos(name, created_by, organization, quantity, state=LiveVideo.State.OFF,\n metadata=None, ads_vast_url=None, enable_ads=True, autoplay='c',\n created_at=None, ml_channel_arn='', input_state=[]):\n lives = []\n for number in range(1, quantity + 1):\n live = create_live_video(name, created_by, organization, number, state, metadata,\n ads_vast_url,\n enable_ads, autoplay, created_at, ml_channel_arn, input_state)\n\n lives.append(live)\n\n return lives" }, { "identifier": "Media", "path": "video/models/media.py", "snippet": "class Media(models.Model):\n '''\n Constants to represent the `state`s of the Video\n '''\n\n class State:\n WAITING_FILE = 'waiting_file'\n QUEUING_FAILED = 'queuing_failed'\n QUEUED = 'queued'\n PROCESSING = 'processing'\n PROCESSING_FAILED = 'processing_failed'\n FINISHED = 'finished'\n NOT_FINISHED = 'not_finished'\n FAILED = 'failed'\n\n CHOICES = (\n (WAITING_FILE, WAITING_FILE),\n (QUEUING_FAILED, QUEUING_FAILED),\n (QUEUED, QUEUED),\n (PROCESSING, PROCESSING),\n (PROCESSING_FAILED, PROCESSING_FAILED),\n (FINISHED, FINISHED)\n )\n\n AUTOPLAY_CHOICES = (('c', 'Channel'), ('y', 'Yes'), ('n', 'No'))\n\n MEDIA_TYPE_CHOICES = (('audio', 'Audio'), ('video', 'Video'))\n\n video_id = models.CharField(max_length=36,\n default=uuid.uuid4,\n unique=True,\n db_index=True,\n verbose_name='Content ID')\n\n name = models.CharField(max_length=254,\n verbose_name='Name')\n\n created_by = models.ForeignKey(Account,\n 
models.SET_NULL,\n related_name='uploaded_videos',\n verbose_name='Created by',\n null=True)\n\n organization = models.ForeignKey(Organization,\n models.CASCADE,\n related_name='media',\n verbose_name='Organization')\n\n channel = models.ForeignKey(Channel,\n models.CASCADE,\n null=True,\n blank=True,\n related_name='media',\n verbose_name='Channel')\n\n tags = models.ManyToManyField(Tag,\n related_name='media',\n verbose_name='Tags',\n blank=True)\n\n state = FSMField(default=State.WAITING_FILE,\n verbose_name='Video State',\n choices=State.CHOICES,\n protected=True)\n\n metadata = JSONField(\n max_length=500, blank=True, default={},\n verbose_name='Metadata'\n )\n\n ads_vast_url = models.URLField(\n blank=True,\n null=True,\n max_length=1024,\n verbose_name='VAST URL (ads)'\n )\n\n enable_ads = models.BooleanField(\n default=True,\n verbose_name='Enable Ads?'\n )\n\n autoplay = models.CharField(\n max_length=1,\n default='c',\n choices=AUTOPLAY_CHOICES,\n verbose_name='Autoplay?'\n )\n\n created_at = models.DateTimeField(\n editable=False,\n default=timezone.now,\n verbose_name='Created'\n )\n\n media_type = models.CharField(\n max_length=5,\n default='video',\n choices=MEDIA_TYPE_CHOICES,\n verbose_name='Content Type'\n )\n \n has_thumbnail = models.BooleanField(\n default=False,\n verbose_name='Has custom thumbnail?'\n )\n\n storage = models.BigIntegerField(default=0,\n verbose_name='Size in bytes')\n\n duration = models.IntegerField(default=0,\n verbose_name='Duration in seconds')\n\n def __str__(self):\n return f'{self.video_id} ({self.name})'\n\n class Meta:\n verbose_name = 'Content'\n verbose_name_plural = 'Contents'\n\n def get_urls(self):\n channel = self.channel\n\n # Hacky patch. Don't know how you'd get into this state!\n if channel is None:\n return \"\", \"\", \"\"\n \n media_url = ''\n\n # Default mime type for video\n mime_type = 'application/x-mpegURL'\n poster_url = ''\n\n if self.media_type == 'video':\n media_url = f'https://{channel.cf_domain}/{self.video_id}/hls/output.m3u8'\n poster_url = f'https://{channel.cf_domain}/{self.video_id}/thumbs/thumb_high.0000000.jpg'\n\n elif self.media_type == 'audio':\n media_url = f'https://{channel.cf_domain}/{self.video_id}/audio/output.mp4'\n mime_type = 'audio/mp4'\n\n thumb_path = 'thumb.jpg' if self.has_thumbnail else 'thumbs/thumb_high.0000000.jpg'\n poster_url = f'https://{channel.cf_domain}/{self.video_id}/{thumb_path}'\n\n return poster_url, media_url, mime_type\n\n @transition(field=state, source=State.WAITING_FILE, target=State.QUEUED)\n def _to_queued(self):\n pass\n\n @transition(field=state, source=State.WAITING_FILE, target=State.QUEUING_FAILED)\n def _to_queued_failed(self):\n pass\n\n @transition(field=state, source=State.QUEUED, target=State.PROCESSING)\n def _to_processing(self):\n pass\n\n @transition(field=state, source=State.PROCESSING, target=State.PROCESSING_FAILED)\n def _to_processing_failed(self):\n pass\n\n @transition(field=state, source=[State.PROCESSING, State.QUEUED], target=State.FINISHED)\n def _to_finished(self):\n pass\n\n @transition(field=state,\n source=[State.FINISHED, State.PROCESSING_FAILED, State.FAILED,\n State.QUEUING_FAILED],\n target=State.QUEUED)\n def _re_process(self):\n pass\n\n def to_queued(self):\n self._to_queued()\n # send video to transcode\n mediaconvert.transcode(self)\n self.save()\n\n def to_queued_failed(self):\n self._to_queued_failed()\n self.save()\n\n def to_processing(self):\n self._to_processing()\n self.save()\n\n def to_processing_failed(self):\n 
self._to_processing_failed()\n self.save()\n\n def to_finished(self):\n self._to_finished()\n self.storage = s3.get_size(self.organization, self.organization.bucket_name, self.video_id)\n self.save()\n\n def re_process(self):\n self._re_process()\n self.metadata = {}\n\n # Delete files on S3\n s3.delete_object(self.organization.bucket_name, '{}/thumb'.format(self.video_id),\n self.organization.aws_account)\n s3.delete_object(self.organization.bucket_name, '{}/hls'.format(self.video_id),\n self.organization.aws_account)\n\n # Invalidate cache on CloudFront\n cloudfront.create_invalidation(self.organization, self.channel.cf_id, [\n '/{}/thumb/*'.format(self.video_id),\n '/{}/hls/*'.format(self.video_id)\n ])\n\n mediaconvert.transcode(self)\n self.save()" }, { "identifier": "LiveVideo", "path": "video/models/live.py", "snippet": "class LiveVideo(models.Model):\n '''\n Constants to represent the state`s of the Streaming\n '''\n\n class State:\n OFF = 'off'\n ON = 'on'\n STARTING = 'starting'\n STOPPING = 'stopping'\n WAITING_INPUT = 'waiting_input'\n DELETING = 'deleting'\n\n CHOICES = (\n (OFF, OFF),\n (ON, ON),\n (STARTING, STARTING),\n (STOPPING, STOPPING)\n )\n\n class GeoType:\n WHITELIST = 'whitelist'\n BLACKLIST = 'blacklist'\n NONE = 'none'\n\n CHOICES = (\n (WHITELIST, WHITELIST),\n (BLACKLIST, BLACKLIST),\n (NONE, NONE)\n )\n\n AUTOPLAY_CHOICES = (('c', 'Channel'), ('y', 'Yes'), ('n', 'No'))\n\n video_id = models.CharField(max_length=36,\n default=uuid.uuid4,\n unique=True,\n db_index=True,\n verbose_name='Video ID')\n\n name = models.CharField(max_length=254,\n verbose_name='Name')\n\n created_by = models.ForeignKey(Account,\n models.SET_NULL,\n related_name='uploaded_live_video',\n verbose_name='Created by',\n null=True)\n\n organization = models.ForeignKey(Organization,\n models.CASCADE,\n related_name='live_videos',\n verbose_name='Organization')\n\n channel = models.ForeignKey(Channel,\n models.CASCADE,\n null=True,\n blank=True,\n related_name='live_videos',\n verbose_name='Channel')\n\n tags = models.ManyToManyField(Tag,\n related_name='live_videos',\n verbose_name='Tags',\n blank=True)\n\n state = FSMField(default=State.OFF,\n verbose_name='Live Video state',\n choices=State.CHOICES,\n protected=True)\n\n input_state = ArrayField(models.CharField(max_length=255,\n default='',\n verbose_name='Origin state'),\n default=list,\n blank=True)\n\n metadata = JSONField(max_length=500,\n blank=True,\n default=dict,\n verbose_name='Metadata')\n\n ads_vast_url = models.URLField(blank=True,\n null=True,\n max_length=1024,\n verbose_name='VAST URL (ads)')\n\n enable_ads = models.BooleanField(default=True,\n verbose_name='Enable Ads?')\n\n created_at = models.DateTimeField(editable=False,\n default=timezone.now,\n verbose_name='Created')\n\n ml_input_url = models.CharField(max_length=254,\n editable=False,\n default='',\n verbose_name='Input Url')\n\n ml_input_id = models.CharField(max_length=36,\n editable=False,\n default='',\n verbose_name='Input Id')\n\n ml_channel_arn = models.CharField(max_length=254,\n editable=False,\n default='',\n verbose_name='Channel Arn')\n\n sns_topic_arn = models.CharField(max_length=254,\n editable=False,\n default='',\n verbose_name='Topic Arn')\n\n autoplay = models.CharField(max_length=1,\n default='c',\n choices=AUTOPLAY_CHOICES,\n verbose_name='Autoplay?'\n )\n\n cf_id = models.CharField(max_length=100,\n verbose_name='Cf_id',\n editable=False,\n default='')\n\n cf_domain = models.CharField(max_length=100,\n verbose_name='Cf_domain',\n 
editable=False,\n default='')\n\n geolocation_type = models.CharField(max_length=20,\n editable=True,\n choices=GeoType.CHOICES,\n default=GeoType.NONE,\n verbose_name='Geolocation Type'\n )\n\n geolocation_countries = ArrayField(models.CharField(max_length=2,\n editable=True,\n default='',\n verbose_name='Geolocation Countries'),\n default=list,\n blank=True\n )\n\n def __init__(self, *args, **kwargs):\n super(LiveVideo, self).__init__(*args, **kwargs)\n self._old_geolocation_type = self.geolocation_type\n self._old_geolocation_countries = self.geolocation_countries\n self._old_channel = self.channel\n\n def __str__(self):\n return f'{self.video_id} ({self.name})'\n\n def ml_channel_id(self):\n return self.ml_channel_arn.split(':')[-1]\n\n class Meta:\n verbose_name = 'Live Video'\n verbose_name_plural = 'Live Videos'\n\n @transition(field=state, source=[State.STOPPING], target=State.OFF)\n def _to_off(self):\n pass\n\n @transition(field=state, source=[State.STARTING], target=State.WAITING_INPUT)\n def _to_waiting(self):\n pass\n\n @transition(field=state, source=[State.WAITING_INPUT], target=State.ON)\n def _to_on(self):\n pass\n\n @transition(field=state, source=[State.STOPPING, State.OFF], target=State.STARTING)\n def _to_starting(self):\n pass\n\n @transition(field=state, source=[State.STARTING, State.ON, State.WAITING_INPUT], target=State.STOPPING)\n def _to_stopping(self):\n pass\n\n @transition(field=state, source=[State.OFF], target=State.DELETING)\n def _to_deleting(self):\n pass\n \n def to_starting(self):\n self._to_starting()\n medialive.start_channel(self)\n self.save()\n\n def to_stopping(self):\n self._to_stopping()\n medialive.stop_channel(self)\n self.save()\n\n def to_waiting(self):\n self._to_waiting()\n cloudwatchlogs.check_input_state(self)\n self.save()\n\n def to_on(self):\n self._to_on()\n self.save()\n\n def to_off(self):\n self._to_off()\n self.input_state.clear()\n self.save()\n \n def to_deleting(self):\n self._to_deleting()\n self.save()\n channel_id = self.ml_channel_arn.split(':')[-1] \n account_id = self.organization.aws_account.account_id\n video_id = self.video_id\n try:\n medialive.delete_channel(channel_id, account_id)\n except medialive.ChannelNotFoundException:\n pass\n finally:\n medialive.delete_input(self.ml_input_id,account_id)\n cloudfront.update_distribution(self.organization, self.cf_id, False)\n cloudwatchevents.remove_targets(self)\n cloudwatchevents.delete_rule(self)\n sns.unsubscribe_all(self)\n sns.delete_topic(self)\n cloudfront._delete_cloudfront_distribution.delay(self.cf_id, account_id, video_id)" } ]
from django.test import TestCase, Client from django.urls import reverse from rest_framework import status from player.views import EmbedView from organization.models import Organization, Channel from test_utils import create_organizations, create_user, create_channels, create_videos, \ add_channel_to_video, create_live_videos from video.models import Media, LiveVideo
8,230
'video_id': video.video_id}) response = self.client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message']) def test_error_message_video_state_processing(self): video = \ create_videos('Video', self.user1, self.org1, 1, Media.State.PROCESSING, None, None, False)[0] add_channel_to_video(self.chan1, video) url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': video.video_id}) response = self.client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message']) def test_error_message_video_state_processing_failed(self): video = \ create_videos('Video', self.user1, self.org1, 1, Media.State.PROCESSING_FAILED, None, None, False)[0] add_channel_to_video(self.chan1, video) url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': video.video_id}) response = self.client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message']) def test_error_message_video_state_not_finished(self): video = \ create_videos('Video', self.user1, self.org1, 1, Media.State.NOT_FINISHED, None, None, False)[0] add_channel_to_video(self.chan1, video) url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': video.video_id}) response = self.client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message']) def test_error_message_video_state_failed(self): video = \ create_videos('Video', self.user1, self.org1, 1, Media.State.FAILED, None, None, False)[0] add_channel_to_video(self.chan1, video) url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': video.video_id}) response = self.client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message']) def test_error_message_video_with_disabled_org(self): video = \ create_videos('Video', self.user1, self.org1, 1, Media.State.FAILED, None, None, False)[0] add_channel_to_video(self.chan1, video) self.org1.upload_enable = False self.org1.save() url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': video.video_id}) response = self.client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message']) self.org1.upload_enable = True self.org1.save() def test_error_message_live_video_state_starting(self): live = \
INVALID_DOMAIN_MESSAGE = 'Content is not available on this site.' UNAVAILABLE_MESSAGE = 'The content is not available.' class PlayerTests(TestCase): @classmethod def setUpClass(cls): # Organizations cls.org1 = create_organizations('Organization', 1)[0] # Users cls.user1 = create_user('user1', '12345678', cls.org1) def setUp(self): # Channel with ads_vast_url self.chan1 = \ create_channels('Channel with ads vast', self.org1, 1, [], 'http://www.channel-vast-url.com')[0] # Channel with autoplay self.chan2 = \ create_channels('Channel with autoplay', self.org1, 1, [], None, False, True)[0] # Channel with allowed domains self.chan3 = \ create_channels('Channel with all allowed domains', self.org1, 1, [])[0] self.chan4 = \ create_channels('Channel with simple allowed domain', self.org1, 1, ['www.allowed-domain.com'])[0] self.chan5 = \ create_channels('Channel with wildcard domain', self.org1, 1, ['www.*.test.com'])[0] self.chan6 = \ create_channels('Channel with double wildcard domain', self.org1, 1, ['www.*.*.test.com'])[0] self.chan7 = \ create_channels('Channel with common domain', self.org1, 1, ['*.domain.com'])[0] # Video with default options self.video1 = \ create_videos('Video', self.user1, self.org1, 1, Media.State.FINISHED)[0] add_channel_to_video(self.chan1, self.video1) # Video with ads_vast_url and without enabled ads self.video2 = \ create_videos('Video with ads vast and without enable ads', self.user1, self.org1, 1, Media.State.FINISHED, None, 'http://www.video-vast-url.com', False)[0] add_channel_to_video(self.chan1, self.video2) # Video with ads_vast_url self.video3 = \ create_videos('Video with ads vast', self.user1, self.org1, 1, Media.State.FINISHED, None, 'http://www.video-vast-url.com')[0] add_channel_to_video(self.chan1, self.video3) # Video without ads_vast_url and with enable_ads false self.video4 = \ create_videos('Video without ads vast and with enable ads', self.user1, self.org1, 1, Media.State.FINISHED, None, None, False)[0] add_channel_to_video(self.chan1, self.video4) # Videos with autoplay options self.video5 = \ create_videos('Video with autoplay no', self.user1, self.org1, 1, Media.State.FINISHED, None, None, True, 'n')[0] add_channel_to_video(self.chan1, self.video5) self.video6 = \ create_videos('Video with autoplay yes', self.user1, self.org1, 1, Media.State.FINISHED, None, None, True, 'y')[0] add_channel_to_video(self.chan1, self.video6) self.client = Client(HTTP_REFERER='http://qhub-tests.com') def tearDown(self): Media.objects.all().delete() Channel.objects.all().delete() @classmethod def tearDownClass(cls): cls.org1.delete() # <editor-fold desc="Video vast TESTS"> def test_video_override_channel_vast(self): url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': self.video3.video_id}) response = self.client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate adTagUrl in response self.assertEquals(self.video3.ads_vast_url, response.context['adTagUrl']) def test_video_without_vast(self): url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': self.video1.video_id}) response = self.client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate adTagUrl in response self.assertEquals(self.chan1.ads_vast_url, response.context['adTagUrl']) # </editor-fold> # <editor-fold desc="Video no-ads flag TESTS"> def test_video_flag_override_channel_vast(self): url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 
'video_id': self.video3.video_id}) response = self.client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate adTagUrl in response self.assertEquals(self.video3.ads_vast_url, response.context['adTagUrl']) def test_video_flag_use_channel_vast(self): url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': self.video1.video_id}) response = self.client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate adTagUrl in response self.assertEquals(self.chan1.ads_vast_url, response.context['adTagUrl']) def test_video_flag_false_vast(self): url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': self.video4.video_id}) response = self.client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate adTagUrl in response self.assertEquals('', response.context['adTagUrl']) def test_video_flag_false_without_vast(self): url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': self.video4.video_id}) response = self.client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate adTagUrl in response self.assertEquals('', response.context['adTagUrl']) # </editor-fold> # <editor-fold desc="Allowed Domain TESTS"> def test_valid_all_domains(self): client = Client(HTTP_REFERER='http://www.allowed-domain.com') add_channel_to_video(self.chan3, self.video3) url = reverse('embed', kwargs={'video_id': self.video3.video_id}) response = client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertFalse(response.context['error']) def test_valid_simple_domain(self): client = Client(HTTP_REFERER='http://www.allowed-domain.com') add_channel_to_video(self.chan4, self.video3) url = reverse('embed', kwargs={'video_id': self.video3.video_id}) response = client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertFalse(response.context['error']) def test_invalid_simple_domain(self): client = Client(HTTP_REFERER='http://www.not-allowed-domain.com') add_channel_to_video(self.chan4, self.video3) url = reverse('embed', kwargs={'video_id': self.video3.video_id}) response = client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(INVALID_DOMAIN_MESSAGE, response.context['message']) # VALID WILDCARD def test_valid_wildcard_domain(self): client = Client(HTTP_REFERER='http://www.wildcard.test.com') add_channel_to_video(self.chan5, self.video3) url = reverse('embed', kwargs={'video_id': self.video3.video_id}) response = client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertFalse(response.context['error']) def test_second_valid_wildcard_domain(self): client = Client(HTTP_REFERER='http://www.wild-card.test.com') add_channel_to_video(self.chan5, self.video3) url = reverse('embed', kwargs={'video_id': self.video3.video_id}) response = client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertFalse(response.context['error']) def test_third_valid_wildcard_domain(self): client = 
Client(HTTP_REFERER='http://www.wild_c4rd-test.test.com') add_channel_to_video(self.chan5, self.video3) url = reverse('embed', kwargs={'video_id': self.video3.video_id}) response = client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertFalse(response.context['error']) def test_valid_double_wildcard_domain(self): client = Client(HTTP_REFERER='http://www.wild.card.test.com') add_channel_to_video(self.chan6, self.video3) url = reverse('embed', kwargs={'video_id': self.video3.video_id}) response = client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertFalse(response.context['error']) def test_valid_common_domain(self): client = Client(HTTP_REFERER='http://www.domain.com') add_channel_to_video(self.chan7, self.video3) url = reverse('embed', kwargs={'video_id': self.video3.video_id}) response = client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertFalse(response.context['error']) # INVALID WILDCARD def test_invalid_wildcard_domain(self): client = Client(HTTP_REFERER='http://www.wildcard.test.invalid.com') add_channel_to_video(self.chan5, self.video3) url = reverse('embed', kwargs={'video_id': self.video3.video_id}) response = client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(INVALID_DOMAIN_MESSAGE, response.context['message']) def test_second_invalid_wildcard_domain(self): client = Client(HTTP_REFERER='http://www.test.com') add_channel_to_video(self.chan5, self.video3) url = reverse('embed', kwargs={'video_id': self.video3.video_id}) response = client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(INVALID_DOMAIN_MESSAGE, response.context['message']) def test_third_invalid_wildcard_domain(self): client = Client(HTTP_REFERER='http://www.invalid.wildcard.test.com') add_channel_to_video(self.chan5, self.video3) url = reverse('embed', kwargs={'video_id': self.video3.video_id}) response = client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(INVALID_DOMAIN_MESSAGE, response.context['message']) def test_fourth_invalid_wildcard_domain(self): client = Client(HTTP_REFERER='http://www..test.com') add_channel_to_video(self.chan5, self.video3) url = reverse('embed', kwargs={'video_id': self.video3.video_id}) response = client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(INVALID_DOMAIN_MESSAGE, response.context['message']) def test_invalid_double_wildcard_domain(self): client = Client(HTTP_REFERER='http://www.wild.test.com') add_channel_to_video(self.chan6, self.video3) url = reverse('embed', kwargs={'video_id': self.video3.video_id}) response = client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response 
self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(INVALID_DOMAIN_MESSAGE, response.context['message']) def test_second_invalid_double_wildcard_domain(self): client = Client(HTTP_REFERER='http://www.wild.test.card.com') add_channel_to_video(self.chan6, self.video3) url = reverse('embed', kwargs={'video_id': self.video3.video_id}) response = client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(INVALID_DOMAIN_MESSAGE, response.context['message']) # VIDEO AUTOPLAY def test_channel_autoplay_with_video_autoplay_channel_must_autoplay(self): """ The channel as autoplay, the video has autoplay as the channel config. The video must autoplay """ client = Client() add_channel_to_video(self.chan2, self.video1) url = reverse('embed', kwargs={'channel_id': self.chan2.channel_id, 'video_id': self.video1.video_id}) response = client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # We check against the string given that it must be the word 'True' for it to work # and not something that evaluates to True self.assertTrue(str(response.context['autoplay']) == 'True') def test_channel_autoplay_with_video_autoplay_no_must_not_autoplay(self): """ The channel as autoplay, the video has autoplay as no. The video must not autoplay """ client = Client() add_channel_to_video(self.chan2, self.video5) url = reverse('embed', kwargs={'channel_id': self.chan2.channel_id, 'video_id': self.video5.video_id}) response = client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate autoplay in response self.assertTrue(response.context['autoplay'] == '') def test_channel_autoplay_with_video_autoplay_yes_must_autoplay(self): """ The channel as autoplay, the video has autoplay activated. The video must not autoplay """ client = Client() add_channel_to_video(self.chan2, self.video6) url = reverse('embed', kwargs={'channel_id': self.chan2.channel_id, 'video_id': self.video6.video_id}) response = client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # We check against the string given that it must be the word 'True' for it to work # and not something that evaluates to True self.assertTrue(str(response.context['autoplay']) == 'True') def test_channel_no_autoplay_with_video_autoplay_channel_must_not_autoplay(self): """ The channel has no autoplay, the video has autoplay as channel. The video must not autoplay """ client = Client() url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': self.video1.video_id}) response = client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate autoplay in response self.assertTrue(response.context['autoplay'] == '') def test_channel_no_autoplay_with_video_autoplay_no_must_not_autoplay(self): """ The channel has no autoplay, the video has autoplay no autoplay. 
The video must not autoplay """ client = Client() url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': self.video5.video_id}) response = client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate autoplay in response self.assertTrue(response.context['autoplay'] == '') def test_channel_no_autoplay_with_video_autoplay_yes_must_autoplay(self): """ The channel has no autoplay, the video has autoplay activated. The video must autoplay """ client = Client() url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': self.video6.video_id}) response = client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # We check against the string given that it must be the word 'True' for it to work # and not something that evaluates to True self.assertTrue(str(response.context['autoplay']) == 'True') # </editor-fold> # <editor-fold desc="Available Content Test" def test_error_message_video_state_waiting_file(self): video = \ create_videos('Video', self.user1, self.org1, 1, Media.State.WAITING_FILE, None, None, False)[0] add_channel_to_video(self.chan1, video) url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': video.video_id}) response = self.client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message']) def test_error_message_video_state_queued(self): video = \ create_videos('Video', self.user1, self.org1, 1, Media.State.QUEUED, None, None, False)[0] add_channel_to_video(self.chan1, video) url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': video.video_id}) response = self.client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message']) def test_error_message_video_state_queued_failed(self): video = \ create_videos('Video', self.user1, self.org1, 1, Media.State.QUEUING_FAILED, None, None, False)[0] add_channel_to_video(self.chan1, video) url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': video.video_id}) response = self.client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message']) def test_error_message_video_state_processing(self): video = \ create_videos('Video', self.user1, self.org1, 1, Media.State.PROCESSING, None, None, False)[0] add_channel_to_video(self.chan1, video) url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': video.video_id}) response = self.client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message']) def test_error_message_video_state_processing_failed(self): video = \ create_videos('Video', self.user1, self.org1, 1, Media.State.PROCESSING_FAILED, None, None, False)[0] add_channel_to_video(self.chan1, video) url = 
reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': video.video_id}) response = self.client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message']) def test_error_message_video_state_not_finished(self): video = \ create_videos('Video', self.user1, self.org1, 1, Media.State.NOT_FINISHED, None, None, False)[0] add_channel_to_video(self.chan1, video) url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': video.video_id}) response = self.client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message']) def test_error_message_video_state_failed(self): video = \ create_videos('Video', self.user1, self.org1, 1, Media.State.FAILED, None, None, False)[0] add_channel_to_video(self.chan1, video) url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': video.video_id}) response = self.client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message']) def test_error_message_video_with_disabled_org(self): video = \ create_videos('Video', self.user1, self.org1, 1, Media.State.FAILED, None, None, False)[0] add_channel_to_video(self.chan1, video) self.org1.upload_enable = False self.org1.save() url = reverse('embed', kwargs={'channel_id': self.chan1.channel_id, 'video_id': video.video_id}) response = self.client.get(url) # Validate status code self.assertEquals(status.HTTP_200_OK, response.status_code) # Validate error in response self.assertTrue(response.context['error']) # Validate specific error message self.assertEqual(UNAVAILABLE_MESSAGE, response.context['message']) self.org1.upload_enable = True self.org1.save() def test_error_message_live_video_state_starting(self): live = \
create_live_videos('Live', self.user1, self.org1, 1, LiveVideo.State.STARTING, None,
8
2023-10-17 19:44:32+00:00
12k
Qualcomm-AI-research/geometric-algebra-transformer
gatr/nets/axial_gatr.py
[ { "identifier": "SelfAttentionConfig", "path": "gatr/layers/attention/config.py", "snippet": "class SelfAttentionConfig:\n \"\"\"Configuration for attention.\n\n Parameters\n ----------\n in_mv_channels : int\n Number of input multivector channels.\n out_mv_channels : int\n Number of output multivector channels.\n num_heads : int\n Number of attention heads.\n in_s_channels : int\n Input scalar channels. If None, no scalars are expected nor returned.\n out_s_channels : int\n Output scalar channels. If None, no scalars are expected nor returned.\n additional_qk_mv_channels : int\n Whether additional multivector features for the keys and queries will be provided.\n additional_qk_s_channels : int\n Whether additional scalar features for the keys and queries will be provided.\n normalizer : str\n Normalizer function to use in sdp_dist attention\n normalizer_eps : float\n Small umerical constant for stability in the normalizer in sdp_dist attention\n multi_query: bool\n Whether to do multi-query attention\n attention_type : {\"scalar\", \"geometric\", \"sdp_dist\"}\n Whether the attention mechanism is based on the scalar product or also the join.\n pos_encoding : bool\n Whether to apply rotary positional embeddings along the item dimension to the scalar keys\n and queries.\n pos_enc_base : int\n Base for the frequencies in the positional encoding.\n output_init : str\n Initialization scheme for final linear layer\n increase_hidden_channels : int\n Factor by which to increase the number of hidden channels (both multivectors and scalars)\n dropout_prob : float or None\n Dropout probability\n \"\"\"\n\n multi_query: bool = True\n in_mv_channels: Optional[int] = None\n out_mv_channels: Optional[int] = None\n in_s_channels: Optional[int] = None\n out_s_channels: Optional[int] = None\n num_heads: int = 8\n additional_qk_mv_channels: int = 0\n additional_qk_s_channels: int = 0\n normalizer_eps: Optional[float] = 1e-3\n pos_encoding: bool = False\n pos_enc_base: int = 4096\n output_init: str = \"default\"\n checkpoint: bool = True\n increase_hidden_channels: int = 2\n dropout_prob: Optional[float] = None\n\n def __post_init__(self):\n \"\"\"Type checking / conversion.\"\"\"\n if isinstance(self.dropout_prob, str) and self.dropout_prob.lower() in [\"null\", \"none\"]:\n self.dropout_prob = None\n\n @property\n def hidden_mv_channels(self) -> Optional[int]:\n \"\"\"Returns the number of hidden multivector channels.\"\"\"\n\n if self.in_mv_channels is None:\n return None\n\n return max(self.increase_hidden_channels * self.in_mv_channels // self.num_heads, 1)\n\n @property\n def hidden_s_channels(self) -> Optional[int]:\n \"\"\"Returns the number of hidden scalar channels.\"\"\"\n\n if self.in_s_channels is None:\n return None\n\n hidden_s_channels = max(\n self.increase_hidden_channels * self.in_s_channels // self.num_heads, 4\n )\n\n # When using positional encoding, the number of scalar hidden channels needs to be even.\n # It also should not be too small.\n if self.pos_encoding:\n hidden_s_channels = (hidden_s_channels + 1) // 2 * 2\n hidden_s_channels = max(hidden_s_channels, 8)\n\n return hidden_s_channels\n\n @classmethod\n def cast(cls, config: Any) -> SelfAttentionConfig:\n \"\"\"Casts an object as SelfAttentionConfig.\"\"\"\n if isinstance(config, SelfAttentionConfig):\n return config\n if isinstance(config, Mapping):\n return cls(**config)\n raise ValueError(f\"Can not cast {config} to {cls}\")" }, { "identifier": "GATrBlock", "path": "gatr/layers/gatr_block.py", "snippet": "class 
GATrBlock(nn.Module):\n \"\"\"Equivariant transformer block for GATr.\n\n This is the biggest building block of GATr.\n\n Inputs are first processed by a block consisting of LayerNorm, multi-head geometric\n self-attention, and a residual connection. Then the data is processed by a block consisting of\n another LayerNorm, an item-wise two-layer geometric MLP with GeLU activations, and another\n residual connection.\n\n Parameters\n ----------\n mv_channels : int\n Number of input and output multivector channels\n s_channels: int\n Number of input and output scalar channels\n attention: SelfAttentionConfig\n Attention configuration\n mlp: MLPConfig\n MLP configuration\n dropout_prob : float or None\n Dropout probability\n \"\"\"\n\n def __init__(\n self,\n mv_channels: int,\n s_channels: int,\n attention: SelfAttentionConfig,\n mlp: MLPConfig,\n dropout_prob: Optional[float] = None,\n ) -> None:\n super().__init__()\n\n # Normalization layer (stateless, so we can use the same layer for both normalization\n # instances)\n self.norm = EquiLayerNorm()\n\n # Self-attention layer\n attention = replace(\n attention,\n in_mv_channels=mv_channels,\n out_mv_channels=mv_channels,\n in_s_channels=s_channels,\n out_s_channels=s_channels,\n output_init=\"small\",\n dropout_prob=dropout_prob,\n )\n self.attention = SelfAttention(attention)\n\n # MLP block\n mlp = replace(\n mlp,\n mv_channels=(mv_channels, 2 * mv_channels, mv_channels),\n s_channels=(s_channels, 2 * s_channels, s_channels),\n dropout_prob=dropout_prob,\n )\n self.mlp = GeoMLP(mlp)\n\n def forward(\n self,\n multivectors: torch.Tensor,\n scalars: torch.Tensor,\n reference_mv=None,\n additional_qk_features_mv=None,\n additional_qk_features_s=None,\n attention_mask=None,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Forward pass of the transformer block.\n\n Inputs are first processed by a block consisting of LayerNorm, multi-head geometric\n self-attention, and a residual connection. 
Then the data is processed by a block consisting\n of another LayerNorm, an item-wise two-layer geometric MLP with GeLU activations, and\n another residual connection.\n\n Parameters\n ----------\n multivectors : torch.Tensor with shape (..., items, channels, 16)\n Input multivectors.\n scalars : torch.Tensor with shape (..., s_channels)\n Input scalars.\n reference_mv : torch.Tensor with shape (..., 16) or None\n Reference multivector for the equivariant join operation in the MLP.\n additional_qk_features_mv : None or torch.Tensor with shape\n (..., num_items, add_qk_mv_channels, 16)\n Additional Q/K features, multivector part.\n additional_qk_features_s : None or torch.Tensor with shape\n (..., num_items, add_qk_mv_channels, 16)\n Additional Q/K features, scalar part.\n attention_mask: None or torch.Tensor or AttentionBias\n Optional attention mask.\n\n Returns\n -------\n outputs_mv : torch.Tensor with shape (..., items, channels, 16).\n Output multivectors\n output_scalars : torch.Tensor with shape (..., s_channels)\n Output scalars\n \"\"\"\n\n # Attention block: layer norm\n h_mv, h_s = self.norm(multivectors, scalars=scalars)\n\n # Attention block: self attention\n h_mv, h_s = self.attention(\n h_mv,\n scalars=h_s,\n additional_qk_features_mv=additional_qk_features_mv,\n additional_qk_features_s=additional_qk_features_s,\n attention_mask=attention_mask,\n )\n\n # Attention block: skip connection\n outputs_mv = multivectors + h_mv\n outputs_s = scalars + h_s\n\n # MLP block: layer norm\n h_mv, h_s = self.norm(outputs_mv, scalars=outputs_s)\n\n # MLP block: MLP\n h_mv, h_s = self.mlp(h_mv, scalars=h_s, reference_mv=reference_mv)\n\n # MLP block: skip connection\n outputs_mv = outputs_mv + h_mv\n outputs_s = outputs_s + h_s\n\n return outputs_mv, outputs_s" }, { "identifier": "EquiLinear", "path": "gatr/layers/linear.py", "snippet": "class EquiLinear(nn.Module):\n \"\"\"Pin-equivariant linear layer.\n\n The forward pass maps multivector inputs with shape (..., in_channels, 16) to multivector\n outputs with shape (..., out_channels, 16) as\n\n ```\n outputs[..., j, y] = sum_{i, b, x} weights[j, i, b] basis_map[b, x, y] inputs[..., i, x]\n ```\n\n plus an optional bias term for outputs[..., :, 0] (biases in other multivector components would\n break equivariance).\n\n Here basis_map are precomputed (see gatr.primitives.linear) and weights are the\n learnable weights of this layer.\n\n If there are auxiliary input scalars, they transform under a linear layer, and mix with the\n scalar components the multivector data. Note that in this layer (and only here) the auxiliary\n scalars are optional.\n\n This layer supports four initialization schemes:\n - \"default\": preserves (or actually slightly reducing) the variance of the data in\n the forward pass\n - \"small\": variance of outputs is approximately one order of magnitude smaller\n than for \"default\"\n - \"unit_scalar\": outputs will be close to (1, 0, 0, ..., 0)\n - \"almost_unit_scalar\": similar to \"unit_scalar\", but with more stochasticity\n\n Parameters\n ----------\n in_mv_channels : int\n Input multivector channels\n out_mv_channels : int\n Output multivector channels\n bias : bool\n Whether a bias term is added to the scalar component of the multivector outputs\n in_s_channels : int or None\n Input scalar channels. If None, no scalars are expected nor returned.\n out_s_channels : int or None\n Output scalar channels. 
If None, no scalars are expected nor returned.\n initialization : {\"default\", \"small\", \"unit_scalar\", \"almost_unit_scalar\"}\n Initialization scheme. For \"default\", initialize with the same philosophy as most\n networks do: preserve variance (approximately) in the forward pass. For \"small\",\n initalize the network such that the variance of the output data is approximately one\n order of magnitude smaller than that of the input data. For \"unit_scalar\", initialize\n the layer such that the output multivectors will be closer to (1, 0, 0, ..., 0).\n \"almost_unit_scalar\" is similar, but with more randomness.\n \"\"\"\n\n def __init__(\n self,\n in_mv_channels: int,\n out_mv_channels: int,\n in_s_channels: Optional[int] = None,\n out_s_channels: Optional[int] = None,\n bias: bool = True,\n initialization: str = \"default\",\n ) -> None:\n super().__init__()\n\n # Check inputs\n if initialization == \"unit_scalar\":\n assert bias, \"unit_scalar initialization requires bias\"\n if in_s_channels is None:\n raise NotImplementedError(\n \"unit_scalar initialization is currently only implemented for scalar inputs\"\n )\n\n self._in_mv_channels = in_mv_channels\n\n # MV -> MV\n self.weight = nn.Parameter(\n torch.empty((out_mv_channels, in_mv_channels, NUM_PIN_LINEAR_BASIS_ELEMENTS))\n )\n\n # We only need a separate bias here if that isn't already covered by the linear map from\n # scalar inputs\n self.bias = (\n nn.Parameter(torch.zeros((out_mv_channels, 1)))\n if bias and in_s_channels is None\n else None\n )\n\n # Scalars -> MV scalars\n self.s2mvs: Optional[nn.Linear]\n if in_s_channels:\n self.s2mvs = nn.Linear(in_s_channels, out_mv_channels, bias=bias)\n else:\n self.s2mvs = None\n\n # MV scalars -> scalars\n if out_s_channels:\n self.mvs2s = nn.Linear(in_mv_channels, out_s_channels, bias=bias)\n else:\n self.mvs2s = None\n\n # Scalars -> scalars\n if in_s_channels is not None and out_s_channels is not None:\n self.s2s = nn.Linear(\n in_s_channels, out_s_channels, bias=False\n ) # Bias would be duplicate\n else:\n self.s2s = None\n\n # Initialization\n self.reset_parameters(initialization)\n\n def forward(\n self, multivectors: torch.Tensor, scalars: Optional[torch.Tensor] = None\n ) -> Tuple[torch.Tensor, Union[torch.Tensor, None]]:\n \"\"\"Maps input multivectors and scalars using the most general equivariant linear map.\n\n The result is again multivectors and scalars.\n\n For multivectors we have:\n ```\n outputs[..., j, y] = sum_{i, b, x} weights[j, i, b] basis_map[b, x, y] inputs[..., i, x]\n = sum_i linear(inputs[..., i, :], weights[j, i, :])\n ```\n\n Here basis_map are precomputed (see gatr.primitives.linear) and weights are the\n learnable weights of this layer.\n\n Parameters\n ----------\n multivectors : torch.Tensor with shape (..., in_mv_channels, 16)\n Input multivectors\n scalars : None or torch.Tensor with shape (..., in_s_channels)\n Optional input scalars\n\n Returns\n -------\n outputs_mv : torch.Tensor with shape (..., out_mv_channels, 16)\n Output multivectors\n outputs_s : None or torch.Tensor with shape (..., out_s_channels)\n Output scalars, if scalars are provided. 
Otherwise None.\n \"\"\"\n\n outputs_mv = equi_linear(multivectors, self.weight) # (..., out_channels, 16)\n\n if self.bias is not None:\n bias = embed_scalar(self.bias)\n outputs_mv = outputs_mv + bias\n\n if self.s2mvs is not None and scalars is not None:\n outputs_mv[..., 0] += self.s2mvs(scalars)\n\n if self.mvs2s is not None:\n outputs_s = self.mvs2s(multivectors[..., 0])\n if self.s2s is not None and scalars is not None:\n outputs_s = outputs_s + self.s2s(scalars)\n else:\n outputs_s = None\n\n return outputs_mv, outputs_s\n\n def reset_parameters(\n self,\n initialization: str,\n gain: float = 1.0,\n additional_factor=1.0 / np.sqrt(3.0),\n use_mv_heuristics=True,\n ) -> None:\n \"\"\"Initializes the weights of the layer.\n\n Parameters\n ----------\n initialization : {\"default\", \"small\", \"unit_scalar\", \"almost_unit_scalar\"}\n Initialization scheme. For \"default\", initialize with the same philosophy as most\n networks do: preserve variance (approximately) in the forward pass. For \"small\",\n initalize the network such that the variance of the output data is approximately one\n order of magnitude smaller than that of the input data. For \"unit_scalar\", initialize\n the layer such that the output multivectors will be closer to (1, 0, 0, ..., 0).\n \"almost_unit_scalar\" is similar, but with more randomness.\n gain : float\n Gain factor for the activations. Should be 1.0 if previous layer has no activation,\n sqrt(2) if it has a ReLU activation, and so on. Can be computed with\n `torch.nn.init.calculate_gain()`.\n additional_factor : float\n Empirically, it has been found that slightly *decreasing* the data variance at each\n layer gives a better performance. In particular, the PyTorch default initialization uses\n an additional factor of 1/sqrt(3) (cancelling the factor of sqrt(3) that naturally\n arises when computing the bounds of a uniform initialization). A discussion of this was\n (to the best of our knowledge) never published, but see\n https://github.com/pytorch/pytorch/issues/57109 and\n https://soumith.ch/files/20141213_gplus_nninit_discussion.htm.\n use_mv_heuristics : bool\n Multivector components are differently affected by the equivariance constraint. 
If\n `use_mv_heuristics` is set to True, we initialize the weights for each output\n multivector component differently, with factors determined empirically to preserve the\n variance of each multivector component in the forward pass.\n \"\"\"\n\n # Prefactors depending on initialization scheme\n mv_component_factors, mv_factor, mvs_bias_shift, s_factor = self._compute_init_factors(\n initialization, gain, additional_factor, use_mv_heuristics\n )\n\n # Following He et al, 1502.01852, we aim to preserve the variance in the forward pass.\n # A sufficient criterion for this is that the variance of the weights is given by\n # `Var[w] = gain^2 / fan`.\n # Here `gain^2` is 2 if the previous layer has a ReLU nonlinearity, 1 for the initial layer,\n # and some other value in other situations (we may not care about this too much).\n # More importantly, `fan` is the number of connections: the number of input elements that\n # get summed over to compute each output element.\n\n # Let us fist consider the multivector outputs.\n self._init_multivectors(mv_component_factors, mv_factor, mvs_bias_shift)\n\n # Then let's consider the maps to scalars.\n self._init_scalars(s_factor)\n\n @staticmethod\n def _compute_init_factors(initialization, gain, additional_factor, use_mv_heuristics):\n \"\"\"Computes prefactors for the initialization.\n\n See self.reset_parameters().\n \"\"\"\n\n if initialization not in {\"default\", \"small\", \"unit_scalar\", \"almost_unit_scalar\"}:\n raise ValueError(f\"Unknown initialization scheme {initialization}\")\n\n if initialization == \"default\":\n mv_factor = gain * additional_factor * np.sqrt(3)\n s_factor = gain * additional_factor * np.sqrt(3)\n mvs_bias_shift = 0.0\n elif initialization == \"small\":\n # Change scale by a factor of 0.3 in this layer\n mv_factor = 0.1 * gain * additional_factor * np.sqrt(3)\n s_factor = 0.1 * gain * additional_factor * np.sqrt(3)\n mvs_bias_shift = 0.0\n elif initialization == \"unit_scalar\":\n # Change scale by a factor of 0.3 for MV outputs, and initialize bias around 1\n mv_factor = 0.1 * gain * additional_factor * np.sqrt(3)\n s_factor = gain * additional_factor * np.sqrt(3)\n mvs_bias_shift = 1.0\n elif initialization == \"almost_unit_scalar\":\n # Change scale by a factor of 0.3 for MV outputs, and initialize bias around 1\n mv_factor = 0.5 * gain * additional_factor * np.sqrt(3)\n s_factor = gain * additional_factor * np.sqrt(3)\n mvs_bias_shift = 1.0\n else:\n raise ValueError(\n f\"Unknown initialization scheme {initialization}, expected\"\n ' \"default\", \"small\", or \"unit_scalar\".'\n )\n\n # Individual factors for each multivector component\n if use_mv_heuristics:\n # Without corrections, the variance of standard normal inputs after a forward pass\n # through this layer is different for each output grade. 
The reason is that the\n # equivariance constraints affect different grades differently.\n # We heuristically correct for this by initializing the weights for different basis\n # elements differently, using the following additional factors on the weight bound:\n # mv_component_factors = torch.sqrt(torch.Tensor([0.5, 4.0, 6.0, 4.0, 1.0, 0.5, 0.5]))\n mv_component_factors = torch.sqrt(\n torch.Tensor([1.0, 4.0, 6.0, 2.0, 0.5, 0.5, 1.5, 1.5, 0.5])\n )\n else:\n mv_component_factors = torch.ones(NUM_PIN_LINEAR_BASIS_ELEMENTS)\n return mv_component_factors, mv_factor, mvs_bias_shift, s_factor\n\n def _init_multivectors(self, mv_component_factors, mv_factor, mvs_bias_shift):\n \"\"\"Weight initialization for maps to multivector outputs.\"\"\"\n\n # We have\n # `outputs[..., j, y] = sum_{i, b, x} weights[j, i, b] basis_map[b, x, y] inputs[..., i, x]`\n # The basis maps are more or less grade projections, summing over all basis elements\n # corresponds to (almost) an identity map in the GA space. The sum over `b` and `x` thus\n # does not contribute to `fan` substantially. (We may add a small ad-hoc factor later to\n # make up for this approximation.) However, there is still the sum over incoming channels,\n # and thus `fan ~ mv_in_channels`. Assuming (for now) that the previous layer contained a\n # ReLU activation, we finally have the condition `Var[w] = 2 / mv_in_channels`.\n # Since the variance of a uniform distribution between -a and a is given by\n # `Var[Uniform(-a, a)] = a^2/3`, we should set `a = gain * sqrt(3 / mv_in_channels)`.\n # In theory (see docstring).\n fan_in = self._in_mv_channels\n bound = mv_factor / np.sqrt(fan_in)\n for i, factor in enumerate(mv_component_factors):\n nn.init.uniform_(self.weight[..., i], a=-factor * bound, b=factor * bound)\n\n # Now let's focus on the scalar components of the multivector outputs.\n # If there are only multivector inputs, all is good. But if scalar inputs contribute them as\n # well, they contribute to the output variance as well.\n # In this case, we initialize such that the multivector inputs and the scalar inputs each\n # contribute half to the output variance.\n # We can achieve this by inspecting the basis maps and seeing that only basis element 0\n # contributes to the scalar output. 
Thus, we can reduce the variance of the correponding\n # weights to give a variance of 0.5, not 1.\n if self.s2mvs is not None:\n bound = mv_component_factors[0] * mv_factor / np.sqrt(fan_in) / np.sqrt(2)\n nn.init.uniform_(self.weight[..., [0]], a=-bound, b=bound)\n\n # The same holds for the scalar-to-MV map, where we also just want a variance of 0.5.\n if self.s2mvs is not None:\n fan_in, _ = nn.init._calculate_fan_in_and_fan_out(\n self.s2mvs.weight\n ) # pylint:disable=protected-access\n fan_in = max(fan_in, 1) # Since in theory we could have 0-channel scalar \"data\"\n bound = mv_component_factors[0] * mv_factor / np.sqrt(fan_in) / np.sqrt(2)\n nn.init.uniform_(self.s2mvs.weight, a=-bound, b=bound)\n\n # Bias needs to be adapted, as the overall fan in is different (need to account for MV\n # and s inputs) and we may need to account for the unit_scalar initialization scheme\n if self.s2mvs.bias is not None:\n fan_in = (\n nn.init._calculate_fan_in_and_fan_out(self.s2mvs.weight)[0]\n + self._in_mv_channels\n )\n bound = mv_component_factors[0] / np.sqrt(fan_in) if fan_in > 0 else 0\n nn.init.uniform_(self.s2mvs.bias, mvs_bias_shift - bound, mvs_bias_shift + bound)\n\n def _init_scalars(self, s_factor):\n \"\"\"Weight initialization for maps to multivector outputs.\"\"\"\n\n # If both exist, we need to account for overcounting again, and assign each a target a\n # variance of 0.5.\n models = []\n if self.s2s:\n models.append(self.s2s)\n if self.mvs2s:\n models.append(self.mvs2s)\n for model in models:\n fan_in, _ = nn.init._calculate_fan_in_and_fan_out(\n model.weight\n ) # pylint:disable=protected-access\n fan_in = max(fan_in, 1) # Since in theory we could have 0-channel scalar \"data\"\n bound = s_factor / np.sqrt(fan_in) / np.sqrt(len(models))\n nn.init.uniform_(model.weight, a=-bound, b=bound)\n # Bias needs to be adapted, as the overall fan in is different (need to account for MV and\n # s inputs)\n if self.mvs2s and self.mvs2s.bias is not None:\n fan_in = nn.init._calculate_fan_in_and_fan_out(self.mvs2s.weight)[\n 0\n ] # pylint:disable=protected-access\n if self.s2s:\n fan_in += nn.init._calculate_fan_in_and_fan_out(self.s2s.weight)[\n 0\n ] # pylint:disable=protected-access\n bound = s_factor / np.sqrt(fan_in) if fan_in > 0 else 0\n nn.init.uniform_(self.mvs2s.bias, -bound, bound)" }, { "identifier": "MLPConfig", "path": "gatr/layers/mlp/config.py", "snippet": "class MLPConfig:\n \"\"\"Geometric MLP configuration.\n\n Parameters\n ----------\n mv_channels : iterable of int\n Number of multivector channels at each layer, from input to output\n s_channels : None or iterable of int\n If not None, sets the number of scalar channels at each layer, from input to output. Length\n needs to match mv_channels\n activation : {\"relu\", \"sigmoid\", \"gelu\"}\n Which (gated) activation function to use\n dropout_prob : float or None\n Dropout probability\n \"\"\"\n\n mv_channels: Optional[List[int]] = None\n s_channels: Optional[List[int]] = None\n activation: str = \"gelu\"\n dropout_prob: Optional[float] = None\n\n def __post_init__(self):\n \"\"\"Type checking / conversion.\"\"\"\n if isinstance(self.dropout_prob, str) and self.dropout_prob.lower() in [\"null\", \"none\"]:\n self.dropout_prob = None\n\n @classmethod\n def cast(cls, config: Any) -> MLPConfig:\n \"\"\"Casts an object as MLPConfig.\"\"\"\n if isinstance(config, MLPConfig):\n return config\n if isinstance(config, Mapping):\n return cls(**config)\n raise ValueError(f\"Can not cast {config} to {cls}\")" } ]
from dataclasses import replace from typing import Optional, Tuple, Union from einops import rearrange from torch import nn from torch.utils.checkpoint import checkpoint from gatr.layers.attention.config import SelfAttentionConfig from gatr.layers.gatr_block import GATrBlock from gatr.layers.linear import EquiLinear from gatr.layers.mlp.config import MLPConfig import torch
7,273
# Copyright (c) 2023 Qualcomm Technologies, Inc. # All rights reserved. # Default rearrange patterns _MV_REARRANGE_PATTERN = "... i j c x -> ... j i c x" _S_REARRANGE_PATTERN = "... i j c -> ... j i c" class AxialGATr(nn.Module): # pylint: disable=duplicate-code """Axial GATr network for two token dimensions. This, together with gatr.nets.gatr.GATr, is the main architecture proposed in our paper. It combines `num_blocks` GATr transformer blocks, each consisting of geometric self-attention layers, a geometric MLP, residual connections, and normalization layers. In addition, there are initial and final equivariant linear layers. Assumes input data with shape `(..., num_items_1, num_items_2, num_channels, 16)`. The first, third, fifth, ... block computes attention over the `items_2` axis. The other blocks compute attention over the `items_1` axis. Positional encoding can be specified separately for both axes. Parameters ---------- in_mv_channels : int Number of input multivector channels. out_mv_channels : int Number of output multivector channels. hidden_mv_channels : int Number of hidden multivector channels. in_s_channels : None or int If not None, sets the number of scalar input channels. out_s_channels : None or int If not None, sets the number of scalar output channels. hidden_s_channels : None or int If not None, sets the number of scalar hidden channels. attention: Dict Data for SelfAttentionConfig mlp: Dict Data for MLPConfig num_blocks : int Number of transformer blocks. pos_encodings : tuple of bool Whether to apply rotary positional embeddings along the item dimensions to the scalar keys and queries. The first element in the tuple determines whether positional embeddings are applied to the first item dimension, the second element the same for the second item dimension. collapse_dims_for_odd_blocks : bool Whether the batch dimensions will be collapsed in odd blocks (to support xformers block attention) """ def __init__( self, in_mv_channels: int, out_mv_channels: int, hidden_mv_channels: int, in_s_channels: Optional[int], out_s_channels: Optional[int], hidden_s_channels: Optional[int], attention: SelfAttentionConfig,
# Copyright (c) 2023 Qualcomm Technologies, Inc. # All rights reserved. # Default rearrange patterns _MV_REARRANGE_PATTERN = "... i j c x -> ... j i c x" _S_REARRANGE_PATTERN = "... i j c -> ... j i c" class AxialGATr(nn.Module): # pylint: disable=duplicate-code """Axial GATr network for two token dimensions. This, together with gatr.nets.gatr.GATr, is the main architecture proposed in our paper. It combines `num_blocks` GATr transformer blocks, each consisting of geometric self-attention layers, a geometric MLP, residual connections, and normalization layers. In addition, there are initial and final equivariant linear layers. Assumes input data with shape `(..., num_items_1, num_items_2, num_channels, 16)`. The first, third, fifth, ... block computes attention over the `items_2` axis. The other blocks compute attention over the `items_1` axis. Positional encoding can be specified separately for both axes. Parameters ---------- in_mv_channels : int Number of input multivector channels. out_mv_channels : int Number of output multivector channels. hidden_mv_channels : int Number of hidden multivector channels. in_s_channels : None or int If not None, sets the number of scalar input channels. out_s_channels : None or int If not None, sets the number of scalar output channels. hidden_s_channels : None or int If not None, sets the number of scalar hidden channels. attention: Dict Data for SelfAttentionConfig mlp: Dict Data for MLPConfig num_blocks : int Number of transformer blocks. pos_encodings : tuple of bool Whether to apply rotary positional embeddings along the item dimensions to the scalar keys and queries. The first element in the tuple determines whether positional embeddings are applied to the first item dimension, the second element the same for the second item dimension. collapse_dims_for_odd_blocks : bool Whether the batch dimensions will be collapsed in odd blocks (to support xformers block attention) """ def __init__( self, in_mv_channels: int, out_mv_channels: int, hidden_mv_channels: int, in_s_channels: Optional[int], out_s_channels: Optional[int], hidden_s_channels: Optional[int], attention: SelfAttentionConfig,
mlp: MLPConfig,
3
2023-10-23 15:58:36+00:00
12k
tomguluson92/cloth2tex
phase1_inference.py
[ { "identifier": "ClothRenderer", "path": "renderer/cloth_renderer.py", "snippet": "class ClothRenderer(object):\n \n def __init__(self, objfile, resolution=512, focal_distance=1.6, scale_factor=1):\n self.device = torch.device(\"cuda:0\")\n\n self.img_size = resolution\n self.render_size = resolution\n self.renderer, self.renderer_silhouette = self.__get_renderer(self.render_size, focal_distance)\n \n print(\"[Cloth2Tex]\", objfile)\n obj_filename = os.path.join(objfile)\n verts, faces, aux = load_obj(\n obj_filename,\n device=self.device,\n load_textures=True)\n self.faces = faces.verts_idx\n self.verts = verts\n self.aux = aux\n \n self.verts = self.normalize_vertex(verts.clone()) * scale_factor\n \n self.center = verts.mean(0)\n self.scale = max((verts - self.center).abs().max(0)[0])\n self.landmark_cam = OrthogonalCamera(rotation=self.cameras.R.cuda(), translation=self.cameras.T.cuda()).to(self.device)\n \n _keys = []\n if len(aux.texture_images.keys()) > 0:\n for _ in aux.texture_images.keys():\n _keys.append(_)\n self.tex_lst = [aux.texture_images[i] for i in _keys]\n texture_image = self.tex_lst[0]\n \n \n self.verts_uvs = aux.verts_uvs[None, ...] # (1, V, 2)\n faces_uvs = faces.textures_idx[None, ...] # (1, F, 3)\n tex_maps = aux.texture_images\n\n # Canonical Mesh\n texture_image = texture_image[None, ...].to(self.device) # (1, H, W, 3)\n self.texture = TexturesUV(maps=texture_image, faces_uvs=self.faces[None], verts_uvs=self.verts_uvs)\n self.canonical_mesh = Meshes([self.verts], [self.faces], self.texture)\n \n def normalize_vertex(self, verts):\n # Normalizing\n N = verts.shape[0]\n center = verts.mean(0)\n scale = max((verts - center).abs().max(0)[0])\n \n verts = verts - center\n verts = verts * (1/float(scale))\n \n return verts\n \n def denormalize_vertex(self, verts):\n \n out = self.scale*verts + self.center\n \n return out\n \n def render_silhouette(self, verts, side='back', landmark=True, vertex_number=[[], []]):\n vert_lst_front = vertex_number[0]\n vert_lst_back = vertex_number[1]\n \n tmp_verts = verts.clone()\n mesh = Meshes([tmp_verts], [self.faces], self.texture)\n meshes = mesh.extend(2)\n \n # Get a batch(2) of viewing angles. 
\n elev = torch.linspace(180, -180, 2)\n azim = torch.linspace(0, 0, 2)\n \n focal_length = torch.linspace(-1, 1, 2)\n R, T = look_at_view_transform(dist=focal_length, elev=elev, azim=azim)\n cameras = FoVOrthographicCameras(device=self.device, R=R, T=T)\n \n target_images, fragments = self.renderer_silhouette(meshes, cameras=cameras)\n \n if landmark is True:\n # project normalized vertex to image space(fix vertex)\n specific_verts_2d_front = self.landmark_cam(verts[vert_lst_front].unsqueeze(0))[0]\n # conversion from OpenGL coordinate to OpenCV coordinate\n specific_verts_2d_front[:,] = -specific_verts_2d_front[:,]\n # conversion from [-1,1] to [0,512]\n specific_verts_2d_front = (specific_verts_2d_front+1)/2*self.render_size\n \n # project normalized vertex to image space(fix vertex)\n specific_verts_2d_back = self.landmark_cam(verts[vert_lst_back].unsqueeze(0))[0]\n # conversion from OpenGL coordinate to OpenCV coordinate\n specific_verts_2d_back[:,] = -specific_verts_2d_back[:,]\n # conversion from [-1,1] to [0,512]\n specific_verts_2d_back = (specific_verts_2d_back+1)/2*self.render_size\n \n if side == 'front':\n return target_images[0], [specific_verts_2d_front]\n elif side == 'back':\n return target_images[1], [specific_verts_2d_back]\n else:\n return target_images, [specific_verts_2d_front, specific_verts_2d_back]\n \n return target_images, fragments\n \n def render_image(self, texture_image):\n texture = TexturesUV(maps=texture_image, faces_uvs=self.faces[None], verts_uvs=self.verts_uvs)\n \n tmp_verts = self.verts.clone()\n mesh = Meshes([tmp_verts], [self.faces.clone()], texture)\n meshes = mesh.extend(2)\n \n # Get a batch(2) of viewing angles. \n elev = torch.linspace(180, -180, 2)\n azim = torch.linspace(0, 0, 2)\n \n focal_length = torch.linspace(-1, 1, 2)\n R, T = look_at_view_transform(dist=focal_length, elev=elev, azim=azim)\n cameras = FoVOrthographicCameras(device=self.device, R=R, T=T)\n \n target_images = self.renderer(meshes, cameras=cameras)\n target_masks, _ = self.renderer_silhouette(meshes, cameras=cameras)\n \n return target_images, target_masks\n \n \n def __get_renderer(self, render_size, focal_distance=2):\n \n lights = PointLights(device=self.device, location=[[0.0, 0.0, -3.0]],\n ambient_color=((1,1,1),),diffuse_color=((0,0,0),),specular_color=((0,0,0),))\n \n self.focal_distance = focal_distance\n R, T = look_at_view_transform(focal_distance, -180, 0) # 180 -> -180\n cameras = FoVPerspectiveCameras(device=self.device, R=R, T=T) # silhouette only!\n # cameras = FoVOrthographicCameras(device=self.device, R=R, T=T)\n \n self.cameras = cameras\n \n raster_settings = RasterizationSettings(\n image_size=render_size, \n blur_radius=0.0, \n faces_per_pixel=1, \n )\n sigma = 1e-4\n gamma = 1e-4\n blend_params = BlendParams(sigma=sigma, gamma=gamma, background_color=(255, 255, 255))\n \n renderer = MeshRenderer(\n rasterizer=MeshRasterizer(\n cameras=cameras,\n raster_settings=raster_settings\n ),\n shader = SoftPhongShader(\n device=self.device, \n cameras=cameras,\n lights=lights,\n # blend_params=blend_params\n )\n )\n \n # ref: https://github.com/facebookresearch/pytorch3d/issues/470\n sigma = 1e-8\n gamma = 1e-8\n blend_params = BlendParams(sigma=sigma, gamma=gamma, background_color=(0, 0, 0))\n raster_settings = RasterizationSettings(\n image_size=render_size, \n blur_radius=np.log(1. / 1e-8 - 1.)*sigma, # blur_radius=np.log(1. 
/ 1e-8 - 1.)*sigma, \n faces_per_pixel=10, \n bin_size=None, \n max_faces_per_bin=None\n )\n \n renderer_silhouette = MeshRendererWithFragments(\n rasterizer=MeshRasterizer(\n cameras=cameras, \n raster_settings=raster_settings\n ),\n shader=SoftSilhouetteShader(blend_params=blend_params)\n # shader=SoftSilhouetteShader(blend_params=blend_params)\n )\n\n return renderer, renderer_silhouette" }, { "identifier": "extract_ampl_phase", "path": "utils/frequency.py", "snippet": "def extract_ampl_phase(input_img):\n \n fft_img = torch.fft.rfftn(input_img.clone())\n fft_im = torch.stack((fft_img.real, fft_img.imag), -1)\n \n # fft_im: size should be bx3xhxwx2\n fft_amp = fft_im[:,:,:,:,0]**2 + fft_im[:,:,:,:,1]**2\n fft_amp = torch.sqrt(fft_amp) # amplitude\n fft_pha = torch.atan2( fft_im[:,:,:,:,1], fft_im[:,:,:,:,0]) # phase\n return fft_amp, fft_pha" }, { "identifier": "Binarize", "path": "utils/binary_function.py", "snippet": "class Binarize(Function):\n clip_value = 1\n\n @staticmethod\n def forward(ctx, inp):\n ctx.save_for_backward(inp)\n\n output = inp.sign()\n\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n inp: Tensor = ctx.saved_tensors[0]\n\n clipped = inp.abs() <= Binarize.clip_value\n\n output = torch.zeros(inp.size()).to(grad_output.device)\n output[clipped] = 1\n output[~clipped] = 0\n\n return output * grad_output" }, { "identifier": "TVLoss", "path": "utils/tvl_loss.py", "snippet": "class TVLoss(nn.Module):\n def __init__(self, weight=1):\n super(TVLoss,self).__init__()\n self.TVLoss_weight = weight\n\n def forward(self, x):\n batch_size = x.size()[0]\n h_x = x.size()[2]\n w_x = x.size()[3]\n count_h = self._tensor_size(x[:,:,1:,:])\n count_w = self._tensor_size(x[:,:,:,1:])\n h_tv = torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:]),2).sum()\n w_tv = torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1]),2).sum()\n \n # 2023.03.29 +2nearest\n # h_tv = torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:]),2).sum() + torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:])[:, :, ::2, :],2).sum()\n # w_tv = torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1]),2).sum() + torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1])[:, :, :, ::2],2).sum()\n \n return self.TVLoss_weight*2*(h_tv/count_h+w_tv/count_w)/batch_size\n\n def _tensor_size(self,t):\n return t.size()[1]*t.size()[2]*t.size()[3]" }, { "identifier": "TVMaskLoss", "path": "utils/tvl_loss.py", "snippet": "class TVMaskLoss(nn.Module):\n def __init__(self, weight=1):\n super(TVMaskLoss,self).__init__()\n self.TVMaskLoss_weight = weight\n self.non_idx = None\n\n def forward(self, mask, x):\n if self.non_idx is None:\n non_idx = mask.nonzero()\n self.non_idx = non_idx.split(1, dim=1)\n \n tmp_mask = torch.ones(1,3,512,512).cuda()\n tmp_mask[self.non_idx] = 0 # 排除非UV区域.\n \n batch_size = x.size()[0]\n h_x = x.size()[2]\n w_x = x.size()[3]\n \n x = x * tmp_mask\n \n count_h = self._tensor_size(x[:,:,1:,:])\n count_w = self._tensor_size(x[:,:,:,1:])\n # h_tv = torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:]),2).sum()\n # w_tv = torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1]),2).sum()\n \n # 2023.03.29 +2nearest\n h_tv = torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:]),2).sum() + torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:])[:, :, ::2, :],2).sum()\n w_tv = torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1]),2).sum() + torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1])[:, :, :, ::2],2).sum()\n return self.TVMaskLoss_weight*2*(h_tv/count_h+w_tv/count_w)/batch_size\n\n def _tensor_size(self,t):\n return t.size()[1]*t.size()[2]*t.size()[3]" }, { "identifier": "DeformationGraph", "path": "lib/deformation_graph.py", "snippet": "class 
DeformationGraph(nn.Module):\n \n def __init__(self, vert_number=9648, radius=0.015, k=9, sampling_strategy='qslim'): \n super().__init__()\n \n self.radius = radius\n self.k = k\n self.max_neigh_num = 40\n self.sampling_strategy = sampling_strategy\n self.one_ring_neigh = []\n self.nodes_idx = None\n self.weights = None\n self.influence_nodes_idx = []\n self.dists = []\n \n self.vert_number = vert_number\n\n def construct_graph(self, category_name, vertices=None, faces=None):\n \n transform_fp = \"transform_{}.pkl\".format(category_name)\n if self.sampling_strategy == 'qslim':\n m = Mesh(v=vertices, f=faces)\n if os.path.exists(transform_fp):\n with open(transform_fp, 'rb') as f:\n tmp = pickle.load(f, encoding='latin1')\n M, A, D = tmp['M'], tmp['A'], tmp['D']\n else:\n M, A, D = generate_transform_matrices(m, [20, 20])\n tmp = {'M': M, 'A': A, 'D': D}\n with open(transform_fp, 'wb') as fp:\n pickle.dump(tmp, fp)\n # import pdb; pdb.set_trace()\n nodes_v = M[1].v\n self.nodes_idx = D[0].nonzero()[1]\n adj_mat = A[1].toarray()\n \n for i in range(adj_mat.shape[0]):\n self.one_ring_neigh.append(adj_mat[i].nonzero()[0].tolist() + [i]*(self.max_neigh_num-len(adj_mat[i].nonzero()[0])))\n self.one_ring_neigh = torch.tensor(self.one_ring_neigh).cuda() \n\n # construct kd tree\n kdtree = KDTree(nodes_v)\n \n for vert in vertices:\n dist, idx = kdtree.query(vert, k=self.k)\n self.dists.append(dist)\n self.influence_nodes_idx.append(idx)\n \n self.weights = -np.log(np.array(self.dists)+eps)\n \n # weights normalization\n self.weights = torch.tensor(self.weights/col(self.weights.sum(1))).cuda()\n self.influence_nodes_idx = torch.tensor(self.influence_nodes_idx).cuda()\n \n def forward(self, vertices, opt_d_rotations, opt_d_translations):\n \n opt_d_rotmat = batch_rodrigues(opt_d_rotations[0]).unsqueeze(0) # 1 * N_c * 3 * 3\n nodes = vertices[self.nodes_idx, ...]\n \n opt_d_rotmat = opt_d_rotmat.cuda()\n opt_d_translations = opt_d_translations.cuda()\n\n influence_nodes_v = nodes[self.influence_nodes_idx.reshape((-1,))]# .reshape((28944(self.k * 9648),3,3))\n opt_d_r = opt_d_rotmat[0, self.influence_nodes_idx.reshape((-1,)), ...]# .reshape((28944,3,3,3)) \n opt_d_t = opt_d_translations[0, self.influence_nodes_idx.reshape((-1,)), ...]# .reshape((28944,3,3))\n \n warpped_vertices = (torch.einsum('bij, bkj->bki', opt_d_r.cuda(), (vertices.repeat_interleave(self.k, dim=0) - influence_nodes_v).unsqueeze(1)).squeeze(1) \\\n + influence_nodes_v + opt_d_t.cuda()).reshape((self.vert_number, self.k, 3)) * (self.weights.unsqueeze(-1))\n warpped_vertices = warpped_vertices.sum(axis=1).float()\n\n diff_term = (nodes + opt_d_translations[0].cuda()).repeat_interleave(self.max_neigh_num, dim=0) - \\\n (nodes[self.one_ring_neigh.reshape((-1,))] + opt_d_translations[0][self.one_ring_neigh.reshape((-1,))].cuda()) - \\\n torch.einsum('bij, bkj->bki', opt_d_rotmat[0].repeat_interleave(self.max_neigh_num, dim=0).cuda(), \\\n (nodes.repeat_interleave(self.max_neigh_num, dim=0) - nodes[self.one_ring_neigh.reshape((-1,))]).unsqueeze(1)).squeeze(1)\n arap_loss = torch.sum(diff_term ** 2) / self.nodes_idx.shape[0]\n \n return warpped_vertices.unsqueeze(0), arap_loss" }, { "identifier": "generate_transform_matrices_coma", "path": "lib/mesh_sampling.py", "snippet": "def generate_transform_matrices_coma(mesh, factors):\n \"\"\"Generates len(factors) meshes, each of them is scaled by factors[i] and\n computes the transformations between them.\n Returns:\n M: a set of meshes downsampled from mesh by a factor specified in 
factors.\n A: Adjacency matrix for each of the meshes\n D: csc_matrix Downsampling transforms between each of the meshes\n U: Upsampling transforms between each of the meshes\n F: a list of faces\n \"\"\"\n\n factors = map(lambda x: 1.0 / x, factors)\n M, A, D, U, F = [], [], [], [], []\n F.append(mesh.f) # F[0]\n A.append(get_vert_connectivity(mesh.v, mesh.f).astype('float32')) # A[0]\n M.append(mesh) # M[0]\n\n for factor in factors:\n ds_f, ds_D = qslim_decimator_transformer(M[-1], factor=factor)\n D.append(ds_D.astype('float32'))\n new_mesh_v = ds_D.dot(M[-1].v)\n new_mesh = Mesh(v=new_mesh_v, f=ds_f)\n F.append(new_mesh.f)\n M.append(new_mesh)\n A.append(\n get_vert_connectivity(new_mesh.v, new_mesh.f).tocoo())\n U.append(setup_deformation_transfer(M[-1], M[-2]).astype('float32'))\n\n return M, A, D, U, F" }, { "identifier": "to_edge_index", "path": "lib/utils_dg.py", "snippet": "def to_edge_index(mat):\n return torch.LongTensor(np.vstack(mat.nonzero()))" }, { "identifier": "to_sparse", "path": "lib/utils_dg.py", "snippet": "def to_sparse(spmat):\n return torch.sparse.FloatTensor(\n torch.LongTensor([spmat.tocoo().row,\n spmat.tocoo().col]),\n torch.FloatTensor(spmat.tocoo().data), torch.Size(spmat.tocoo().shape))" }, { "identifier": "get_vert_connectivity", "path": "lib/utils_dg.py", "snippet": "def get_vert_connectivity(mesh_v, mesh_f):\n \"\"\"Returns a sparse matrix (of size #verts x #verts) where each nonzero\n element indicates a neighborhood relation. For example, if there is a\n nonzero element in position (15,12), that means vertex 15 is connected\n by an edge to vertex 12.\"\"\"\n\n vpv = sp.csc_matrix((len(mesh_v),len(mesh_v)))\n\n # for each column in the faces...\n for i in range(3):\n IS = mesh_f[:,i]\n JS = mesh_f[:,(i+1)%3]\n data = np.ones(len(IS))\n ij = np.vstack((row(IS.flatten()), row(JS.flatten())))\n mtx = sp.csc_matrix((data, ij), shape=vpv.shape)\n vpv = vpv + mtx + mtx.T\n\n return vpv" }, { "identifier": "scipy_to_torch_sparse", "path": "lib/utils_dg.py", "snippet": "def scipy_to_torch_sparse(scp_matrix):\n values = scp_matrix.data\n indices = np.vstack((scp_matrix.row, scp_matrix.col))\n i = torch.LongTensor(indices)\n v = torch.FloatTensor(values)\n shape = scp_matrix.shape\n\n sparse_tensor = torch.sparse.FloatTensor(i, v, torch.Size(shape))\n return sparse_tensor" }, { "identifier": "DeformGraphModel", "path": "models/deform_model.py", "snippet": "class DeformGraphModel(torch.nn.Module):\n def __init__(self, deform_graph, renderer, binarization, canonical_mesh, std_lst, lr_rate=5e-4, savedir=\"1017\"):\n super(DeformGraphModel, self).__init__()\n \n self.device = torch.device(\"cuda:0\")\n \n self.deform_graph = deform_graph\n self.cloth_renderer = renderer\n self.binarization = binarization\n self.canonical_mesh = canonical_mesh\n \n self.step_size = lr_rate\n \n self.device = torch.device(\"cuda:0\")\n self.std_lst = std_lst[0]\n self.savedir = savedir\n # self.std_lst_b = std_lst[1]\n \n def iterative_deformgraph(self,\n batch_id,\n vertex_number,\n inputs,\n contours,\n verts,\n opt_d_rotations,\n opt_d_translations,\n times=101):\n \n verts_for_dg = verts.detach()\n verts_for_dg.requires_grad = False\n \n surface_optimizer = torch.optim.Adam([\n {'params': [opt_d_rotations]},\n {'params': [opt_d_translations]}\n ], lr=self.step_size)\n \n w_dg = 50\n w_kp = 0.001\n w_lap = 100\n w_norm = 10\n w_arap = 50\n w_edge = 1\n \n min_loss = 10000\n loop = tqdm(range(times))\n \n inputs_front, inputs_back = inputs[0].to(self.device).float(), 
inputs[1].to(self.device).float()\n landmark_front, landmark_back = contours[0].to(self.device).float(), contours[1].to(self.device).float() # landmark (2023.02.15)\n \n \n for i in loop:\n surface_optimizer.zero_grad()\n \n # arap: as rigid as possible\n warpped_vertices, loss_arap = self.deform_graph(verts_for_dg, opt_d_rotations, opt_d_translations)\n warpped_vertices = warpped_vertices.squeeze()\n \n src_mesh = Meshes([warpped_vertices], [self.cloth_renderer.faces], self.cloth_renderer.texture)\n \n # front&back\n masks = torch.stack([inputs_front[0], inputs_back[0]]).squeeze()\n \n # mn\n if landmark_back.shape[1] < landmark_front.shape[1]:\n _cc = [landmark_back, torch.zeros(1,1,1,2).cuda()] # original\n # _cc = [landmark_back, torch.zeros(1,1,2).cuda()] # blender\n landmark_back = torch.cat(_cc, 1)\n \n landmarks_canon = torch.stack([landmark_front.squeeze(), landmark_back.squeeze()])\n \n render_mask, specific_verts_2d = self.cloth_renderer.render_silhouette(warpped_vertices, side='both', landmark=True, vertex_number=vertex_number)\n \n # mn\n if specific_verts_2d[0].shape[0] != specific_verts_2d[1].shape[0]:\n _dd = [specific_verts_2d[1], torch.zeros(1,2).cuda()]\n specific_verts_2d[1] = torch.cat(_dd, 0)\n \n render_mask = render_mask[..., 3]\n render_mask_out = self.binarization(render_mask)\n \n loss_dg = nn.MSELoss()(render_mask_out, masks) + 0.3 * mask_iou(render_mask_out, masks) # [2, 512, 512] [2, 512, 512]\n loss_kp = nn.MSELoss()(torch.stack(specific_verts_2d), landmarks_canon)\n edge_mask = edge_extraction(masks)[:, 0].float()\n edge_render_mask = edge_extraction(render_mask_out)[:, 0].float()\n \n loss_edge = nn.L1Loss()(edge_render_mask*render_mask_out, edge_mask)\n \n loss_lap = mesh_laplacian_smoothing(src_mesh, method=\"uniform\")\n loss_norm = mesh_normal_consistency(src_mesh)\n \n # loss = w_dg*loss_dg + w_kp*loss_kp + w_norm*loss_norm + w_arap*loss_arap + w_edge*loss_edge\n loss = w_dg*loss_dg + w_kp*loss_kp + w_norm*loss_norm + w_arap*loss_arap + w_edge*loss_edge # + w_lap*loss_lap + w_norm*loss_norm\n \n loss.backward()\n surface_optimizer.step()\n \n with torch.no_grad():\n render_mask, specific_verts_2d = self.cloth_renderer.render_silhouette(warpped_vertices, side='both', landmark=True, vertex_number=vertex_number)\n f_render_mask, b_render_mask = render_mask[0, ..., 3], render_mask[1, ..., 3]\n f_render_mask, b_render_mask = self.binarization(f_render_mask), self.binarization(b_render_mask)\n \n _f_2d, _b_2d = specific_verts_2d[0].cpu().numpy().copy(), specific_verts_2d[1].cpu().numpy().copy()\n \n loop.set_description('[Total]{0:.2f}[Mask]{1:.2f}[Nor]{2:.2f}[KP]{3:.2f}[ARAP]{4:.2f}[Edge]{5:.2f}'.format(loss, w_dg * loss_dg, w_norm*loss_norm, w_kp*loss_kp, w_arap*loss_arap, w_edge*loss_edge))\n \n if float(loss) < min_loss:\n min_loss = float(loss)\n \n aaa1 = f_render_mask.detach().cpu().numpy() * 255.\n aaa2 = b_render_mask.detach().cpu().numpy() * 255.\n \n bbb1 = inputs_front[0][0].unsqueeze(-1).cpu().numpy() * 255.\n bbb2 = inputs_back[0][0].unsqueeze(-1).cpu().numpy() * 255.\n \n if len(aaa1.shape) == 2:\n aaa1 = np.expand_dims(aaa1, -1)\n aaa2 = np.expand_dims(aaa2, -1)\n \n ccc1 = aaa1 * 0.4 + bbb1\n ccc2 = aaa2 * 0.4 + bbb2\n cv2.putText(ccc1, \"front\", (int(10), int(40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (193, 33, 240), 2, cv2.LINE_AA) \n cv2.putText(ccc2, \"back\", (int(10), int(40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (193, 33, 240), 2, cv2.LINE_AA) \n \n for iii, vvvv in enumerate(_f_2d):\n cv2.circle(ccc1, (int(vvvv[0]), int(vvvv[1])), 3, (193, 33, 
240), -1)\n cv2.putText(ccc1, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (200, 100, 90), 2, cv2.LINE_AA) \n for iii, vvvv in enumerate(landmarks_canon[0]):\n cv2.circle(ccc1, (int(vvvv[0]), int(vvvv[1])), 3, (193, 33, 240), -1)\n cv2.putText(ccc1, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (80, 40, 200), 2, cv2.LINE_AA) \n \n for iii, vvvv in enumerate(_b_2d):\n if int(vvvv[0]) != 0:\n cv2.circle(ccc2, (int(vvvv[0]), int(vvvv[1])), 3, (193, 33, 240), -1)\n cv2.putText(ccc2, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (200, 100, 90), 2, cv2.LINE_AA) \n \n for iii, vvvv in enumerate(landmarks_canon[1]):\n if int(vvvv[0]) != 0:\n cv2.circle(ccc2, (int(vvvv[0]), int(vvvv[1])), 3, (193, 33, 240), -1)\n cv2.putText(ccc2, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (80, 40, 200), 2, cv2.LINE_AA) \n \n \n cv2.imwrite(\"experiments/{0}/{1}_step2_min.jpg\".format(self.savedir, batch_id), cv2.hconcat([(ccc1.astype(np.uint8)), ccc2.astype(np.uint8)]))\n \n \n ddd1, ddd2 = edge_render_mask[0].unsqueeze(-1).cpu().numpy() * 255., edge_render_mask[1].unsqueeze(-1).cpu().numpy() * 255.\n cv2.imwrite(\"experiments/{0}/{1}_step2_edge.jpg\".format(self.savedir, batch_id), cv2.hconcat([(ddd1.astype(np.uint8)), ddd2.astype(np.uint8)]))\n \n minimum_vertices = warpped_vertices.clone()\n best_opt_d_rot = opt_d_rotations.clone()\n best_opt_d_trans = opt_d_translations.clone()\n \n # if i >= 50:\n # if i % 50 == 0:\n # save_obj(\"experiments/batch_result/mesh/0505_{}.obj\".format(i), warpped_vertices.detach(), self.cloth_renderer.faces)\n # else:\n # if i % 5 == 0:\n # save_obj(\"experiments/batch_result/mesh/0505_{}.obj\".format(i), warpped_vertices.detach(), self.cloth_renderer.faces) \n\n if i % 500 == 0:\n aaa1 = f_render_mask.detach().cpu().numpy() * 255.\n aaa2 = b_render_mask.detach().cpu().numpy() * 255.\n \n bbb1 = inputs_front[0][0].unsqueeze(-1).cpu().numpy() * 255.\n bbb2 = inputs_back[0][0].unsqueeze(-1).cpu().numpy() * 255.\n \n if len(aaa1.shape) == 2:\n aaa1 = np.expand_dims(aaa1, -1)\n aaa2 = np.expand_dims(aaa2, -1)\n \n ccc1 = aaa1 * 0.4 + bbb1\n ccc2 = aaa2 * 0.4 + bbb2\n cv2.putText(ccc1, \"front\", (int(10), int(40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (193, 33, 240), 2, cv2.LINE_AA) \n cv2.putText(ccc2, \"back\", (int(10), int(40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (193, 33, 240), 2, cv2.LINE_AA) \n \n for iii, vvvv in enumerate(_f_2d):\n cv2.circle(ccc1, (int(vvvv[0]), int(vvvv[1])), 3, (80, 40, 200), -1)\n cv2.putText(ccc1, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (200, 100, 90), 2, cv2.LINE_AA) \n for iii, vvvv in enumerate(landmarks_canon[0]):\n cv2.circle(ccc1, (int(vvvv[0]), int(vvvv[1])), 3, (80, 40, 200), -1)\n cv2.putText(ccc1, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (80, 40, 200), 2, cv2.LINE_AA) \n \n for iii, vvvv in enumerate(_b_2d):\n if int(vvvv[0]) != 0:\n cv2.circle(ccc2, (int(vvvv[0]), int(vvvv[1])), 3, (80, 40, 200), -1)\n cv2.putText(ccc2, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (200, 100, 90), 2, cv2.LINE_AA) \n \n for iii, vvvv in enumerate(landmarks_canon[1]):\n if int(vvvv[0]) != 0:\n cv2.circle(ccc2, (int(vvvv[0]), int(vvvv[1])), 3, (80, 40, 200), -1)\n cv2.putText(ccc2, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (80, 40, 200), 2, cv2.LINE_AA) \n \n 
cv2.imwrite(\"experiments/{0}/{1}_step2_{2}.jpg\".format(self.savedir, batch_id, i), cv2.hconcat([(ccc1.astype(np.uint8)), ccc2.astype(np.uint8)]))\n \n \n print(\"[cloth2tex] [deformation graph parameter]\", opt_d_rotations.shape, opt_d_translations.shape)\n return minimum_vertices, best_opt_d_rot, best_opt_d_trans\n \n def forward(self, x):\n out = self.linear(x)\n # out = self.sigmoid(out)\n return out" } ]
import argparse import datetime import torch import torchvision import torch.nn as nn import torch.optim as optim import numpy as np import pickle import os import os.path as osp import torchvision import torchvision.transforms as transforms import torch.nn.functional as F import thinplate as tps import time import matplotlib.pyplot as plt import importlib import random import json import cv2 from torchvision.models.feature_extraction import create_feature_extractor, get_graph_node_names from renderer.cloth_renderer import ClothRenderer from PIL import Image from utils.frequency import extract_ampl_phase from utils.binary_function import Binarize from utils.tvl_loss import TVLoss, TVMaskLoss from tqdm import tqdm from pytorch3d.io import load_obj, save_obj from itertools import chain from pytorch3d.structures import Meshes from pytorch3d.transforms import RotateAxisAngle from pytorch3d.loss import ( mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) from lib.deformation_graph import DeformationGraph from lib.mesh_sampling import generate_transform_matrices_coma from lib.utils_dg import to_edge_index, to_sparse, get_vert_connectivity, scipy_to_torch_sparse from models import DeformGraphModel from torch_geometric.transforms import FaceToEdge from torch_geometric.data import Data from psbody.mesh import Mesh from torch_geometric.io import read_ply
8,827
# -*- coding: utf-8 -*- """ @date: 2023.03.29-31 week13 @func: PhaseI inference code. """ class Trainer(object): def __init__(self, objfile, savedir, resolution=512, focal_distance=2, verts_num=9648, scale_factor=1.0): self.device = torch.device("cuda") #set mesh and visualizer---------------------- self.cloth_renderer = ClothRenderer(objfile, resolution, focal_distance, scale_factor) if os.path.exists(os.path.join("experiments", savedir)): pass else: os.makedirs(os.path.join("experiments", savedir)) self.savedir = savedir self.uv = torch.ones((1, 512, 512, 3)).cuda() self.uv.requires_grad = True self.optimizer = optim.Adam([self.uv], lr=5e-3, betas=(0.5, 0.999)) # define loss self.criterion = nn.MSELoss() # nn.L1Loss() nn.MSELoss() self.mse = nn.MSELoss() self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # You can choose TVMaskLoss and test if it is suitable for your case.
# -*- coding: utf-8 -*- """ @date: 2023.03.29-31 week13 @func: PhaseI inference code. """ class Trainer(object): def __init__(self, objfile, savedir, resolution=512, focal_distance=2, verts_num=9648, scale_factor=1.0): self.device = torch.device("cuda") #set mesh and visualizer---------------------- self.cloth_renderer = ClothRenderer(objfile, resolution, focal_distance, scale_factor) if os.path.exists(os.path.join("experiments", savedir)): pass else: os.makedirs(os.path.join("experiments", savedir)) self.savedir = savedir self.uv = torch.ones((1, 512, 512, 3)).cuda() self.uv.requires_grad = True self.optimizer = optim.Adam([self.uv], lr=5e-3, betas=(0.5, 0.999)) # define loss self.criterion = nn.MSELoss() # nn.L1Loss() nn.MSELoss() self.mse = nn.MSELoss() self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # You can choose TVMaskLoss and test if it is suitable for your case.
self.tvl_loss = TVLoss(weight=1) # TVMaskLoss(weight=1) or TVLoss(weight=1)
3
2023-10-17 11:30:53+00:00
12k
uukuguy/multi_loras
multi_loras/slora/models/peft/lora_unordered_batch_infer.py
[ { "identifier": "lora_get_qkvo_fwd_shrink", "path": "multi_loras/slora/models/peft/triton_kernel/lora/lora_prefill.py", "snippet": "@torch.inference_mode()\ndef lora_get_qkvo_fwd_shrink(x, w, o, b_loc, b_lora_start, b_lora_ranks, b_start_loc, b_seq_len, b_indicies, hidden_size, qkvo, max_rank, max_input_len):\n # good for large input_len (prefill stage) better than bgmv, worse than cutlass\n BLOCK_N = 16 if max_rank > 8 else max_rank\n BLOCK_M = 32\n BLOCK_K = 128\n\n batch = b_seq_len.shape[0]\n\n grid = (batch, triton.cdiv(max_rank, BLOCK_N), triton.cdiv(max_input_len, BLOCK_M)) # batch, head,\n\n num_warps = 4\n _shrink_fwd_kernel[grid](\n x, w, b_loc, b_lora_start, b_lora_ranks, b_start_loc, b_seq_len, b_indicies,\n o,\n qkvo,\n x.stride(0), x.stride(1),\n w.stride(0), w.stride(1),\n o.stride(0), o.stride(1),\n BLOCK_M=BLOCK_M,\n BLOCK_DMODEL=hidden_size,\n BLOCK_N=BLOCK_N,\n BLOCK_K=BLOCK_K,\n num_warps=num_warps,\n num_stages=1,\n )\n return" }, { "identifier": "lora_get_qkvo_fwd_expand", "path": "multi_loras/slora/models/peft/triton_kernel/lora/lora_prefill.py", "snippet": "@torch.inference_mode()\ndef lora_get_qkvo_fwd_expand(x, w, o, scale, b_loc, b_lora_start, b_lora_ranks, b_start_loc, b_seq_len, b_indicies, feat_out, qkvo, max_rank, max_input_len):\n # good for large input_len (prefill stage) better than bgmv, worse than cutlass\n BLOCK_N = 128\n N = 1\n TILE = N * BLOCK_N\n BLOCK_M = 32\n # BLOCK_N = 16\n # N = 32\n # TILE = N * BLOCK_N\n # BLOCK_M = 16\n\n batch = b_seq_len.shape[0]\n\n grid = (batch, triton.cdiv(feat_out, TILE), triton.cdiv(max_input_len, BLOCK_M)) # batch, head,\n\n num_warps = 4\n _expand_fwd_kernel[grid](\n x, w, scale, b_loc, b_lora_start, b_lora_ranks, b_start_loc, b_seq_len, b_indicies,\n o,\n qkvo,\n x.stride(0), x.stride(1),\n w.stride(0), w.stride(1),\n o.stride(0), o.stride(1),\n BLOCK_M=BLOCK_M,\n BLOCK_DMODEL=feat_out,\n BLOCK_N=BLOCK_N,\n BLOCK_RANK=max_rank,\n TILE_N=TILE,\n num_warps=num_warps,\n num_stages=2,\n )\n return" }, { "identifier": "context_attention_fwd", "path": "multi_loras/slora/models/llama/triton_kernel/context_flashattention_nopad.py", "snippet": "@torch.no_grad()\ndef context_attention_fwd(q, k, v, o, b_start_loc, b_seq_len, max_input_len):\n BLOCK = 128\n # shape constraints\n Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]\n assert Lq == Lk and Lk == Lv\n assert Lk in {16, 32, 64, 128}\n\n sm_scale = 1.0 / (Lq**0.5) # 计算scale系数\n batch, head = b_seq_len.shape[0], q.shape[1]\n\n grid = (batch, head, triton.cdiv(max_input_len, BLOCK)) # batch, head,\n\n num_warps = 4 if Lk <= 64 else 8\n _fwd_kernel[grid](\n q, k, v, sm_scale, b_start_loc, b_seq_len,\n o,\n q.stride(0), q.stride(1), q.stride(2),\n k.stride(0), k.stride(1), k.stride(2),\n v.stride(0), v.stride(1), v.stride(2),\n o.stride(0), o.stride(1), o.stride(2),\n BLOCK_M=BLOCK,\n BLOCK_DMODEL=Lk,\n BLOCK_N=BLOCK,\n num_warps=num_warps,\n num_stages=1,\n )\n return" }, { "identifier": "rotary_emb_fwd", "path": "multi_loras/slora/models/llama/triton_kernel/rotary_emb.py", "snippet": "@torch.no_grad()\ndef rotary_emb_fwd(q, cos, sin):\n total_len = q.shape[0]\n head_num = q.shape[1]\n head_dim = q.shape[2]\n assert q.shape[0] == cos.shape[0] and q.shape[0] == sin.shape[0], f\"q shape {q.shape} cos shape {cos.shape}\"\n BLOCK_HEAD = 4\n BLOCK_SEQ = 32\n grid = (triton.cdiv(head_num, BLOCK_HEAD), triton.cdiv(total_len, BLOCK_SEQ))\n if head_dim >= 128:\n num_warps = 8\n else:\n num_warps = 4\n\n _rotary_kernel[grid](\n q, cos, sin,\n q.stride(0), q.stride(1), 
q.stride(2),\n cos.stride(0), cos.stride(1),\n sin.stride(0), sin.stride(1),\n total_len, head_num,\n BLOCK_HEAD=BLOCK_HEAD,\n BLOCK_SEQ=BLOCK_SEQ,\n BLOCK_DMODEL=head_dim,\n num_warps=num_warps,\n num_stages=1,\n )\n return" }, { "identifier": "init_bloc", "path": "multi_loras/slora/common/infer_utils.py", "snippet": "def init_bloc(b_loc, b_seq_len, max_len_in_batch, alloc_mem_index):\n start_index = 0\n b_seq_len_numpy = b_seq_len.cpu().numpy()\n for i in range(len(b_seq_len)):\n cur_seq_len = b_seq_len_numpy[i]\n b_loc[i, max_len_in_batch - cur_seq_len:max_len_in_batch] = alloc_mem_index[start_index:start_index + cur_seq_len]\n start_index += cur_seq_len\n return" }, { "identifier": "NaiveInferAdapter", "path": "multi_loras/slora/router/model_infer/naive_infer_adapter.py", "snippet": "class NaiveInferAdapter:\n adapter_dirs: List[str] # all adapters on the server\n a_loc: torch.Tensor # a_loc[i] is a list of indices occupied by adapter i\n a_start: torch.Tensor # a_start[i] is the start location of adapter i\n a_len: torch.Tensor # a_len[i] is the number of cells occupied by adapter i\n a_scaling: torch.Tensor # a_scaling[i] is the scaling factor of adapter i\n idx_map: Dict[str, int]\n key_buffer: torch.Tensor\n value_buffer: torch.Tensor\n layer_num: int\n head_num: int\n head_dim: int\n\n @classmethod\n def init(cls, _layer_num, _head_num, _head_dim):\n return cls(\n adapter_dirs=[],\n a_loc=torch.empty(0, dtype=torch.long, device=\"cuda\"),\n a_start=torch.empty(0, dtype=torch.long, device=\"cuda\"),\n a_len=torch.empty(0, dtype=torch.long, device=\"cuda\"),\n a_scaling=torch.empty(0, dtype=torch.float16, device=\"cuda\"),\n idx_map={},\n key_buffer=[torch.empty(0, dtype=torch.float16, device=\"cuda\")\n for _ in range(_layer_num)],\n value_buffer=[torch.empty(0, dtype=torch.float16, device=\"cuda\")\n for _ in range(_layer_num)],\n layer_num=_layer_num,\n head_num=_head_num,\n head_dim=_head_dim,\n )\n\n\n # @calculate_time(show=True, min_cost_ms=0)\n def load_lora_A(self, adapter, start, end):\n r = adapter.r\n h = adapter.network_config[\"hidden_size\"]\n for i in range(adapter.network_config[\"num_hidden_layers\"]):\n adapter.layers[i].load_to_gpu()\n w_combined = adapter.layers[i].w_combined\n self.key_buffer[i][start:end] = w_combined[0]\n adapter.layers[i].offload_from_gpu()\n\n\n # @calculate_time(show=True, min_cost_ms=0)\n def load_lora_B(self, adapter, start, end):\n r = adapter.r\n h = adapter.network_config[\"hidden_size\"]\n for i in range(adapter.network_config[\"num_hidden_layers\"]):\n adapter.layers[i].load_to_gpu()\n w_combined = adapter.layers[i].w_combined\n self.value_buffer[i][start:end] = w_combined[1]\n adapter.layers[i].offload_from_gpu()\n\n\n # @calculate_time(show=True, min_cost_ms=0)\n def load_adapters(self, adapters, prefetch=False):\n assert prefetch is False\n if len(adapters) == 0:\n print(f\"load 0 adapters, {len(self.adapter_dirs)} in total\")\n return\n\n new_adapters = []\n rank_sum = 0\n for adapter in adapters:\n if adapter is not None and adapter.lora_dir not in self.idx_map:\n new_adapters.append(adapter)\n rank_sum += adapter.r * 4\n print(f\"load {len(new_adapters)} adapters, {len(self.adapter_dirs) + len(new_adapters)} in total\")\n\n if len(new_adapters) == 0:\n print(f\"load 0 adapters, {len(self.adapter_dirs)} in total\")\n return\n\n new_key_buffer = [torch.empty((rank_sum, self.head_num, self.head_dim), dtype=torch.float16, device=\"cuda\")\n for _ in range(self.layer_num)]\n new_value_buffer = [torch.empty((rank_sum, self.head_num, 
self.head_dim), dtype=torch.float16, device=\"cuda\")\n for _ in range(self.layer_num)]\n self.key_buffer = [torch.cat((self.key_buffer[i], new_key_buffer[i]))\n for i in range(self.layer_num)]\n self.value_buffer = [torch.cat((self.value_buffer[i], new_value_buffer[i]))\n for i in range(self.layer_num)]\n\n start_offset = self.a_start.shape[0]\n self.a_start = torch.cat((self.a_start, torch.empty(len(new_adapters,), dtype=torch.long, device=\"cuda\")))\n len_offset = self.a_len.shape[0]\n self.a_len = torch.cat((self.a_len, torch.empty(len(new_adapters,), dtype=torch.long, device=\"cuda\")))\n loc_offset = self.a_loc.shape[0]\n self.a_loc = torch.arange(0, self.a_loc.shape[0] + rank_sum, dtype=torch.long, device=\"cuda\")\n\n cum_loc = loc_offset\n cum_loc_list = []\n for i, new_adapter in enumerate(new_adapters):\n cum_loc_list.append(cum_loc)\n self.idx_map[new_adapter.lora_dir] = len(self.adapter_dirs)\n self.adapter_dirs.append(new_adapter.lora_dir)\n self.a_start[start_offset + i] = cum_loc\n self.a_len[len_offset + i] = new_adapter.r * 4\n cum_loc += new_adapter.r * 4\n self.a_scaling = torch.cat((self.a_scaling, torch.tensor([adapter.scaling for adapter in new_adapters], dtype=torch.float16, device=\"cuda\")))\n\n for i, new_adapter in enumerate(new_adapters):\n cum_loc = cum_loc_list[i]\n self.load_lora_A(new_adapter, cum_loc, cum_loc + new_adapter.r * 4)\n self.load_lora_B(new_adapter, cum_loc, cum_loc + new_adapter.r * 4)\n \n\n # @calculate_time(show=True, min_cost_ms=0)\n def offload_adapters(self, reserve_adapter_dirs):\n if len(reserve_adapter_dirs) == len(self.adapter_dirs):\n print(f\"offload 0 adapters, {len(self.adapter_dirs)} remains\")\n return\n if len(reserve_adapter_dirs) == 0:\n print(f\"offload {len(self.adapter_dirs)} adapters, 0 remains\")\n self.key_buffer=[torch.empty(0, dtype=torch.float16, device=\"cuda\")\n for _ in range(self.layer_num)]\n self.value_buffer=[torch.empty(0, dtype=torch.float16, device=\"cuda\")\n for _ in range(self.layer_num)]\n self.adapter_dirs=[]\n self.a_loc=torch.empty(0, dtype=torch.long, device=\"cuda\")\n self.a_start=torch.empty(0, dtype=torch.long, device=\"cuda\")\n self.a_len=torch.empty(0, dtype=torch.long, device=\"cuda\")\n self.a_scaling=torch.empty(0, dtype=torch.float16, device=\"cuda\")\n self.idx_map={}\n return\n\n left_ind = []\n self.idx_map = {}\n new_adapter_dirs = []\n for i, adapter_dir in enumerate(self.adapter_dirs):\n if adapter_dir in reserve_adapter_dirs:\n left_ind.append(i)\n self.idx_map[adapter_dir] = len(new_adapter_dirs)\n new_adapter_dirs.append(adapter_dir)\n if len(new_adapter_dirs) == len(self.adapter_dirs):\n return\n print(f\"offload {len(self.adapter_dirs) - len(left_ind)} adapters, \"\n f\"{len(left_ind)} remains\")\n # left_ind = torch.tensor(left_ind, dtype=torch.int32, device=\"cuda\")\n left_ind = torch.tensor(left_ind, dtype=torch.long, device=\"cuda\")\n self.adapter_dirs = new_adapter_dirs\n rank_sum = torch.sum(self.a_len[left_ind]).item()\n \n new_a_len = torch.empty(len(left_ind), dtype=torch.long, device=\"cuda\")\n new_a_start = torch.empty(len(left_ind), dtype=torch.long, device=\"cuda\")\n new_a_scaling = torch.empty(len(left_ind), dtype=torch.float16, device=\"cuda\")\n\n new_a_len[:] = self.a_len[left_ind]\n new_a_start[0] = 0\n new_a_start[1:] = torch.cumsum(new_a_len, dim=0)[:-1]\n new_a_scaling[:] = self.a_scaling[left_ind]\n\n # update self.key_buffer self.value_buffer\n new_key_buffer = [torch.empty((rank_sum, self.head_num, self.head_dim), dtype=torch.float16, 
device=\"cuda\")\n for _ in range(self.layer_num)]\n new_value_buffer = [torch.empty((rank_sum, self.head_num, self.head_dim), dtype=torch.float16, device=\"cuda\")\n for _ in range(self.layer_num)]\n copy_ind = torch.empty(rank_sum, dtype=torch.long, device=\"cuda\")\n launch_var_len_copy_triton(self.a_start[left_ind], new_a_len,\n self.a_loc, new_a_start, copy_ind)\n new_key_buffer = [self.key_buffer[i][copy_ind] for i in range(self.layer_num)]\n new_value_buffer = [self.value_buffer[i][copy_ind] for i in range(self.layer_num)]\n self.key_buffer = new_key_buffer\n self.value_buffer = new_value_buffer\n\n self.a_len = new_a_len\n self.a_start = new_a_start\n self.a_loc = torch.arange(0, rank_sum, dtype=torch.long, device=\"cuda\")\n self.a_scaling = new_a_scaling" }, { "identifier": "mark_cost_time", "path": "multi_loras/slora/utils/infer_utils.py", "snippet": "def mark_cost_time(func_name):\n def inner_func(func):\n def time_func(*args, **kwargs):\n if dist.get_rank() in [0, 1] and is_show_cost_time:\n torch.cuda.synchronize()\n start_time = time.time()\n ans = func(*args, **kwargs)\n torch.cuda.synchronize()\n print(func_name, \"cost time:\", (time.time() - start_time) * 1000)\n return ans\n else:\n torch.cuda.synchronize()\n ans = func(*args, **kwargs)\n torch.cuda.synchronize()\n return ans\n\n return time_func\n\n return inner_func" }, { "identifier": "calculate_time", "path": "multi_loras/slora/utils/infer_utils.py", "snippet": "def calculate_time(show=False, min_cost_ms=0.0):\n def wrapper(func):\n def inner_func(*args, **kwargs):\n torch.cuda.synchronize()\n if show:\n start_time = time.time()\n result = func(*args, **kwargs)\n torch.cuda.synchronize()\n if show:\n cost_time = (time.time() - start_time) * 1000\n if cost_time > min_cost_ms:\n print(f\"Function {func.__name__} took {cost_time} ms to run.\")\n return result\n\n return inner_func\n\n return wrapper" }, { "identifier": "mark_start", "path": "multi_loras/slora/utils/infer_utils.py", "snippet": "def mark_start(key):\n torch.cuda.synchronize()\n global time_mark\n time_mark[key] = time.time()\n return" }, { "identifier": "mark_end", "path": "multi_loras/slora/utils/infer_utils.py", "snippet": "def mark_end(key, print_min_cost=0.0):\n torch.cuda.synchronize()\n global time_mark\n cost_time = (time.time() - time_mark[key]) * 1000\n if cost_time > print_min_cost:\n print(f\"cost {key}:\", cost_time)" } ]
import numpy as np import torch import torch.nn as nn from typing import final from .triton_kernel.lora.lora_prefill import lora_get_qkvo_fwd_shrink, lora_get_qkvo_fwd_expand from ..llama.triton_kernel.context_flashattention_nopad import context_attention_fwd from ..llama.triton_kernel.rotary_emb import rotary_emb_fwd from ...common.infer_utils import init_bloc from ...router.model_infer.naive_infer_adapter import NaiveInferAdapter from ...utils.infer_utils import mark_cost_time from ...utils.infer_utils import calculate_time, mark_start, mark_end from slora._kernels import dispatch_bgmv
7,941
o = self._lora_get_o(layer_id, o, infer_state, no_lora_compute) # if self.world_size_ > 1: # dist.all_reduce(o, op=dist.ReduceOp.SUM, async_op=False) # residual input_embs.add_(o.view(-1, layer_infer.embed_dim_)) return # @calculate_time(show=True, min_cost_ms=0) # this impl dont to use @mark_cost_time def _lora_token_attention(self, layer_id, input_embs, infer_state, no_lora_compute=False, no_lora_copy=False): layer_weight = self.base_model.trans_layers_weight[layer_id] layer_infer = self.base_model.layers_infer[layer_id] # layer normalization input1 = layer_infer._att_norm(input_embs, infer_state, layer_weight) # fetch k, v cache_k, cache_v = layer_infer._pre_cache_kv(infer_state, layer_weight) # gen new q, k, v (batch different adapters) q = self._batch_lora_get_qkv(layer_id, input1, cache_k, cache_v, infer_state, no_lora_compute, no_lora_copy) input1 = None layer_infer._post_cache_kv(cache_k, cache_v, infer_state, layer_weight) # compute attention o = layer_infer._token_attention_kernel(q, infer_state, layer_weight) q = None o = self._batch_lora_get_o(layer_id, o, infer_state, no_lora_compute) # if self.world_size_ > 1: # dist.all_reduce(o, op=dist.ReduceOp.SUM, async_op=False) input_embs.add_(o.view(-1, layer_infer.embed_dim_)) return # @calculate_time(show=True, min_cost_ms=0) def _batch_lora_get_qkv(self, layer_id, input_embs, cache_k, cache_v, infer_state, no_lora_compute=False, no_lora_copy=False)->torch.Tensor: base_model = self.base_model base_layer_weight = base_model.trans_layers_weight[layer_id] base_layer_infer = base_model.layers_infer[layer_id] # q (bs, H) q = torch.mm(input_embs.view(-1, base_layer_infer.embed_dim_), base_layer_weight.q_weight_) # @TODO: fix me, filter requests querying only base model assert(len(q)==len(self.req_bins)) if not no_lora_compute: # mark_start("get_q") delta_qA = self.delta[0] dispatch_bgmv(delta_qA, input_embs.view(-1, base_layer_infer.embed_dim_), self.key_buffer[layer_id], self.infer_adapter.a_start, self.infer_adapter.a_len, self.infer_adapter.a_loc, self.req_bins, 0, self.infer_adapter.a_scaling) dispatch_bgmv(q, delta_qA, self.value_buffer[layer_id], self.infer_adapter.a_start, self.infer_adapter.a_len, self.infer_adapter.a_loc, self.req_bins, 0, self.infer_adapter.a_scaling) # delta_qA = None # mark_end("get_q") rotary_emb_fwd(q.view(-1, base_layer_infer.tp_q_head_num_, base_model.head_dim_), infer_state.position_cos, infer_state.position_sin) # k (bs, H) torch.mm(input_embs.view(-1, base_layer_infer.embed_dim_), base_layer_weight.k_weight_, out=cache_k.view(-1, base_model.tp_k_head_num_ * base_model.head_dim_)) if not no_lora_compute: # mark_start("get_k") delta_kA = self.delta[1] dispatch_bgmv(delta_kA, input_embs.view(-1, base_layer_infer.embed_dim_), self.key_buffer[layer_id], self.infer_adapter.a_start, self.infer_adapter.a_len, self.infer_adapter.a_loc, self.req_bins, 1, self.infer_adapter.a_scaling) dispatch_bgmv(cache_k.view(-1, base_model.tp_k_head_num_ * base_model.head_dim_), delta_kA, self.value_buffer[layer_id], self.infer_adapter.a_start, self.infer_adapter.a_len, self.infer_adapter.a_loc, self.req_bins, 1, self.infer_adapter.a_scaling) # delta_kA = None # mark_end("get_k") rotary_emb_fwd(cache_k, infer_state.position_cos, infer_state.position_sin) # v (bs, H) torch.mm(input_embs.view(-1, base_layer_infer.embed_dim_), base_layer_weight.v_weight_, out=cache_v.view(-1, base_model.tp_k_head_num_ * base_model.head_dim_)) if not no_lora_compute: # mark_start("get_v") delta_vA = self.delta[2] dispatch_bgmv(delta_vA, 
input_embs.view(-1, base_layer_infer.embed_dim_), self.key_buffer[layer_id], self.infer_adapter.a_start, self.infer_adapter.a_len, self.infer_adapter.a_loc, self.req_bins, 2, self.infer_adapter.a_scaling) dispatch_bgmv(cache_v.view(-1, base_model.tp_k_head_num_ * base_model.head_dim_), delta_vA, self.value_buffer[layer_id], self.infer_adapter.a_start, self.infer_adapter.a_len, self.infer_adapter.a_loc, self.req_bins, 2, self.infer_adapter.a_scaling) # delta_vA = None # mark_end("get_v") return q def _lora_get_qkv(self, layer_id, input_embs, cache_k, cache_v, infer_state, no_lora_compute=False)->torch.Tensor: base_model = self.base_model base_layer_weight = base_model.trans_layers_weight[layer_id] base_layer_infer = base_model.layers_infer[layer_id] # q (S, H) q = torch.mm(input_embs.view(-1, base_layer_infer.embed_dim_), base_layer_weight.q_weight_) assert(len(q)==len(self.batch_req_bins)) # q = q_base + input * A * B * scaling # input: (S, H) A: (H, R) B: (R, H) if not no_lora_compute: # fix me: @TODO we need to filter out requests querying only base model delta_qA = self.delta[0] if self.max_b_seq_len >= 200 and self.max_lora_dim >= 64 and len(infer_state.b_seq_len) >= 2: # if 1 == 0: lora_get_qkvo_fwd_shrink(input_embs.view(-1, base_layer_infer.embed_dim_), self.key_buffer[layer_id].view(-1, self.kv_embed_dim), delta_qA, self.infer_adapter.a_loc, self.infer_adapter.a_start, self.infer_adapter.a_len, infer_state.b_start_loc, infer_state.b_seq_len, self.req_bins, base_layer_infer.embed_dim_, 0, self.max_lora_dim, self.max_b_seq_len)
class LoraUnorderedBatchInfer: def __init__(self, base_model, adapters, infer_adapter=None): self.base_model = base_model lora_layer_dim = [adapter.r if adapter is not None else 0 for adapter in adapters] self.max_lora_dim = max(lora_layer_dim) self.req_bins = torch.zeros(len(adapters), dtype=torch.long, device="cuda") if infer_adapter is not None: self.infer_adapter = infer_adapter if isinstance(infer_adapter, NaiveInferAdapter): self.key_buffer = infer_adapter.key_buffer self.value_buffer = infer_adapter.value_buffer else: self.key_buffer = infer_adapter.mem_manager.key_buffer self.value_buffer = infer_adapter.mem_manager.value_buffer for i, adapter in enumerate(adapters): # FIX ME @TODO: currently not supporting adapter is None if adapter is None: continue idx = infer_adapter.adapter_dirs.index(adapter.lora_dir) self.req_bins[i] = idx self.kv_embed_dim = base_model.tp_k_head_num_ * base_model.head_dim_ @torch.no_grad() def forward( self, batch_size, # number of request total_token_num, max_len_in_batch, input_ids, # 1D input tensor b_loc, # mapping to memory pool b_start_loc, # the start index of each request b_seq_len, # the current length of each request is_prefill=True, use_bmm=True, no_lora_compute=False, no_lora_copy=False): # Notice that batch_lora only support decoding assert len(b_loc) == len(b_start_loc) == len(b_seq_len) self.delta = [] self.max_b_seq_len = torch.max(b_seq_len).item() if is_prefill: assert(len(self.req_bins)==len(b_seq_len)) self.batch_req_bins = torch.repeat_interleave(self.req_bins, b_seq_len) # self.b_start_loc = torch.cumsum(torch.cat([torch.tensor([0], dtype=torch.long, device="cuda"), b_seq_len[:-1]]), dim=0) for _ in range(3): self.delta.append(torch.zeros((len(self.batch_req_bins), self.max_lora_dim), dtype=torch.float16, device="cuda")) return self._prefill(batch_size, total_token_num, max_len_in_batch, input_ids, b_loc, b_start_loc, b_seq_len, no_lora_compute) else: for _ in range(3): self.delta.append(torch.zeros((len(b_seq_len), self.max_lora_dim), dtype=torch.float16, device="cuda")) return self._decode(batch_size, total_token_num, max_len_in_batch, input_ids, b_loc, b_start_loc, b_seq_len, no_lora_compute, no_lora_copy) def _prefill(self, batch_size, total_token_num, max_len_in_batch, input_ids, b_loc, b_start_loc, b_seq_len, no_lora_compute=False): infer_state = self.base_model.infer_state_class() infer_state.is_prefill = True infer_state.batch_size = batch_size infer_state.total_token_num = total_token_num infer_state.max_len_in_batch = max_len_in_batch assert (input_ids.shape[0] == total_token_num) assert (b_loc.shape[0] == b_start_loc.shape[0] == b_seq_len.shape[0]) b_seq_len_numpy = b_seq_len.cpu().numpy() position_ids = torch.from_numpy(np.concatenate([np.arange(0, b_seq_len_numpy[i]) for i in range(len(b_seq_len_numpy))], axis=0)).cuda() infer_state.position_cos = torch.index_select( self.base_model._cos_cached, 0, position_ids).view(position_ids.shape[0], -1) infer_state.position_sin = torch.index_select( self.base_model._sin_cached, 0, position_ids).view(position_ids.shape[0], -1) position_ids = None infer_state.b_loc = b_loc infer_state.b_start_loc = b_start_loc infer_state.b_seq_len = b_seq_len infer_state.mem_manager = self.base_model.mem_manager infer_state.prefill_mem_index = self.base_model.mem_manager.alloc(infer_state.total_token_num) infer_state.prefill_key_buffer = torch.empty( (infer_state.total_token_num, self.base_model.tp_k_head_num_, self.base_model.head_dim_), dtype=torch.float16, device="cuda") 
infer_state.prefill_value_buffer = torch.empty( (infer_state.total_token_num, self.base_model.tp_k_head_num_, self.base_model.head_dim_), dtype=torch.float16, device="cuda") init_bloc(b_loc, b_seq_len, max_len_in_batch, infer_state.prefill_mem_index) predict_logics = self._context_forward(input_ids, infer_state, no_lora_compute) return predict_logics def _decode(self, batch_size, total_token_num, max_len_in_batch, input_ids, b_loc, b_start_loc, b_seq_len, no_lora_compute=False, no_lora_copy=False): infer_state = self.base_model.infer_state_class() infer_state.is_prefill = False infer_state.batch_size = batch_size infer_state.total_token_num = total_token_num infer_state.max_len_in_batch = max_len_in_batch assert (b_loc.shape[0] == b_start_loc.shape[0] == b_seq_len.shape[0]) infer_state.b_loc = b_loc infer_state.b_start_loc = b_start_loc infer_state.b_seq_len = b_seq_len infer_state.mem_manager = self.base_model.mem_manager alloc_mem = self.base_model.mem_manager.alloc_contiguous(batch_size) if alloc_mem is not None: infer_state.decode_is_contiguous = True infer_state.decode_mem_index = alloc_mem[0] infer_state.decode_mem_start = alloc_mem[1] infer_state.decode_mem_end = alloc_mem[2] b_loc[:, max_len_in_batch - 1] = infer_state.decode_mem_index else: infer_state.decode_is_contiguous = False alloc_mem = self.base_model.mem_manager.alloc(batch_size) infer_state.decode_mem_index = alloc_mem infer_state.decode_key_buffer = torch.empty( (batch_size, self.base_model.tp_k_head_num_, self.base_model.head_dim_), dtype=torch.float16, device="cuda") infer_state.decode_value_buffer = torch.empty( (batch_size, self.base_model.tp_k_head_num_, self.base_model.head_dim_), dtype=torch.float16, device="cuda") b_loc[:, max_len_in_batch - 1] = infer_state.decode_mem_index infer_state.init_some_extra_state(self.base_model, batch_size, total_token_num, max_len_in_batch, input_ids, b_loc, b_start_loc, b_seq_len, False) predict_logics = self._token_forward(input_ids, infer_state, no_lora_compute, no_lora_copy) return predict_logics @final def _context_forward(self, input_ids, infer_state, no_lora_compute=False): cuda_input_ids = input_ids input_embs = self.base_model.pre_infer.context_forward( cuda_input_ids, infer_state, self.base_model.pre_post_weight) for i in range(self.base_model.layers_num): input_embs = self._lora_context_forward(i, input_embs, infer_state, no_lora_compute) predict_logics = self.base_model.post_infer.token_forward( input_embs, infer_state, self.base_model.pre_post_weight, return_logics=True) return predict_logics @final def _token_forward(self, input_ids, infer_state, no_lora_compute=False, no_lora_copy=False): cuda_input_ids = input_ids input_embs = self.base_model.pre_infer.token_forward( cuda_input_ids, infer_state, self.base_model.pre_post_weight) for i in range(self.base_model.layers_num): input_embs = self._lora_token_forward(i, input_embs, infer_state, no_lora_compute, no_lora_copy) predict_logics = self.base_model.post_infer.token_forward( input_embs, infer_state, self.base_model.pre_post_weight, return_logics=True) return predict_logics @final def _lora_context_forward(self, layer_id, input_embs, infer_state, no_lora_compute=False): self._lora_context_attention(layer_id, input_embs, infer_state, no_lora_compute) layer_weight = self.base_model.trans_layers_weight[layer_id] layer_infer = self.base_model.layers_infer[layer_id] layer_infer._context_ffn(input_embs, infer_state, layer_weight) return input_embs @final # @calculate_time(show=True, min_cost_ms=0) def 
_lora_token_forward(self, layer_id, input_embs, infer_state, no_lora_compute=False, no_lora_copy=False): self._lora_token_attention(layer_id, input_embs, infer_state, no_lora_compute, no_lora_copy) layer_weight = self.base_model.trans_layers_weight[layer_id] layer_infer = self.base_model.layers_infer[layer_id] # mark_start("token_ffn") layer_infer._token_ffn(input_embs, infer_state, layer_weight) # mark_end("token_ffn") return input_embs # @mark_cost_time("trans context flash forward time cost") # dont to remove this, will make performence down, did not know why def _lora_context_attention(self, layer_id, input_embs, infer_state, no_lora_compute=False): layer_weight = self.base_model.trans_layers_weight[layer_id] layer_infer = self.base_model.layers_infer[layer_id] # layer normalization input1 = layer_infer._att_norm(input_embs, infer_state, layer_weight) # fetch k, v cache_k, cache_v = layer_infer._pre_cache_kv(infer_state, layer_weight) # gen new q, k, v (batch different adapters) q = self._lora_get_qkv(layer_id, input1, cache_k, cache_v, infer_state, no_lora_compute) input1 = None layer_infer._post_cache_kv(cache_k, cache_v, infer_state, layer_weight) # compute attention o = layer_infer._context_attention_kernel(q, cache_k, cache_v, infer_state, layer_weight) q = None o = self._lora_get_o(layer_id, o, infer_state, no_lora_compute) # if self.world_size_ > 1: # dist.all_reduce(o, op=dist.ReduceOp.SUM, async_op=False) # residual input_embs.add_(o.view(-1, layer_infer.embed_dim_)) return # @calculate_time(show=True, min_cost_ms=0) # this impl dont to use @mark_cost_time def _lora_token_attention(self, layer_id, input_embs, infer_state, no_lora_compute=False, no_lora_copy=False): layer_weight = self.base_model.trans_layers_weight[layer_id] layer_infer = self.base_model.layers_infer[layer_id] # layer normalization input1 = layer_infer._att_norm(input_embs, infer_state, layer_weight) # fetch k, v cache_k, cache_v = layer_infer._pre_cache_kv(infer_state, layer_weight) # gen new q, k, v (batch different adapters) q = self._batch_lora_get_qkv(layer_id, input1, cache_k, cache_v, infer_state, no_lora_compute, no_lora_copy) input1 = None layer_infer._post_cache_kv(cache_k, cache_v, infer_state, layer_weight) # compute attention o = layer_infer._token_attention_kernel(q, infer_state, layer_weight) q = None o = self._batch_lora_get_o(layer_id, o, infer_state, no_lora_compute) # if self.world_size_ > 1: # dist.all_reduce(o, op=dist.ReduceOp.SUM, async_op=False) input_embs.add_(o.view(-1, layer_infer.embed_dim_)) return # @calculate_time(show=True, min_cost_ms=0) def _batch_lora_get_qkv(self, layer_id, input_embs, cache_k, cache_v, infer_state, no_lora_compute=False, no_lora_copy=False)->torch.Tensor: base_model = self.base_model base_layer_weight = base_model.trans_layers_weight[layer_id] base_layer_infer = base_model.layers_infer[layer_id] # q (bs, H) q = torch.mm(input_embs.view(-1, base_layer_infer.embed_dim_), base_layer_weight.q_weight_) # @TODO: fix me, filter requests querying only base model assert(len(q)==len(self.req_bins)) if not no_lora_compute: # mark_start("get_q") delta_qA = self.delta[0] dispatch_bgmv(delta_qA, input_embs.view(-1, base_layer_infer.embed_dim_), self.key_buffer[layer_id], self.infer_adapter.a_start, self.infer_adapter.a_len, self.infer_adapter.a_loc, self.req_bins, 0, self.infer_adapter.a_scaling) dispatch_bgmv(q, delta_qA, self.value_buffer[layer_id], self.infer_adapter.a_start, self.infer_adapter.a_len, self.infer_adapter.a_loc, self.req_bins, 0, 
self.infer_adapter.a_scaling) # delta_qA = None # mark_end("get_q") rotary_emb_fwd(q.view(-1, base_layer_infer.tp_q_head_num_, base_model.head_dim_), infer_state.position_cos, infer_state.position_sin) # k (bs, H) torch.mm(input_embs.view(-1, base_layer_infer.embed_dim_), base_layer_weight.k_weight_, out=cache_k.view(-1, base_model.tp_k_head_num_ * base_model.head_dim_)) if not no_lora_compute: # mark_start("get_k") delta_kA = self.delta[1] dispatch_bgmv(delta_kA, input_embs.view(-1, base_layer_infer.embed_dim_), self.key_buffer[layer_id], self.infer_adapter.a_start, self.infer_adapter.a_len, self.infer_adapter.a_loc, self.req_bins, 1, self.infer_adapter.a_scaling) dispatch_bgmv(cache_k.view(-1, base_model.tp_k_head_num_ * base_model.head_dim_), delta_kA, self.value_buffer[layer_id], self.infer_adapter.a_start, self.infer_adapter.a_len, self.infer_adapter.a_loc, self.req_bins, 1, self.infer_adapter.a_scaling) # delta_kA = None # mark_end("get_k") rotary_emb_fwd(cache_k, infer_state.position_cos, infer_state.position_sin) # v (bs, H) torch.mm(input_embs.view(-1, base_layer_infer.embed_dim_), base_layer_weight.v_weight_, out=cache_v.view(-1, base_model.tp_k_head_num_ * base_model.head_dim_)) if not no_lora_compute: # mark_start("get_v") delta_vA = self.delta[2] dispatch_bgmv(delta_vA, input_embs.view(-1, base_layer_infer.embed_dim_), self.key_buffer[layer_id], self.infer_adapter.a_start, self.infer_adapter.a_len, self.infer_adapter.a_loc, self.req_bins, 2, self.infer_adapter.a_scaling) dispatch_bgmv(cache_v.view(-1, base_model.tp_k_head_num_ * base_model.head_dim_), delta_vA, self.value_buffer[layer_id], self.infer_adapter.a_start, self.infer_adapter.a_len, self.infer_adapter.a_loc, self.req_bins, 2, self.infer_adapter.a_scaling) # delta_vA = None # mark_end("get_v") return q def _lora_get_qkv(self, layer_id, input_embs, cache_k, cache_v, infer_state, no_lora_compute=False)->torch.Tensor: base_model = self.base_model base_layer_weight = base_model.trans_layers_weight[layer_id] base_layer_infer = base_model.layers_infer[layer_id] # q (S, H) q = torch.mm(input_embs.view(-1, base_layer_infer.embed_dim_), base_layer_weight.q_weight_) assert(len(q)==len(self.batch_req_bins)) # q = q_base + input * A * B * scaling # input: (S, H) A: (H, R) B: (R, H) if not no_lora_compute: # fix me: @TODO we need to filter out requests querying only base model delta_qA = self.delta[0] if self.max_b_seq_len >= 200 and self.max_lora_dim >= 64 and len(infer_state.b_seq_len) >= 2: # if 1 == 0: lora_get_qkvo_fwd_shrink(input_embs.view(-1, base_layer_infer.embed_dim_), self.key_buffer[layer_id].view(-1, self.kv_embed_dim), delta_qA, self.infer_adapter.a_loc, self.infer_adapter.a_start, self.infer_adapter.a_len, infer_state.b_start_loc, infer_state.b_seq_len, self.req_bins, base_layer_infer.embed_dim_, 0, self.max_lora_dim, self.max_b_seq_len)
lora_get_qkvo_fwd_expand(delta_qA, self.value_buffer[layer_id].view(-1, self.kv_embed_dim),
1
2023-10-16 02:39:47+00:00
12k
MobileLLM/AutoDroid
droidbot/input_policy.py
[ { "identifier": "UTG", "path": "droidbot/utg.py", "snippet": "class UTG(object):\n \"\"\"\n UI transition graph\n \"\"\"\n\n def __init__(self, device, app, random_input):\n self.logger = logging.getLogger(self.__class__.__name__)\n self.device = device\n self.app = app\n self.random_input = random_input\n\n self.G = nx.DiGraph()\n self.G2 = nx.DiGraph() # graph with same-structure states clustered\n\n self.transitions = []\n self.effective_event_strs = set()\n self.ineffective_event_strs = set()\n self.explored_state_strs = set()\n self.reached_state_strs = set()\n self.reached_activities = set()\n\n self.first_state = None\n self.last_state = None\n\n self.start_time = datetime.datetime.now()\n\n @property\n def first_state_str(self):\n return self.first_state.state_str if self.first_state else None\n\n @property\n def last_state_str(self):\n return self.last_state.state_str if self.last_state else None\n\n @property\n def effective_event_count(self):\n return len(self.effective_event_strs)\n\n @property\n def num_transitions(self):\n return len(self.transitions)\n\n def add_transition(self, event, old_state, new_state):\n self.add_node(old_state)\n self.add_node(new_state)\n\n # make sure the states are not None\n if not old_state or not new_state:\n return\n\n event_str = event.get_event_str(old_state)\n self.transitions.append((old_state, event, new_state))\n\n if old_state.state_str == new_state.state_str:\n self.ineffective_event_strs.add(event_str)\n # delete the transitions including the event from utg\n for new_state_str in self.G[old_state.state_str]:\n if event_str in self.G[old_state.state_str][new_state_str][\"events\"]:\n self.G[old_state.state_str][new_state_str][\"events\"].pop(event_str)\n if event_str in self.effective_event_strs:\n self.effective_event_strs.remove(event_str)\n return\n\n self.effective_event_strs.add(event_str)\n\n if (old_state.state_str, new_state.state_str) not in self.G.edges():\n self.G.add_edge(old_state.state_str, new_state.state_str, events={})\n self.G[old_state.state_str][new_state.state_str][\"events\"][event_str] = {\n \"event\": event,\n \"id\": self.effective_event_count\n }\n\n if (old_state.structure_str, new_state.structure_str) not in self.G2.edges():\n self.G2.add_edge(old_state.structure_str, new_state.structure_str, events={})\n self.G2[old_state.structure_str][new_state.structure_str][\"events\"][event_str] = {\n \"event\": event,\n \"id\": self.effective_event_count\n }\n\n self.last_state = new_state\n self.__output_utg()\n\n def remove_transition(self, event, old_state, new_state):\n event_str = event.get_event_str(old_state)\n if (old_state.state_str, new_state.state_str) in self.G.edges():\n events = self.G[old_state.state_str][new_state.state_str][\"events\"]\n if event_str in events.keys():\n events.pop(event_str)\n if len(events) == 0:\n self.G.remove_edge(old_state.state_str, new_state.state_str)\n if (old_state.structure_str, new_state.structure_str) in self.G2.edges():\n events = self.G2[old_state.structure_str][new_state.structure_str][\"events\"]\n if event_str in events.keys():\n events.pop(event_str)\n if len(events) == 0:\n self.G2.remove_edge(old_state.structure_str, new_state.structure_str)\n\n def add_node(self, state):\n if not state:\n return\n if state.state_str not in self.G.nodes():\n state.save2dir()\n self.G.add_node(state.state_str, state=state)\n if self.first_state is None:\n self.first_state = state\n\n if state.structure_str not in self.G2.nodes():\n self.G2.add_node(state.structure_str, 
states=[])\n self.G2.nodes[state.structure_str]['states'].append(state)\n\n if state.foreground_activity.startswith(self.app.package_name):\n self.reached_activities.add(state.foreground_activity)\n\n def __output_utg(self):\n \"\"\"\n Output current UTG to a js file\n \"\"\"\n if not self.device.output_dir:\n return\n\n def list_to_html_table(dict_data):\n table = \"<table class=\\\"table\\\">\\n\"\n for (key, value) in dict_data:\n table += \"<tr><th>%s</th><td>%s</td></tr>\\n\" % (key, value)\n table += \"</table>\"\n return table\n\n utg_file_path = os.path.join(self.device.output_dir, \"utg.js\")\n utg_file = open(utg_file_path, \"w\")\n utg_nodes = []\n utg_edges = []\n for state_str in self.G.nodes():\n state = self.G.nodes[state_str][\"state\"]\n package_name = state.foreground_activity.split(\"/\")[0]\n activity_name = state.foreground_activity.split(\"/\")[1]\n short_activity_name = activity_name.split(\".\")[-1]\n\n state_desc = list_to_html_table([\n (\"package\", package_name),\n (\"activity\", activity_name),\n (\"state_str\", state.state_str),\n (\"structure_str\", state.structure_str)\n ])\n\n utg_node = {\n \"id\": state_str,\n \"shape\": \"image\",\n \"image\": os.path.relpath(state.screenshot_path, self.device.output_dir),\n \"label\": short_activity_name,\n # \"group\": state.foreground_activity,\n \"package\": package_name,\n \"activity\": activity_name,\n \"state_str\": state_str,\n \"structure_str\": state.structure_str,\n \"title\": state_desc,\n \"content\": \"\\n\".join([package_name, activity_name, state.state_str, state.search_content])\n }\n\n if state.state_str == self.first_state_str:\n utg_node[\"label\"] += \"\\n<FIRST>\"\n utg_node[\"font\"] = \"14px Arial red\"\n if state.state_str == self.last_state_str:\n utg_node[\"label\"] += \"\\n<LAST>\"\n utg_node[\"font\"] = \"14px Arial red\"\n\n utg_nodes.append(utg_node)\n\n for state_transition in self.G.edges():\n from_state = state_transition[0]\n to_state = state_transition[1]\n\n events = self.G[from_state][to_state][\"events\"]\n event_short_descs = []\n event_list = []\n\n for event_str, event_info in sorted(iter(events.items()), key=lambda x: x[1][\"id\"]):\n event_short_descs.append((event_info[\"id\"], event_str))\n if self.device.adapters[self.device.minicap]:\n view_images = [\"views/view_\" + view[\"view_str\"] + \".jpg\"\n for view in event_info[\"event\"].get_views()]\n else:\n view_images = [\"views/view_\" + view[\"view_str\"] + \".png\"\n for view in event_info[\"event\"].get_views()]\n event_list.append({\n \"event_str\": event_str,\n \"event_id\": event_info[\"id\"],\n \"event_type\": event_info[\"event\"].event_type,\n \"view_images\": view_images\n })\n\n utg_edge = {\n \"from\": from_state,\n \"to\": to_state,\n \"id\": from_state + \"-->\" + to_state,\n \"title\": list_to_html_table(event_short_descs),\n \"label\": \", \".join([str(x[\"event_id\"]) for x in event_list]),\n \"events\": event_list\n }\n\n # # Highlight last transition\n # if state_transition == self.last_transition:\n # utg_edge[\"color\"] = \"red\"\n\n utg_edges.append(utg_edge)\n\n utg = {\n \"nodes\": utg_nodes,\n \"edges\": utg_edges,\n\n \"num_nodes\": len(utg_nodes),\n \"num_edges\": len(utg_edges),\n \"num_effective_events\": len(self.effective_event_strs),\n \"num_reached_activities\": len(self.reached_activities),\n \"test_date\": self.start_time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"time_spent\": (datetime.datetime.now() - self.start_time).total_seconds(),\n \"num_transitions\": self.num_transitions,\n\n 
\"device_serial\": self.device.serial,\n \"device_model_number\": self.device.get_model_number(),\n \"device_sdk_version\": self.device.get_sdk_version(),\n\n \"app_sha256\": self.app.hashes[2],\n \"app_package\": self.app.package_name,\n \"app_main_activity\": self.app.main_activity,\n \"app_num_total_activities\": len(self.app.activities),\n }\n\n utg_json = json.dumps(utg, indent=2)\n utg_file.write(\"var utg = \\n\")\n utg_file.write(utg_json)\n utg_file.close()\n\n def is_event_explored(self, event, state):\n event_str = event.get_event_str(state)\n return event_str in self.effective_event_strs or event_str in self.ineffective_event_strs\n\n def is_state_explored(self, state):\n if state.state_str in self.explored_state_strs:\n return True\n for possible_event in state.get_possible_input():\n if not self.is_event_explored(possible_event, state):\n return False\n self.explored_state_strs.add(state.state_str)\n return True\n\n def is_state_reached(self, state):\n if state.state_str in self.reached_state_strs:\n return True\n self.reached_state_strs.add(state.state_str)\n return False\n\n def get_reachable_states(self, current_state):\n reachable_states = []\n for target_state_str in nx.descendants(self.G, current_state.state_str):\n target_state = self.G.nodes[target_state_str][\"state\"]\n reachable_states.append(target_state)\n return reachable_states\n\n def get_navigation_steps(self, from_state, to_state):\n if from_state is None or to_state is None:\n return None\n try:\n steps = []\n from_state_str = from_state.state_str\n to_state_str = to_state.state_str\n state_strs = nx.shortest_path(G=self.G, source=from_state_str, target=to_state_str)\n if not isinstance(state_strs, list) or len(state_strs) < 2:\n self.logger.warning(f\"Error getting path from {from_state_str} to {to_state_str}\")\n start_state_str = state_strs[0]\n for state_str in state_strs[1:]:\n edge = self.G[start_state_str][state_str]\n edge_event_strs = list(edge[\"events\"].keys())\n if self.random_input:\n random.shuffle(edge_event_strs)\n start_state = self.G.nodes[start_state_str]['state']\n event = edge[\"events\"][edge_event_strs[0]][\"event\"]\n steps.append((start_state, event))\n start_state_str = state_str\n return steps\n except Exception as e:\n print(e)\n self.logger.warning(f\"Cannot find a path from {from_state.state_str} to {to_state.state_str}\")\n return None\n\n # def get_simplified_nav_steps(self, from_state, to_state):\n # nav_steps = self.get_navigation_steps(from_state, to_state)\n # if nav_steps is None:\n # return None\n # simple_nav_steps = []\n # last_state, last_action = nav_steps[-1]\n # for state, action in nav_steps:\n # if state.structure_str == last_state.structure_str:\n # simple_nav_steps.append((state, last_action))\n # break\n # simple_nav_steps.append((state, action))\n # return simple_nav_steps\n\n def get_G2_nav_steps(self, from_state, to_state):\n if from_state is None or to_state is None:\n return None\n from_state_str = from_state.structure_str\n to_state_str = to_state.structure_str\n try:\n nav_steps = []\n state_strs = nx.shortest_path(G=self.G2, source=from_state_str, target=to_state_str)\n if not isinstance(state_strs, list) or len(state_strs) < 2:\n return None\n start_state_str = state_strs[0]\n for state_str in state_strs[1:]:\n edge = self.G2[start_state_str][state_str]\n edge_event_strs = list(edge[\"events\"].keys())\n start_state = random.choice(self.G2.nodes[start_state_str]['states'])\n event_str = random.choice(edge_event_strs)\n event = 
edge[\"events\"][event_str][\"event\"]\n nav_steps.append((start_state, event))\n start_state_str = state_str\n if nav_steps is None:\n return None\n # return nav_steps\n # simplify the path\n simple_nav_steps = []\n last_state, last_action = nav_steps[-1]\n for state, action in nav_steps:\n if state.structure_str == last_state.structure_str:\n simple_nav_steps.append((state, last_action))\n break\n simple_nav_steps.append((state, action))\n return simple_nav_steps\n except Exception as e:\n print(e)\n return None" }, { "identifier": "ScrollEvent", "path": "droidbot/input_event.py", "snippet": "class ScrollEvent(UIEvent):\n \"\"\"\n swipe gesture\n \"\"\"\n\n def __init__(self, x=None, y=None, view=None, direction=\"DOWN\", event_dict=None):\n super().__init__(view)\n self.event_type = KEY_ScrollEvent\n self.x = x\n self.y = y\n self.view = view\n self.direction = direction\n\n if event_dict is not None:\n self.__dict__.update(event_dict)\n\n @staticmethod\n def get_random_instance(device, app):\n x = random.uniform(0, device.get_width())\n y = random.uniform(0, device.get_height())\n direction = random.choice([\"UP\", \"DOWN\", \"LEFT\", \"RIGHT\"])\n return ScrollEvent(x, y, direction)\n\n def send(self, device):\n if self.view is not None:\n from .device_state import DeviceState\n width = DeviceState.get_view_width(view_dict=self.view)\n height = DeviceState.get_view_height(view_dict=self.view)\n else:\n width = device.get_width()\n height = device.get_height()\n\n x, y = UIEvent.get_xy(x=self.x, y=self.y, view=self.view)\n if not x or not y:\n # If no view and no coordinate specified, use the screen center coordinate\n x = width / 2\n y = height / 2\n\n start_x, start_y = x, y\n end_x, end_y = x, y\n duration = 500\n\n drag_length = 3/10\n # bias = 5/11\n\n # if self.direction == \"UP\":\n # start_y -= height * 2 / 5\n # end_y += height * 2 / 5\n # elif self.direction == \"DOWN\":\n # start_y += height * 2 / 5\n # end_y -= height * 2 / 5\n # elif self.direction == \"LEFT\":\n # start_x -= width * 2 / 5\n # end_x += width * 2 / 5\n # elif self.direction == \"RIGHT\":\n # start_x += width * 2 / 5\n # end_x -= width * 2 / 5\n\n if self.direction == \"UP\":\n start_y -= height * drag_length\n end_y += height * drag_length\n # do not drag from the center to avoid mis-touch\n # start_x += width * bias\n # end_x += width * bias\n # print(height, start_y, end_y, start_x, end_x, width)\n elif self.direction == \"DOWN\":\n start_y += height * drag_length\n end_y -= height * drag_length\n # do not drag from the center to avoid mis-touch\n # start_x += width * bias\n # end_x += width * bias\n # print(height, start_y, end_y)\n elif self.direction == \"LEFT\":\n start_x -= width * drag_length\n end_x += width * drag_length\n elif self.direction == \"RIGHT\":\n start_x += width * drag_length\n end_x -= width * drag_length\n '''\n this has been used for special case for calendar application. 
You can change 200 due to other special cases\n '''\n if abs(end_y - start_y) >= 200:\n device.view_drag((start_x, start_y), (end_x, end_y), duration)\n return True\n\n def get_event_str(self, state):\n if self.view is not None:\n return \\\n f\"{self.__class__.__name__}({UIEvent.view_str(state, self.view)}, direction={self.direction})\"\n elif self.x is not None and self.y is not None:\n return \"%s(state=%s, x=%s, y=%s, direction=%s)\" %\\\n (self.__class__.__name__, state.state_str, self.x, self.y, self.direction)\n else:\n return \"%s(state=%s, direction=%s)\" % \\\n (self.__class__.__name__, state.state_str, self.direction)\n\n def get_views(self):\n return [self.view] if self.view else []" }, { "identifier": "prompt_llm_with_history", "path": "query_lmql.py", "snippet": "@lmql.query(model=model,decoder='argmax')\ndef prompt_llm_with_history(task,history,ui_desc,ids):\n '''lmql\n \"\"\"You are a smartphone assistant to help users complete tasks by interacting with mobile apps.Given a task, the previous UI actions, and the content of current UI state, your job is to decide whether the task is already finished by the previous actions, and if not, decide which UI element in current UI state should be interacted.\n Task:{task}\n Previous UI actions: {history}\n Current UI State:{ui_desc}\n Your answer should always use the following format:1. Completing this task on a smartphone usually involves these steps: <?>.\\n2. Analyses of the relations between the task and the previous UI actions and current UI state: <?>.\\n3. Based on the previous actions, is the task already finished? <Y/N>. The next step should be <?/None>.\\n4. Can the task be proceeded with the current UI state? <Y/N>. Fill in the blanks about the next one interaction: - id=<id number> - action=<tap/input> - input text=<text or N/A>\n - id=[ID] - action=[ACTION] - input text=[INPUT_TEXT]. \"\"\" where ACTION in [\"tap\", \"input\", \"N/A\"] and ID in {ids} and len(TOKENS(INPUT_TEXT))<6\n\n return ID,ACTION,INPUT_TEXT\n '''" } ]
import sys import json import re import logging import random import yaml import copy import requests import ast import time import tools import pdb import os import traceback import numpy as np from abc import abstractmethod from .input_event import * from .utg import UTG from .input_event import ScrollEvent from query_lmql import prompt_llm_with_history from xmlrpc.client import ServerProxy from xmlrpclib import ServerProxy from InstructorEmbedding import INSTRUCTOR from sklearn.metrics.pairwise import cosine_similarity
8,152
yaml.dump(data, f) def _make_prompt_lmql(self, state_prompt, action_history, is_text, state_str, view_text=None, thought_history=None, use_thoughts=False): if self.use_memory: # if isinstance(state_str, list): # if len(state_str) == 1: # state_str = state_str[0] # else: # state_str = self.memory.hash_state(state_prompt) # new_state_prompt = self.f(action_history, state_prompt, state_str) # if new_state_prompt !z= None and new_state_prompt != 'no_description': # state_prompt = new_state_prompt if len(action_history) <= len(self.similar_ele_path): current_ui_id = len(action_history) - 1 new_state_prompt = tools.insert_onclick_into_prompt(state_prompt, self.similar_ele_path[current_ui_id], self.similar_ele_function) if new_state_prompt != state_prompt: # current state contains an element of insight self.state_ele_memory[state_str] = new_state_prompt state_prompt = new_state_prompt # elif state_str in self.state_ele_memory.keys(): # state_prompt = self.state_ele_memory[state_str] if use_thoughts: history_with_thought = [] for idx in range(len(action_history)): history_with_thought.append(action_history[idx] + ' Reason: ' + thought_history[idx]) else: history_with_thought = action_history return '\n'.join(history_with_thought),state_prompt def _make_prompt(self, state_prompt, action_history, is_text, state_str, view_text=None, thought_history=None, use_thoughts=False): if self.use_memory: # if isinstance(state_str, list): # if len(state_str) == 1: # state_str = state_str[0] # else: # state_str = self.memory.hash_state(state_prompt) # new_state_prompt = self.f(action_history, state_prompt, state_str) # if new_state_prompt !z= None and new_state_prompt != 'no_description': # state_prompt = new_state_prompt if len(action_history) <= len(self.similar_ele_path): current_ui_id = len(action_history) - 1 new_state_prompt = tools.insert_onclick_into_prompt(state_prompt, self.similar_ele_path[current_ui_id], self.similar_ele_function) if new_state_prompt != state_prompt: # current state contains an element of insight self.state_ele_memory[state_str] = new_state_prompt state_prompt = new_state_prompt # elif state_str in self.state_ele_memory.keys(): # state_prompt = self.state_ele_memory[state_str] if use_thoughts: history_with_thought = [] for idx in range(len(action_history)): history_with_thought.append(action_history[idx] + ' Reason: ' + thought_history[idx]) else: history_with_thought = action_history introduction = '''You are a smartphone assistant to help users complete tasks by interacting with mobile apps.Given a task, the previous UI actions, and the content of current UI state, your job is to decide whether the task is already finished by the previous actions, and if not, decide which UI element in current UI state should be interacted.''' task_prompt = 'Task: ' + self.task history_prompt = 'Previous UI actions: \n' + '\n'.join(history_with_thought) full_state_prompt = 'Current UI state: \n' + state_prompt request_prompt = '''Your answer should always use the following format:1. Completing this task on a smartphone usually involves these steps: <?>.\n2. Analyses of the relations between the task and the previous UI actions and current UI state: <?>.\n3. Based on the previous actions, is the task already finished? <Y/N>. The next step should be <?/None>.\n4. Can the task be proceeded with the current UI state? <Y/N>. 
Fill in the blanks about the next one interaction: - id=<id number> - action=<tap/input> - input text=<text or N/A>''' prompt = introduction + '\n' + task_prompt + '\n' + history_prompt + '\n' + full_state_prompt + '\n' + request_prompt return prompt def _extract_input_text(self, string, start='Text: ', end=' Thought'): start_index = string.find(start) + len(start) # Find the location of 'start' if start_index == -1: start_index = 0 end_index = string.find(end) # Find the location of 'end' substring = string[start_index:end_index] if end_index != -1 else string[start_index:] return substring def _extract_input_textv2(self, string): if string[:11] == 'InputText: ': return string[11:] else: return string def _get_text_view_description(self, view): content_description = safe_dict_get(view, 'content_description', default='') view_text = safe_dict_get(view, 'text', default='') view_desc = f"<input class='&'>#</input>"#.replace('&', view_class)#.replace('#', text) if view_text: view_desc = view_desc.replace('#', view_text) else: view_desc = view_desc.replace('#', '') if content_description: view_desc = view_desc.replace('&', content_description) else: view_desc = view_desc.replace(" class='&'", "") return view_desc def _get_action_from_views_actions(self, action_history, thought_history, views=None, candidate_actions=None, state_strs=None, current_state=None): ''' get action choice from LLM based on a list of views and corresponding actions ''' if current_state: state_prompt, candidate_actions, _, _ = current_state.get_described_actions() state_str = current_state.state_str if USE_LMQL: history, state_prompt = self._make_prompt_lmql(state_prompt, action_history, is_text=False, state_str=state_str, thought_history=thought_history) else: prompt = self._make_prompt(state_prompt, action_history, is_text=False, state_str=state_str, thought_history=thought_history) else: views_with_id = [] for id in range(len(views)): views_with_id.append(tools.insert_id_into_view(views[id], id)) state_prompt = '\n'.join(views_with_id) state_str = tools.hash_string(state_prompt) if USE_LMQL: history, state_prompt = self._make_prompt_lmql(state_prompt, action_history, is_text=False, state_str=state_str, thought_history=thought_history) else: prompt = self._make_prompt(state_prompt, action_history, is_text=False, state_str=state_str, thought_history=thought_history) # ids = [str(idx) for idx, i in enumerate(candidate_actions)] ids = str([i for i in range(len(candidate_actions))]) if USE_LMQL:
# from memory.memory_builder import Memory os.environ["TOKENIZERS_PARALLELISM"] = "false" # Max number of restarts MAX_NUM_RESTARTS = 5 # Max number of steps outside the app MAX_NUM_STEPS_OUTSIDE = 1000 MAX_NUM_STEPS_OUTSIDE_KILL = 1000 # Max number of replay tries MAX_REPLY_TRIES = 5 # Some input event flags EVENT_FLAG_STARTED = "+started" EVENT_FLAG_START_APP = "+start_app" EVENT_FLAG_STOP_APP = "+stop_app" EVENT_FLAG_EXPLORE = "+explore" EVENT_FLAG_NAVIGATE = "+navigate" EVENT_FLAG_TOUCH = "+touch" # Policy taxanomy POLICY_NAIVE_DFS = "dfs_naive" POLICY_GREEDY_DFS = "dfs_greedy" POLICY_NAIVE_BFS = "bfs_naive" POLICY_GREEDY_BFS = "bfs_greedy" POLICY_REPLAY = "replay" POLICY_MANUAL = "manual" POLICY_MONKEY = "monkey" POLICY_TASK = "task" POLICY_NONE = "none" POLICY_MEMORY_GUIDED = "memory_guided" # implemented in input_policy2 FINISHED = "task_completed" MAX_SCROLL_NUM = 7 USE_LMQL = False class InputInterruptedException(Exception): pass def safe_dict_get(view_dict, key, default=None): return_itm = view_dict[key] if (key in view_dict) else default if return_itm == None: return_itm = '' return return_itm class InputPolicy(object): """ This class is responsible for generating events to stimulate more app behaviour It should call AppEventManager.send_event method continuously """ def __init__(self, device, app): self.logger = logging.getLogger(self.__class__.__name__) self.device = device self.app = app self.action_count = 0 self.master = None def start(self, input_manager): """ start producing events :param input_manager: instance of InputManager """ self.action_count = 0 while input_manager.enabled and self.action_count < input_manager.event_count: try: # # make sure the first event is go to HOME screen # # the second event is to start the app # if self.action_count == 0 and self.master is None: # event = KeyEvent(name="HOME") # elif self.action_count == 1 and self.master is None: # event = IntentEvent(self.app.get_start_intent()) if self.action_count == 0 and self.master is None: event = KillAppEvent(app=self.app) else: event = self.generate_event(input_manager) if event == FINISHED: break input_manager.add_event(event) except KeyboardInterrupt: break except InputInterruptedException as e: self.logger.warning("stop sending events: %s" % e) break # except RuntimeError as e: # self.logger.warning(e.message) # break except Exception as e: self.logger.warning("exception during sending events: %s" % e) traceback.print_exc() continue self.action_count += 1 @abstractmethod def generate_event(self, input_manager): """ generate an event @return: """ pass class NoneInputPolicy(InputPolicy): """ do not send any event """ def __init__(self, device, app): super(NoneInputPolicy, self).__init__(device, app) def generate_event(self): """ generate an event @return: """ return None class UtgBasedInputPolicy(InputPolicy): """ state-based input policy """ def __init__(self, device, app, random_input): super(UtgBasedInputPolicy, self).__init__(device, app) self.random_input = random_input self.script = None self.master = None self.script_events = [] self.last_event = None self.last_state = None self.current_state = None self.utg = UTG(device=device, app=app, random_input=random_input) self.script_event_idx = 0 if self.device.humanoid is not None: self.humanoid_view_trees = [] self.humanoid_events = [] def generate_event(self, input_manager): """ generate an event @return: """ # Get current device state self.current_state = self.device.get_current_state() if self.current_state is None: time.sleep(5) return 
KeyEvent(name="BACK") self.__update_utg() # update last view trees for humanoid if self.device.humanoid is not None: self.humanoid_view_trees = self.humanoid_view_trees + [self.current_state.view_tree] if len(self.humanoid_view_trees) > 4: self.humanoid_view_trees = self.humanoid_view_trees[1:] event = None # if the previous operation is not finished, continue if len(self.script_events) > self.script_event_idx: event = self.script_events[self.script_event_idx].get_transformed_event(self) self.script_event_idx += 1 # First try matching a state defined in the script if event is None and self.script is not None: operation = self.script.get_operation_based_on_state(self.current_state) if operation is not None: self.script_events = operation.events # restart script event = self.script_events[0].get_transformed_event(self) self.script_event_idx = 1 if event is None: old_state, event = self.generate_event_based_on_utg(input_manager) time.sleep(3) # update last events for humanoid if self.device.humanoid is not None: self.humanoid_events = self.humanoid_events + [event] if len(self.humanoid_events) > 3: self.humanoid_events = self.humanoid_events[1:] self.last_state = self.current_state if old_state is None else old_state self.last_event = event return event def __update_utg(self): self.utg.add_transition(self.last_event, self.last_state, self.current_state) @abstractmethod def generate_event_based_on_utg(self, input_manager): """ generate an event based on UTG :return: InputEvent """ pass class UtgNaiveSearchPolicy(UtgBasedInputPolicy): """ depth-first strategy to explore UFG (old) """ def __init__(self, device, app, random_input, search_method): super(UtgNaiveSearchPolicy, self).__init__(device, app, random_input) self.logger = logging.getLogger(self.__class__.__name__) self.explored_views = set() self.state_transitions = set() self.search_method = search_method self.last_event_flag = "" self.last_event_str = None self.last_state = None self.preferred_buttons = ["yes", "ok", "activate", "detail", "more", "access", "allow", "check", "agree", "try", "go", "next"] def generate_event_based_on_utg(self): """ generate an event based on current device state note: ensure these fields are properly maintained in each transaction: last_event_flag, last_touched_view, last_state, exploited_views, state_transitions @return: InputEvent """ self.save_state_transition(self.last_event_str, self.last_state, self.current_state) if self.device.is_foreground(self.app): # the app is in foreground, clear last_event_flag self.last_event_flag = EVENT_FLAG_STARTED else: number_of_starts = self.last_event_flag.count(EVENT_FLAG_START_APP) # If we have tried too many times but the app is still not started, stop DroidBot if number_of_starts > MAX_NUM_RESTARTS: raise InputInterruptedException("The app cannot be started.") # if app is not started, try start it if self.last_event_flag.endswith(EVENT_FLAG_START_APP): # It seems the app stuck at some state, and cannot be started # just pass to let viewclient deal with this case self.logger.info("The app had been restarted %d times.", number_of_starts) self.logger.info("Trying to restart app...") pass else: start_app_intent = self.app.get_start_intent() self.last_event_flag += EVENT_FLAG_START_APP self.last_event_str = EVENT_FLAG_START_APP return IntentEvent(start_app_intent) # select a view to click view_to_touch = self.select_a_view(self.current_state) # if no view can be selected, restart the app if view_to_touch is None: stop_app_intent = self.app.get_stop_intent() 
self.last_event_flag += EVENT_FLAG_STOP_APP self.last_event_str = EVENT_FLAG_STOP_APP return IntentEvent(stop_app_intent) view_to_touch_str = view_to_touch['view_str'] if view_to_touch_str.startswith('BACK'): result = KeyEvent('BACK') else: result = TouchEvent(view=view_to_touch) self.last_event_flag += EVENT_FLAG_TOUCH self.last_event_str = view_to_touch_str self.save_explored_view(self.current_state, self.last_event_str) return result def select_a_view(self, state): """ select a view in the view list of given state, let droidbot touch it @param state: DeviceState @return: """ views = [] for view in state.views: if view['enabled'] and len(view['children']) == 0: views.append(view) if self.random_input: random.shuffle(views) # add a "BACK" view, consider go back first/last according to search policy mock_view_back = {'view_str': 'BACK_%s' % state.foreground_activity, 'text': 'BACK_%s' % state.foreground_activity} if self.search_method == POLICY_NAIVE_DFS: views.append(mock_view_back) elif self.search_method == POLICY_NAIVE_BFS: views.insert(0, mock_view_back) # first try to find a preferable view for view in views: view_text = view['text'] if view['text'] is not None else '' view_text = view_text.lower().strip() if view_text in self.preferred_buttons \ and (state.foreground_activity, view['view_str']) not in self.explored_views: self.logger.info("selected an preferred view: %s" % view['view_str']) return view # try to find a un-clicked view for view in views: if (state.foreground_activity, view['view_str']) not in self.explored_views: self.logger.info("selected an un-clicked view: %s" % view['view_str']) return view # if all enabled views have been clicked, try jump to another activity by clicking one of state transitions if self.random_input: random.shuffle(views) transition_views = {transition[0] for transition in self.state_transitions} for view in views: if view['view_str'] in transition_views: self.logger.info("selected a transition view: %s" % view['view_str']) return view # no window transition found, just return a random view # view = views[0] # self.logger.info("selected a random view: %s" % view['view_str']) # return view # DroidBot stuck on current state, return None self.logger.info("no view could be selected in state: %s" % state.tag) return None def save_state_transition(self, event_str, old_state, new_state): """ save the state transition @param event_str: str, representing the event cause the transition @param old_state: DeviceState @param new_state: DeviceState @return: """ if event_str is None or old_state is None or new_state is None: return if new_state.is_different_from(old_state): self.state_transitions.add((event_str, old_state.tag, new_state.tag)) def save_explored_view(self, state, view_str): """ save the explored view @param state: DeviceState, where the view located @param view_str: str, representing a view @return: """ if not state: return state_activity = state.foreground_activity self.explored_views.add((state_activity, view_str)) class UtgGreedySearchPolicy(UtgBasedInputPolicy): """ DFS/BFS (according to search_method) strategy to explore UFG (new) """ def __init__(self, device, app, random_input, search_method): super(UtgGreedySearchPolicy, self).__init__(device, app, random_input) self.logger = logging.getLogger(self.__class__.__name__) self.search_method = search_method self.preferred_buttons = ["yes", "ok", "activate", "detail", "more", "access", "allow", "check", "agree", "try", "go", "next"] self.__nav_target = None self.__nav_num_steps = -1 
self.__num_restarts = 0 self.__num_steps_outside = 0 self.__event_trace = "" self.__missed_states = set() self.__random_explore = False def generate_event_based_on_utg(self, input_manager): """ generate an event based on current UTG @return: InputEvent """ current_state = self.current_state self.logger.info("Current state: %s" % current_state.state_str) if current_state.state_str in self.__missed_states: self.__missed_states.remove(current_state.state_str) if current_state.get_app_activity_depth(self.app) < 0: # If the app is not in the activity stack start_app_intent = self.app.get_start_intent() # It seems the app stucks at some state, has been # 1) force stopped (START, STOP) # just start the app again by increasing self.__num_restarts # 2) started at least once and cannot be started (START) # pass to let viewclient deal with this case # 3) nothing # a normal start. clear self.__num_restarts. if self.__event_trace.endswith(EVENT_FLAG_START_APP + EVENT_FLAG_STOP_APP) \ or self.__event_trace.endswith(EVENT_FLAG_START_APP): self.__num_restarts += 1 self.logger.info("The app had been restarted %d times.", self.__num_restarts) else: self.__num_restarts = 0 # pass (START) through if not self.__event_trace.endswith(EVENT_FLAG_START_APP): if self.__num_restarts > MAX_NUM_RESTARTS: # If the app had been restarted too many times, enter random mode msg = "The app had been restarted too many times. Entering random mode." self.logger.info(msg) self.__random_explore = True else: # Start the app self.__event_trace += EVENT_FLAG_START_APP self.logger.info("Trying to start the app...") return IntentEvent(intent=start_app_intent) elif current_state.get_app_activity_depth(self.app) > 0: # If the app is in activity stack but is not in foreground self.__num_steps_outside += 1 if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE: # If the app has not been in foreground for too long, try to go back if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE_KILL: stop_app_intent = self.app.get_stop_intent() go_back_event = IntentEvent(stop_app_intent) else: go_back_event = KeyEvent(name="BACK") self.__event_trace += EVENT_FLAG_NAVIGATE self.logger.info("Going back to the app...") return go_back_event else: # If the app is in foreground self.__num_steps_outside = 0 # Get all possible input events possible_events = current_state.get_possible_input() if self.random_input: random.shuffle(possible_events) if self.search_method == POLICY_GREEDY_DFS: possible_events.append(KeyEvent(name="BACK")) elif self.search_method == POLICY_GREEDY_BFS: possible_events.insert(0, KeyEvent(name="BACK")) # get humanoid result, use the result to sort possible events # including back events if self.device.humanoid is not None: possible_events = self.__sort_inputs_by_humanoid(possible_events) # If there is an unexplored event, try the event first for input_event in possible_events: if not self.utg.is_event_explored(event=input_event, state=current_state): self.logger.info("Trying an unexplored event.") self.__event_trace += EVENT_FLAG_EXPLORE return input_event target_state = self.__get_nav_target(current_state) if target_state: navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=target_state) if navigation_steps and len(navigation_steps) > 0: self.logger.info("Navigating to %s, %d steps left." 
% (target_state.state_str, len(navigation_steps))) self.__event_trace += EVENT_FLAG_NAVIGATE return navigation_steps[0][1] if self.__random_explore: self.logger.info("Trying random event.") random.shuffle(possible_events) return possible_events[0] # If couldn't find a exploration target, stop the app stop_app_intent = self.app.get_stop_intent() self.logger.info("Cannot find an exploration target. Trying to restart app...") self.__event_trace += EVENT_FLAG_STOP_APP return IntentEvent(intent=stop_app_intent) def __sort_inputs_by_humanoid(self, possible_events): if sys.version.startswith("3"): else: proxy = ServerProxy("http://%s/" % self.device.humanoid) request_json = { "history_view_trees": self.humanoid_view_trees, "history_events": [x.__dict__ for x in self.humanoid_events], "possible_events": [x.__dict__ for x in possible_events], "screen_res": [self.device.display_info["width"], self.device.display_info["height"]] } result = json.loads(proxy.predict(json.dumps(request_json))) new_idx = result["indices"] text = result["text"] new_events = [] # get rid of infinite recursive by randomizing first event if not self.utg.is_state_reached(self.current_state): new_first = random.randint(0, len(new_idx) - 1) new_idx[0], new_idx[new_first] = new_idx[new_first], new_idx[0] for idx in new_idx: if isinstance(possible_events[idx], SetTextEvent): possible_events[idx].text = text new_events.append(possible_events[idx]) return new_events def __get_nav_target(self, current_state): # If last event is a navigation event if self.__nav_target and self.__event_trace.endswith(EVENT_FLAG_NAVIGATE): navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=self.__nav_target) if navigation_steps and 0 < len(navigation_steps) <= self.__nav_num_steps: # If last navigation was successful, use current nav target self.__nav_num_steps = len(navigation_steps) return self.__nav_target else: # If last navigation was failed, add nav target to missing states self.__missed_states.add(self.__nav_target.state_str) reachable_states = self.utg.get_reachable_states(current_state) if self.random_input: random.shuffle(reachable_states) for state in reachable_states: # Only consider foreground states if state.get_app_activity_depth(self.app) != 0: continue # Do not consider missed states if state.state_str in self.__missed_states: continue # Do not consider explored states if self.utg.is_state_explored(state): continue self.__nav_target = state navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=self.__nav_target) if len(navigation_steps) > 0: self.__nav_num_steps = len(navigation_steps) return state self.__nav_target = None self.__nav_num_steps = -1 return None class UtgReplayPolicy(InputPolicy): """ Replay DroidBot output generated by UTG policy """ def __init__(self, device, app, replay_output): super(UtgReplayPolicy, self).__init__(device, app) self.logger = logging.getLogger(self.__class__.__name__) self.replay_output = replay_output event_dir = os.path.join(replay_output, "events") self.event_paths = sorted([os.path.join(event_dir, x) for x in next(os.walk(event_dir))[2] if x.endswith(".json")]) # skip HOME and start app intent self.device = device self.app = app self.event_idx = 2 self.num_replay_tries = 0 self.utg = UTG(device=device, app=app, random_input=None) self.last_event = None self.last_state = None self.current_state = None def generate_event(self): """ generate an event based on replay_output @return: InputEvent """ while self.event_idx < 
len(self.event_paths) and \ self.num_replay_tries < MAX_REPLY_TRIES: self.num_replay_tries += 1 current_state = self.device.get_current_state() if current_state is None: time.sleep(5) self.num_replay_tries = 0 return KeyEvent(name="BACK") curr_event_idx = self.event_idx self.__update_utg() while curr_event_idx < len(self.event_paths): event_path = self.event_paths[curr_event_idx] with open(event_path, "r") as f: curr_event_idx += 1 try: event_dict = json.load(f) except Exception as e: self.logger.info("Loading %s failed" % event_path) continue if event_dict["start_state"] != current_state.state_str: continue if not self.device.is_foreground(self.app): # if current app is in background, bring it to foreground component = self.app.get_package_name() if self.app.get_main_activity(): component += "/%s" % self.app.get_main_activity() return IntentEvent(Intent(suffix=component)) self.logger.info("Replaying %s" % event_path) self.event_idx = curr_event_idx self.num_replay_tries = 0 # return InputEvent.from_dict(event_dict["event"]) event = InputEvent.from_dict(event_dict["event"]) self.last_state = self.current_state self.last_event = event return event time.sleep(5) # raise InputInterruptedException("No more record can be replayed.") def __update_utg(self): self.utg.add_transition(self.last_event, self.last_state, self.current_state) class ManualPolicy(UtgBasedInputPolicy): """ manually explore UFG """ def __init__(self, device, app): super(ManualPolicy, self).__init__(device, app, False) self.logger = logging.getLogger(self.__class__.__name__) self.__first_event = True def generate_event_based_on_utg(self): """ generate an event based on current UTG @return: InputEvent """ if self.__first_event: self.__first_event = False self.logger.info("Trying to start the app...") start_app_intent = self.app.get_start_intent() return IntentEvent(intent=start_app_intent) else: return ManualEvent() class TaskPolicy(UtgBasedInputPolicy): def __init__(self, device, app, random_input, task, use_memory=True, debug_mode=False): super(TaskPolicy, self).__init__(device, app, random_input) self.logger = logging.getLogger(self.__class__.__name__) self.task = task self.__nav_target = None self.__nav_num_steps = -1 self.__num_restarts = 0 self.__num_steps_outside = 0 self.__event_trace = "" self.__missed_states = set() self.__random_explore = random_input self.__action_history = [] self.__thought_history = [] self.use_memory = use_memory # if use_memory: # self.memory = Memory(app_name=self.app.app_name, app_output_path=self.device.output_dir) if self.use_memory: self.similar_ele_path, self.similar_ele_function, self.similar_ele_statement = self.get_most_similar_element() if not self.similar_ele_function: self.use_memory = False print('=============\nWarning: Did not find the memory of this app, the app memory is disabled\n=============') else: print(f'============\nFound element: {self.similar_ele_statement}\nPath: {self.similar_ele_path}\nFunction: {self.similar_ele_function}\n============') self.state_ele_memory = {} # memorize some important states that contain elements of insight def get_most_similar_element(self): model = INSTRUCTOR('hkunlp/instructor-xl') task_embedding = model.encode('task: ' + self.task).reshape(1, -1) with open('memory/node_filtered_elements.json') as file: ele_statements = json.load(file) with open('memory/element_description.json') as file: ele_functions = json.load(file) with open('memory/embedded_elements_desc.json') as file: embeddings = json.load(file) app_name = 
self.device.output_dir.split('/')[-1] if app_name not in embeddings.keys(): return None, None, None app_embeddings = embeddings[app_name] # similarities = {} max_similarity, similar_ele_idx = -9999, -9999 for state_str, elements in app_embeddings.items(): # if the target element is in the first ui, no onclick is needed # if ele_statements[app_name][state_str]['path'] == []: # continue # similarities[state_str] = [] for idx, ele in enumerate(elements): if ele: npele = np.array(ele).reshape(1, -1) similarity = cosine_similarity(task_embedding, npele)[0][0] else: similarity = -9999 # similarities[state_str].append(similarity) if similarity > max_similarity: max_similarity = similarity similar_ele_idx = idx similar_state_str = state_str similar_ele = ele_statements[app_name][similar_state_str]['elements'][similar_ele_idx] similar_ele_path = ele_statements[app_name][similar_state_str]['path'] similar_ele_desc = ele_functions[app_name][similar_state_str][similar_ele_idx] del model return similar_ele_path, similar_ele_desc, similar_ele def _scroll_to_top(self, scroller, all_views_for_mark, old_state=None): prefix_scroll_event = [] if old_state is None: old_state = self.current_state for _ in range(MAX_SCROLL_NUM): # first scroll up to the top self.device.send_event(ScrollEvent(view=scroller, direction="UP")) scrolled_state = self.device.get_current_state() self.utg.add_transition(ScrollEvent(view=scroller, direction="UP"), old_state, scrolled_state) old_state = scrolled_state state_prompt, scrolled_candidate_actions, scrolled_views, _ = scrolled_state.get_described_actions() scrolled_new_views = [] # judge whether there is a new view after scrolling for scrolled_view in scrolled_views: if scrolled_view not in all_views_for_mark: scrolled_new_views.append(scrolled_view) all_views_for_mark.append(scrolled_view) if len(scrolled_new_views) == 0: break prefix_scroll_event.append(ScrollEvent(view=scroller, direction="UP")) return prefix_scroll_event def generate_event_based_on_utg(self, input_manager): """ generate an event based on current UTG @return: InputEvent """ current_state = self.current_state self.logger.info("Current state: %s" % current_state.state_str) if current_state.state_str in self.__missed_states: self.__missed_states.remove(current_state.state_str) if current_state.get_app_activity_depth(self.app) < 0: # If the app is not in the activity stack start_app_intent = self.app.get_start_intent() # It seems the app stucks at some state, has been # 1) force stopped (START, STOP) # just start the app again by increasing self.__num_restarts # 2) started at least once and cannot be started (START) # pass to let viewclient deal with this case # 3) nothing # a normal start. clear self.__num_restarts. if self.__event_trace.endswith(EVENT_FLAG_START_APP + EVENT_FLAG_STOP_APP) \ or self.__event_trace.endswith(EVENT_FLAG_START_APP): self.__num_restarts += 1 self.logger.info("The app had been restarted %d times.", self.__num_restarts) else: self.__num_restarts = 0 # pass (START) through if not self.__event_trace.endswith(EVENT_FLAG_START_APP): if self.__num_restarts > MAX_NUM_RESTARTS: # If the app had been restarted too many times, enter random mode msg = "The app had been restarted too many times. Entering random mode." 
self.logger.info(msg) self.__random_explore = True else: # Start the app self.__event_trace += EVENT_FLAG_START_APP self.logger.info("Trying to start the app...") # self.__action_history = [f'- start the app {self.app.app_name}'] self.__action_history = [f'- launchApp {self.app.app_name}'] self.__thought_history = [f'launch the app {self.app.app_name} to finish the task {self.task}'] return None, IntentEvent(intent=start_app_intent) elif current_state.get_app_activity_depth(self.app) > 0: # If the app is in activity stack but is not in foreground self.__num_steps_outside += 1 if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE: # If the app has not been in foreground for too long, try to go back if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE_KILL: stop_app_intent = self.app.get_stop_intent() go_back_event = IntentEvent(stop_app_intent) else: go_back_event = KeyEvent(name="BACK") self.__event_trace += EVENT_FLAG_NAVIGATE self.logger.info("Going back to the app...") self.__action_history.append('- go back') self.__thought_history.append('the app has not been in foreground for too long, try to go back') return None, go_back_event else: # If the app is in foreground self.__num_steps_outside = 0 scrollable_views = current_state.get_scrollable_views()#self._get_scrollable_views(current_state) if len(scrollable_views) > 0: ''' if there is at least one scroller in the screen, we scroll each scroller many times until all the screens after scrolling have been recorded, you do not need to read ''' # print(scrollable_views) actions_dict = {} whole_state_views, whole_state_actions, whole_state_strs = [], [], [] # state_strs = [current_state.state_str] state_prompt, current_candidate_actions, current_views, _ = current_state.get_described_actions() all_views_for_mark = copy.deepcopy(current_views) # just for judging whether the screen has been scrolled up to the top for scrollerid in range(len(scrollable_views)): scroller = scrollable_views[scrollerid] # prefix_scroll_event = [] actions_dict[scrollerid] = [] prefix_scroll_event = self._scroll_to_top(scroller, all_views_for_mark) # after scrolling to the top, update the current_state top_state = self.device.get_current_state() state_prompt, top_candidate_actions, top_views, _ = top_state.get_described_actions() all_views_without_id, all_actions = top_views, top_candidate_actions too_few_item_time = 0 for _ in range(MAX_SCROLL_NUM): # then scroll down to the bottom whole_state_strs.append(top_state.state_str) # record the states from the top to the bottom self.device.send_event(ScrollEvent(view=scroller, direction="DOWN")) scrolled_state = self.device.get_current_state() state_prompt, scrolled_candidate_actions, scrolled_views, _ = scrolled_state.get_described_actions() scrolled_new_views = [] for scrolled_view_id in range(len(scrolled_views)): scrolled_view = scrolled_views[scrolled_view_id] if scrolled_view not in all_views_without_id: scrolled_new_views.append(scrolled_view) all_views_without_id.append(scrolled_view) all_actions.append(prefix_scroll_event + [ScrollEvent(view=scroller, direction="DOWN"), scrolled_candidate_actions[scrolled_view_id]]) # print('found new views:', scrolled_new_views) if len(scrolled_new_views) == 0: break prefix_scroll_event.append(ScrollEvent(view=scroller, direction="DOWN")) if len(scrolled_new_views) < 2: too_few_item_time += 1 if too_few_item_time >= 2: break self.utg.add_transition(ScrollEvent(view=scroller, direction="DOWN"), top_state, scrolled_state) top_state = scrolled_state # filter out the views that have 
been added to the whole_state by scrolling other scrollers for all_view_id in range(len(all_views_without_id)): view = all_views_without_id[all_view_id] if view not in whole_state_views: whole_state_views.append(view) whole_state_actions.append(all_actions[all_view_id]) all_views_for_mark = [] _ = self._scroll_to_top(scroller, all_views_for_mark, top_state) # print(whole_state_views) action, candidate_actions, target_view, thought = self._get_action_from_views_actions( views=whole_state_views, candidate_actions=whole_state_actions, state_strs=whole_state_strs, action_history=self.__action_history, thought_history=self.__thought_history) if isinstance(action, list): # the screen has to be scrolled first last_state = None for eventid in range(len(action) - 1): self.device.send_event(action[eventid]) last_state = self.device.get_current_state() # self.__action_history.append(current_state.get_action_desc(action[eventid])) self.__action_history.append(current_state.get_action_descv2(action[-1], target_view)) self.__thought_history.append(thought) return last_state, action[-1] ''' end for dealing with scrollers ''' else: action, candidate_actions, target_view, thought = self._get_action_from_views_actions( current_state=current_state, action_history=self.__action_history, thought_history=self.__thought_history, state_strs=current_state.state_str) if action == FINISHED: return None, FINISHED if action is not None: self.__action_history.append(current_state.get_action_descv2(action, target_view)) self.__thought_history.append(thought) return None, action if self.__random_explore: self.logger.info("Trying random event.") action = random.choice(candidate_actions) self.__action_history.append(current_state.get_action_descv2(action, target_view)) self.__thought_history.append('random trying') return None, action # If couldn't find a exploration target, stop the app stop_app_intent = self.app.get_stop_intent() self.logger.info("Cannot find an exploration target. 
Trying to restart app...") self.__action_history.append('- stop the app') self.__thought_history.append("couldn't find a exploration target, stop the app") self.__event_trace += EVENT_FLAG_STOP_APP return None, IntentEvent(intent=stop_app_intent) def _save2yaml(self, file_name, state_prompt, idx, state_str, inputs='null'): if not os.path.exists(file_name): tmp_data = { 'task_name': self.task, 'step_num': 0, 'records': [] } with open(file_name, 'w', encoding='utf-8') as f: yaml.dump(tmp_data, f) with open(file_name, 'r', encoding='utf-8') as f: old_yaml_data = yaml.safe_load(f) new_records = old_yaml_data['records'] new_records.append( {'State': state_prompt, 'Choice': idx, 'Input': inputs, 'state_str': state_str} ) # import pdb;pdb.set_trace() data = { 'task_name': self.task, 'step_num': len(list(old_yaml_data['records'])), 'records': new_records } with open(file_name, 'w', encoding='utf-8') as f: yaml.dump(data, f) def _make_prompt_lmql(self, state_prompt, action_history, is_text, state_str, view_text=None, thought_history=None, use_thoughts=False): if self.use_memory: # if isinstance(state_str, list): # if len(state_str) == 1: # state_str = state_str[0] # else: # state_str = self.memory.hash_state(state_prompt) # new_state_prompt = self.f(action_history, state_prompt, state_str) # if new_state_prompt !z= None and new_state_prompt != 'no_description': # state_prompt = new_state_prompt if len(action_history) <= len(self.similar_ele_path): current_ui_id = len(action_history) - 1 new_state_prompt = tools.insert_onclick_into_prompt(state_prompt, self.similar_ele_path[current_ui_id], self.similar_ele_function) if new_state_prompt != state_prompt: # current state contains an element of insight self.state_ele_memory[state_str] = new_state_prompt state_prompt = new_state_prompt # elif state_str in self.state_ele_memory.keys(): # state_prompt = self.state_ele_memory[state_str] if use_thoughts: history_with_thought = [] for idx in range(len(action_history)): history_with_thought.append(action_history[idx] + ' Reason: ' + thought_history[idx]) else: history_with_thought = action_history return '\n'.join(history_with_thought),state_prompt def _make_prompt(self, state_prompt, action_history, is_text, state_str, view_text=None, thought_history=None, use_thoughts=False): if self.use_memory: # if isinstance(state_str, list): # if len(state_str) == 1: # state_str = state_str[0] # else: # state_str = self.memory.hash_state(state_prompt) # new_state_prompt = self.f(action_history, state_prompt, state_str) # if new_state_prompt !z= None and new_state_prompt != 'no_description': # state_prompt = new_state_prompt if len(action_history) <= len(self.similar_ele_path): current_ui_id = len(action_history) - 1 new_state_prompt = tools.insert_onclick_into_prompt(state_prompt, self.similar_ele_path[current_ui_id], self.similar_ele_function) if new_state_prompt != state_prompt: # current state contains an element of insight self.state_ele_memory[state_str] = new_state_prompt state_prompt = new_state_prompt # elif state_str in self.state_ele_memory.keys(): # state_prompt = self.state_ele_memory[state_str] if use_thoughts: history_with_thought = [] for idx in range(len(action_history)): history_with_thought.append(action_history[idx] + ' Reason: ' + thought_history[idx]) else: history_with_thought = action_history introduction = '''You are a smartphone assistant to help users complete tasks by interacting with mobile apps.Given a task, the previous UI actions, and the content of current UI state, your job is to decide 
whether the task is already finished by the previous actions, and if not, decide which UI element in current UI state should be interacted.''' task_prompt = 'Task: ' + self.task history_prompt = 'Previous UI actions: \n' + '\n'.join(history_with_thought) full_state_prompt = 'Current UI state: \n' + state_prompt request_prompt = '''Your answer should always use the following format:1. Completing this task on a smartphone usually involves these steps: <?>.\n2. Analyses of the relations between the task and the previous UI actions and current UI state: <?>.\n3. Based on the previous actions, is the task already finished? <Y/N>. The next step should be <?/None>.\n4. Can the task be proceeded with the current UI state? <Y/N>. Fill in the blanks about the next one interaction: - id=<id number> - action=<tap/input> - input text=<text or N/A>''' prompt = introduction + '\n' + task_prompt + '\n' + history_prompt + '\n' + full_state_prompt + '\n' + request_prompt return prompt def _extract_input_text(self, string, start='Text: ', end=' Thought'): start_index = string.find(start) + len(start) # Find the location of 'start' if start_index == -1: start_index = 0 end_index = string.find(end) # Find the location of 'end' substring = string[start_index:end_index] if end_index != -1 else string[start_index:] return substring def _extract_input_textv2(self, string): if string[:11] == 'InputText: ': return string[11:] else: return string def _get_text_view_description(self, view): content_description = safe_dict_get(view, 'content_description', default='') view_text = safe_dict_get(view, 'text', default='') view_desc = f"<input class='&'>#</input>"#.replace('&', view_class)#.replace('#', text) if view_text: view_desc = view_desc.replace('#', view_text) else: view_desc = view_desc.replace('#', '') if content_description: view_desc = view_desc.replace('&', content_description) else: view_desc = view_desc.replace(" class='&'", "") return view_desc def _get_action_from_views_actions(self, action_history, thought_history, views=None, candidate_actions=None, state_strs=None, current_state=None): ''' get action choice from LLM based on a list of views and corresponding actions ''' if current_state: state_prompt, candidate_actions, _, _ = current_state.get_described_actions() state_str = current_state.state_str if USE_LMQL: history, state_prompt = self._make_prompt_lmql(state_prompt, action_history, is_text=False, state_str=state_str, thought_history=thought_history) else: prompt = self._make_prompt(state_prompt, action_history, is_text=False, state_str=state_str, thought_history=thought_history) else: views_with_id = [] for id in range(len(views)): views_with_id.append(tools.insert_id_into_view(views[id], id)) state_prompt = '\n'.join(views_with_id) state_str = tools.hash_string(state_prompt) if USE_LMQL: history, state_prompt = self._make_prompt_lmql(state_prompt, action_history, is_text=False, state_str=state_str, thought_history=thought_history) else: prompt = self._make_prompt(state_prompt, action_history, is_text=False, state_str=state_str, thought_history=thought_history) # ids = [str(idx) for idx, i in enumerate(candidate_actions)] ids = str([i for i in range(len(candidate_actions))]) if USE_LMQL:
idx, action_type, input_text=prompt_llm_with_history(task=self.task, history=history, ui_desc=state_prompt, ids=ids)
2
2023-10-23 03:32:58+00:00
12k
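The sample above parses LLM replies by slicing the text between fixed markers (see `_extract_input_text` with its `start='Text: '` and `end=' Thought'` defaults). As a rough illustration only, the sketch below reimplements that marker-based slicing as a standalone helper and handles the missing-marker case explicitly (in the sample, `str.find` returning -1 plus `len(start)` means the `== -1` guard can never fire). The function name `extract_between` and the example reply string are assumptions for illustration, not part of the repository.

# Illustrative sketch (not part of the dataset sample): marker-based extraction
# similar to _extract_input_text above, with the missing start marker handled.
def extract_between(text: str, start: str = 'Text: ', end: str = ' Thought') -> str:
    """Return the substring between `start` and `end`, falling back gracefully."""
    start_pos = text.find(start)
    # If the start marker is absent, begin at position 0 instead of silently
    # skipping len(start) characters (str.find returns -1 when not found).
    begin = start_pos + len(start) if start_pos != -1 else 0
    end_pos = text.find(end, begin)
    return text[begin:end_pos] if end_pos != -1 else text[begin:]

if __name__ == "__main__":
    reply = "Text: hello world Thought: tap the search box"
    assert extract_between(reply) == "hello world"
    assert extract_between("no markers here") == "no markers here"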
cvlab-yonsei/ACLS
calibrate/evaluation/calibrate_evaluator.py
[ { "identifier": "DatasetEvaluator", "path": "calibrate/evaluation/evaluator.py", "snippet": "class DatasetEvaluator(metaclass=ABCMeta):\n \"\"\"\n Base class for a dataset evaluator\n \"\"\"\n @abstractmethod\n def reset(self):\n \"\"\"\n Preparation for a new round of evaluation.\n Should be called before starting a round of evaluation.\n \"\"\"\n pass\n\n @abstractmethod\n def update(self):\n \"\"\"\n Update status given a mini-batch results\n \"\"\"\n pass\n\n def curr_score(self):\n \"\"\"\n Return curr score after last batch\n \"\"\"\n pass\n\n @abstractmethod\n def mean_score(self):\n \"\"\"\n Return mean score across all classes/samples\n \"\"\"\n pass\n\n def class_score(self):\n \"\"\"\n Return score for different classes\n \"\"\"\n pass\n\n @abstractmethod\n def num_samples(self):\n \"\"\"\n return the evaluated samples\n \"\"\"\n pass\n\n @abstractmethod\n def main_metric(self):\n \"return the name of the main metric\"\n pass" }, { "identifier": "ECELoss", "path": "calibrate/evaluation/metrics.py", "snippet": "class ECELoss(nn.Module):\n '''\n Compute ECE (Expected Calibration Error)\n '''\n def __init__(self, n_bins=15):\n super(ECELoss, self).__init__()\n bin_boundaries = torch.linspace(0, 1, n_bins + 1)\n self.bin_lowers = bin_boundaries[:-1]\n self.bin_uppers = bin_boundaries[1:]\n\n def forward(self, logits, labels):\n softmaxes = F.softmax(logits, dim=1)\n confidences, predictions = torch.max(softmaxes, 1)\n accuracies = predictions.eq(labels)\n\n ece = torch.zeros(1, device=logits.device)\n for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):\n # Calculated |confidence - accuracy| in each bin\n in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())\n prop_in_bin = in_bin.float().mean()\n if prop_in_bin.item() > 0:\n accuracy_in_bin = accuracies[in_bin].float().mean()\n avg_confidence_in_bin = confidences[in_bin].mean()\n ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin\n\n return ece" }, { "identifier": "AdaptiveECELoss", "path": "calibrate/evaluation/metrics.py", "snippet": "class AdaptiveECELoss(nn.Module):\n '''\n Compute Adaptive ECE\n '''\n def __init__(self, n_bins=15):\n super(AdaptiveECELoss, self).__init__()\n self.nbins = n_bins\n\n def histedges_equalN(self, x):\n npt = len(x)\n return np.interp(np.linspace(0, npt, self.nbins + 1),\n np.arange(npt),\n np.sort(x))\n def forward(self, logits, labels):\n softmaxes = F.softmax(logits, dim=1)\n confidences, predictions = torch.max(softmaxes, 1)\n accuracies = predictions.eq(labels)\n n, bin_boundaries = np.histogram(confidences.cpu().detach(), self.histedges_equalN(confidences.cpu().detach()))\n #print(n,confidences,bin_boundaries)\n self.bin_lowers = bin_boundaries[:-1]\n self.bin_uppers = bin_boundaries[1:]\n ece = torch.zeros(1, device=logits.device)\n for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):\n # Calculated |confidence - accuracy| in each bin\n in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())\n prop_in_bin = in_bin.float().mean()\n if prop_in_bin.item() > 0:\n accuracy_in_bin = accuracies[in_bin].float().mean()\n avg_confidence_in_bin = confidences[in_bin].mean()\n ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin\n return ece" }, { "identifier": "ClasswiseECELoss", "path": "calibrate/evaluation/metrics.py", "snippet": "class ClasswiseECELoss(nn.Module):\n '''\n Compute Classwise ECE\n '''\n def __init__(self, n_bins=15):\n super(ClasswiseECELoss, self).__init__()\n 
bin_boundaries = torch.linspace(0, 1, n_bins + 1)\n self.bin_lowers = bin_boundaries[:-1]\n self.bin_uppers = bin_boundaries[1:]\n\n def forward(self, logits, labels):\n num_classes = int((torch.max(labels) + 1).item())\n softmaxes = F.softmax(logits, dim=1)\n per_class_sce = None\n\n for i in range(num_classes):\n class_confidences = softmaxes[:, i]\n class_sce = torch.zeros(1, device=logits.device)\n labels_in_class = labels.eq(i) # one-hot vector of all positions where the label belongs to the class i\n\n for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):\n in_bin = class_confidences.gt(bin_lower.item()) * class_confidences.le(bin_upper.item())\n prop_in_bin = in_bin.float().mean()\n if prop_in_bin.item() > 0:\n accuracy_in_bin = labels_in_class[in_bin].float().mean()\n avg_confidence_in_bin = class_confidences[in_bin].mean()\n class_sce += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin\n\n if (i == 0):\n per_class_sce = class_sce\n else:\n per_class_sce = torch.cat((per_class_sce, class_sce), dim=0)\n\n sce = torch.mean(per_class_sce)\n return sce" }, { "identifier": "ReliabilityDiagram", "path": "calibrate/evaluation/reliability_diagram.py", "snippet": "class ReliabilityDiagram(object):\n \"\"\"\n Plot Confidence Histogram and Reliability Diagram to visualize miscalibration.\n On classification, plot the gaps between average confidence and observed accuracy bin-wise over the confidence\n space [1]_, [2]_.\n On detection, plot the miscalibration w.r.t. the additional regression information provided (1-D or 2-D) [3]_.\n\n Parameters\n ----------\n bins : int or iterable, default: 10\n Number of bins used by the ACE/ECE/MCE.\n On detection mode: if int, use same amount of bins for each dimension (nx1 = nx2 = ... = bins).\n If iterable, use different amount of bins for each dimension (nx1, nx2, ... = bins).\n equal_intervals : bool, optional, default: True\n If True, the bins have the same width. If False, the bins are splitted to equalize\n the number of samples in each bin.\n detection : bool, default: False\n If False, the input array 'X' is treated as multi-class confidence input (softmax)\n with shape (n_samples, [n_classes]).\n If True, the input array 'X' is treated as a box predictions with several box features (at least\n box confidence must be present) with shape (n_samples, [n_box_features]).\n fmin : float, optional, default: None\n Minimum value for scale color.\n fmax : float, optional, default: None\n Maximum value for scale color.\n metric : str, default: 'ECE'\n Metric to measure miscalibration. Might be either 'ECE', 'ACE' or 'MCE'.\n\n References\n ----------\n .. [1] Chuan Guo, Geoff Pleiss, Yu Sun and Kilian Q. Weinberger:\n \"On Calibration of Modern Neural Networks.\"\n Proceedings of the 34th International Conference on Machine Learning-Volume 70. JMLR. org, 2017.\n `Get source online <https://arxiv.org/abs/1706.04599>`_\n\n .. [2] A. Niculescu-Mizil and R. Caruana:\n “Predicting good probabilities with supervised learning.”\n Proceedings of the 22nd International Conference on Machine Learning, 2005, pp. 625–632.\n `Get source online <https://www.cs.cornell.edu/~alexn/papers/calibration.icml05.crc.rev3.pdf>`_\n\n .. 
[3] Fabian Küppers, Jan Kronenberger, Amirhossein Shantia and Anselm Haselhoff:\n \"Multivariate Confidence Calibration for Object Detection.\"\n The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2020.\n `Get source online <https://openaccess.thecvf.com/content_CVPRW_2020/papers/w20/Kuppers_Multivariate_Confidence_Calibration_for_Object_Detection_CVPRW_2020_paper.pdf>`_\n \"\"\"\n\n def __init__(self, bins: Union[int, Iterable[int]] = 10, equal_intervals: bool = True,\n detection: bool = False, sample_threshold: int = 1,\n fmin: float = None, fmax: float = None,\n metric: str = 'ECE', style: str = \"curve\", **kwargs):\n \"\"\" Constructor. For detailed parameter documentation view classdocs. \"\"\"\n\n assert style in [\"curve\", \"bar\"]\n self.bins = bins\n self.detection = detection\n self.sample_threshold = sample_threshold\n self.fmin = fmin\n self.fmax = fmax\n self.metric = metric\n self.style = style\n\n if 'feature_names' in kwargs:\n self.feature_names = kwargs['feature_names']\n\n if 'title_suffix' in kwargs:\n self.title_suffix = kwargs['title_suffix']\n\n self._miscalibration = _Miscalibration(bins=bins, equal_intervals=equal_intervals,\n detection=detection, sample_threshold=sample_threshold)\n\n def plot(self, X: Union[Iterable[np.ndarray], np.ndarray], y: Union[Iterable[np.ndarray], np.ndarray],\n batched: bool = False, uncertainty: str = None, filename: str = None, tikz: bool = False,\n title_suffix: str = None, feature_names: List[str] = None, **save_args) -> Union[plt.Figure, str]:\n \"\"\"\n Reliability diagram to visualize miscalibration. This could be either in classical way for confidences only\n or w.r.t. additional properties (like x/y-coordinates of detection boxes, width, height, etc.). The additional\n properties get binned. Afterwards, the miscalibration will be calculated for each bin. This is\n visualized as a 2-D plots.\n\n Parameters\n ----------\n X : iterable of np.ndarray, or np.ndarray of shape=([n_bayes], n_samples, [n_classes/n_box_features])\n NumPy array with confidence values for each prediction on classification with shapes\n 1-D for binary classification, 2-D for multi class (softmax).\n If 3-D, interpret first dimension as samples from an Bayesian estimator with mulitple data points\n for a single sample (e.g. variational inference or MC dropout samples).\n If this is an iterable over multiple instances of np.ndarray and parameter batched=True,\n interpret this parameter as multiple predictions that should be averaged.\n On detection, this array must have 2 dimensions with number of additional box features in last dim.\n y : iterable of np.ndarray with same length as X or np.ndarray of shape=([n_bayes], n_samples, [n_classes])\n NumPy array with ground truth labels.\n Either as label vector (1-D) or as one-hot encoded ground truth array (2-D).\n If 3-D, interpret first dimension as samples from an Bayesian estimator with mulitple data points\n for a single sample (e.g. variational inference or MC dropout samples).\n If iterable over multiple instances of np.ndarray and parameter batched=True,\n interpret this parameter as multiple predictions that should be averaged.\n batched : bool, optional, default: False\n Multiple predictions can be evaluated at once (e.g. 
cross-validation examinations) using batched-mode.\n All predictions given by X and y are separately evaluated and their results are averaged afterwards\n for visualization.\n uncertainty : str, optional, default: False\n Define uncertainty handling if input X has been sampled e.g. by Monte-Carlo dropout or similar methods\n that output an ensemble of predictions per sample. Choose one of the following options:\n - flatten: treat everything as a separate prediction - this option will yield into a slightly better\n calibration performance but without the visualization of a prediction interval.\n - mean: compute Monte-Carlo integration to obtain a simple confidence estimate for a sample\n (mean) with a standard deviation that is visualized.\n filename : str, optional, default: None\n Optional filename to save the plotted figure.\n tikz : bool, optional, default: False\n If True, use 'tikzplotlib' package to return tikz-code for Latex rather than a Matplotlib figure.\n title_suffix : str, optional, default: None\n Suffix for plot title.\n feature_names : list, optional, default: None\n Names of the additional features that are attached to the axes of a reliability diagram.\n **save_args : args\n Additional arguments passed to 'matplotlib.pyplot.Figure.savefig' function if 'tikz' is False.\n If 'tikz' is True, the argument are passed to 'tikzplotlib.get_tikz_code' function.\n\n Returns\n -------\n matplotlib.pyplot.Figure if 'tikz' is False else str with tikz code.\n\n Raises\n ------\n AttributeError\n - If parameter metric is not string or string is not 'ACE', 'ECE' or 'MCE'\n - If parameter 'feature_names' is set but length does not fit to second dim of X\n - If no ground truth samples are provided\n - If length of bins parameter does not match the number of features given by X\n - If more than 3 feature dimensions (including confidence) are provided\n \"\"\"\n\n # assign deprecated constructor parameter to title_suffix and feature_names\n if hasattr(self, 'title_suffix') and title_suffix is None:\n title_suffix = self.title_suffix\n\n if hasattr(self, 'feature_names') and feature_names is None:\n feature_names = self.feature_names\n\n # check if metric is correct\n if not isinstance(self.metric, str):\n raise AttributeError('Parameter \\'metric\\' must be string with either \\'ece\\', \\'ace\\' or \\'mce\\'.')\n\n # check metrics parameter\n if self.metric.lower() not in ['ece', 'ace', 'mce']:\n raise AttributeError('Parameter \\'metric\\' must be string with either \\'ece\\', \\'ace\\' or \\'mce\\'.')\n else:\n self.metric = self.metric.lower()\n\n # perform checks and prepare input data\n X, matched, sample_uncertainty, bin_bounds, num_features = self._miscalibration.prepare(X, y, batched, uncertainty)\n if num_features > 3:\n raise AttributeError(\"Diagram is not defined for more than 2 additional feature dimensions.\")\n\n histograms = []\n for batch_X, batch_matched, batch_uncertainty, bounds in zip(X, matched, sample_uncertainty, bin_bounds):\n batch_histograms = self._miscalibration.binning(bounds, batch_X, batch_matched, batch_X[:, 0], batch_uncertainty[:, 0])\n histograms.append(batch_histograms[:-1])\n\n # no additional dimensions? compute standard reliability diagram\n if num_features == 1:\n fig1, fig2 = self.__plot_confidence_histogram(X, matched, histograms, bin_bounds, title_suffix)\n return fig1, fig2\n\n # one additional feature? 
compute 1D-plot\n elif num_features == 2:\n fig = self.__plot_1d(histograms, bin_bounds, title_suffix, feature_names)\n\n # two additional features? compute 2D plot\n elif num_features == 3:\n fig = self.__plot_2d(histograms, bin_bounds, title_suffix, feature_names)\n\n # number of dimensions exceeds 3? quit\n else:\n raise AttributeError(\"Diagram is not defined for more than 2 additional feature dimensions.\")\n\n # if tikz is true, create tikz code from matplotlib figure\n if tikz:\n\n # get tikz code for our specific figure and also pass filename to store possible bitmaps\n tikz_fig = tikzplotlib.get_tikz_code(fig, filepath=filename, **save_args)\n\n # close matplotlib figure when tikz figure is requested to save memory\n plt.close(fig)\n fig = tikz_fig\n\n # save figure either as matplotlib PNG or as tikz output file\n if filename is not None:\n if tikz:\n with open(filename, \"w\") as open_file:\n open_file.write(fig)\n else:\n fig.savefig(filename, **save_args)\n\n return fig\n\n @classmethod\n def __interpolate_grid(cls, metric_map: np.ndarray) -> np.ndarray:\n \"\"\" Interpolate missing values in a 2D-grid using the mean of the data. The interpolation is done inplace. \"\"\"\n\n # get all NaNs\n nans = np.isnan(metric_map)\n x = lambda z: z.nonzero()\n\n # get mean of the remaining values and interpolate missing by the mean\n mean = float(np.mean(metric_map[~nans]))\n metric_map[nans] = griddata(x(~nans), metric_map[~nans], x(nans), method='cubic', fill_value=mean)\n return metric_map\n\n def __plot_confidence_histogram(self, X: List[np.ndarray], matched: List[np.ndarray], histograms: List[np.ndarray],\n bin_bounds: List, title_suffix: str = None) -> plt.Figure:\n \"\"\" Plot confidence histogram and reliability diagram to visualize miscalibration for condidences only. \"\"\"\n\n # get number of bins (self.bins has not been processed yet)\n n_bins = len(bin_bounds[0][0])-1\n\n median_confidence = [(bounds[0][1:] + bounds[0][:-1]) * 0.5 for bounds in bin_bounds]\n mean_acc, mean_conf = [], []\n for batch_X, batch_matched, batch_hist, batch_median in zip(X, matched, histograms, median_confidence):\n acc_hist, conf_hist, _, num_samples_hist = batch_hist\n empty_bins, = np.nonzero(num_samples_hist == 0)\n\n # calculate overall mean accuracy and confidence\n mean_acc.append(np.mean(batch_matched))\n mean_conf.append(np.mean(batch_X))\n\n # set empty bins to median bin value\n acc_hist[empty_bins] = batch_median[empty_bins]\n conf_hist[empty_bins] = batch_median[empty_bins]\n\n # convert num_samples to relative afterwards (inplace denoted by [:])\n num_samples_hist[:] = num_samples_hist / np.sum(num_samples_hist)\n\n # import ipdb; ipdb.set_trace()\n # get mean histograms and values over all batches\n acc = np.mean([hist[0] for hist in histograms], axis=0)\n conf = np.mean([hist[1] for hist in histograms], axis=0)\n uncertainty = np.sqrt(np.mean([hist[2] for hist in histograms], axis=0))\n num_samples = np.mean([hist[3] for hist in histograms], axis=0)\n mean_acc = np.mean(mean_acc)\n mean_conf = np.mean(mean_conf)\n median_confidence = np.mean(median_confidence, axis=0)\n bar_width = np.mean([np.diff(bounds[0]) for bounds in bin_bounds], axis=0)\n\n # compute credible interval of uncertainty\n p = 0.05\n z_score = norm.ppf(1. 
- (p / 2))\n uncertainty = z_score * uncertainty\n\n # if no uncertainty is given, set variable uncertainty to None in order to prevent drawing error bars\n if np.count_nonzero(uncertainty) == 0:\n uncertainty = None\n\n # calculate deviation\n deviation = conf - acc\n\n fig1 = plt.figure(\"Reliability {}\".format(title_suffix))\n ax = fig1.add_subplot()\n # set title suffix if given\n # if title_suffix is not None:\n # ax.set_title('Reliability Diagram' + \" - \" + title_suffix)\n # else:\n # ax.set_title('Reliability Diagram')\n \n # create two overlaying bar charts with bin accuracy and the gap of each bin to the perfect calibration\n if self.style == \"bar\":\n # ax.bar(median_confidence, height=median_confidence, width=bar_width, align='center',\n # edgecolor='black', color='pink', alpha=0.6)\n ax.bar(median_confidence, height=acc, width=bar_width, align='center',\n edgecolor='black', yerr=uncertainty, capsize=2)\n # ax.bar(median_confidence, height=deviation, bottom=acc, width=bar_width, align='center',\n # edgecolor='black', color='red', alpha=0.6)\n else:\n ax.plot(median_confidence, acc, color=\"blue\", linestyle=\"-\")\n\n # draw diagonal as perfect calibration line\n ax.plot([0, 1], [0, 1], color='red', linestyle='-.')\n # ax.set_xlim((0.0, 1.0))\n # ax.set_ylim((0.0, 1.0))\n\n # labels and legend of second plot\n # ax.set_xlabel('Confidence')\n # ax.set_ylabel('Accuracy')\n ax.legend(['Output', 'Expected'], fontsize=14)\n\n\n fig2 = plt.figure(\"Conf. Hist.\")\n ax = fig2.add_subplot()\n ax.bar(median_confidence, height=num_samples, width=bar_width, align='center', edgecolor='black')\n ax.plot([mean_acc, mean_acc], [0.0, 1.0], color='red', linestyle='--')\n ax.plot([mean_conf, mean_conf], [0.0, 1.0], color='blue', linestyle='--')\n ax.set_xlim((0.0, 1.0))\n ax.set_ylim((0.0, 1.0))\n\n plt.tight_layout()\n\n return fig1, fig2\n\n # -----------------------------------------\n # plot data distribution histogram first\n fig, axes = plt.subplots(2, squeeze=True, figsize=(7, 6))\n ax = axes[0]\n\n # set title suffix is given\n if title_suffix is not None:\n ax.set_title('Confidence Histogram - ' + title_suffix)\n else:\n ax.set_title('Confidence Histogram')\n\n # create bar chart with relative amount of samples in each bin\n # as well as average confidence and accuracy\n ax.bar(median_confidence, height=num_samples, width=bar_width, align='center', edgecolor='black')\n ax.plot([mean_acc, mean_acc], [0.0, 1.0], color='black', linestyle='--')\n ax.plot([mean_conf, mean_conf], [0.0, 1.0], color='gray', linestyle='--')\n ax.set_xlim((0.0, 1.0))\n ax.set_ylim((0.0, 1.0))\n\n # labels and legend\n ax.set_xlabel('Confidence')\n ax.set_ylabel('% of Samples')\n ax.legend(['Avg. Accuracy', 'Avg. 
Confidence', 'Relative Amount of Samples'])\n\n # second plot: reliability histogram\n ax = axes[1]\n\n # set title suffix if given\n if title_suffix is not None:\n ax.set_title('Reliability Diagram' + \" - \" + title_suffix)\n else:\n ax.set_title('Reliability Diagram')\n\n # create two overlaying bar charts with bin accuracy and the gap of each bin to the perfect calibration\n ax.bar(median_confidence, height=acc, width=bar_width, align='center',\n edgecolor='black', yerr=uncertainty, capsize=4)\n ax.bar(median_confidence, height=deviation, bottom=acc, width=bar_width, align='center',\n edgecolor='black', color='red', alpha=0.6)\n\n # draw diagonal as perfect calibration line\n ax.plot([0, 1], [0, 1], color='red', linestyle='--')\n ax.set_xlim((0.0, 1.0))\n ax.set_ylim((0.0, 1.0))\n\n # labels and legend of second plot\n ax.set_xlabel('Confidence')\n ax.set_ylabel('Accuracy')\n ax.legend(['Perfect Calibration', 'Output', 'Gap'])\n\n plt.tight_layout()\n return fig\n\n def __plot_1d(self, histograms: List[np.ndarray], bin_bounds: List,\n title_suffix: str = None, feature_names: List[str] = None) -> plt.Figure:\n \"\"\" Plot 1-D miscalibration w.r.t. one additional feature. \"\"\"\n\n # z score for credible interval (if uncertainty is given)\n p = 0.05\n z_score = norm.ppf(1. - (p / 2))\n\n results = []\n for batch_hist, bounds in zip(histograms, bin_bounds):\n result = self._miscalibration.process(self.metric, *batch_hist)\n bin_median = (bounds[-1][:-1] + bounds[-1][1:]) * 0.5\n\n # interpolate missing values\n x = np.linspace(0.0, 1.0, 1000)\n miscalibration = interp1d(bin_median, result[1], kind='cubic', fill_value='extrapolate')(x)\n acc = interp1d(bin_median, result[2], kind='cubic', fill_value='extrapolate')(x)\n conf = interp1d(bin_median, result[3], kind='cubic', fill_value='extrapolate')(x)\n uncertainty = interp1d(bin_median, result[4], kind='cubic', fill_value='extrapolate')(x)\n\n results.append((miscalibration, acc, conf, uncertainty))\n\n # get mean over all batches and convert mean variance to a std deviation afterwards\n miscalibration = np.mean([result[0] for result in results], axis=0)\n acc = np.mean([result[1] for result in results], axis=0)\n conf = np.mean([result[2] for result in results], axis=0)\n uncertainty = np.sqrt(np.mean([result[3] for result in results], axis=0))\n\n # draw routines\n fig, ax1 = plt.subplots()\n conf_color = 'tab:blue'\n\n # set name of the additional feature\n if feature_names is not None:\n ax1.set_xlabel(feature_names[0])\n\n ax1.set_xlim([0.0, 1.0])\n ax1.set_ylim([0.0, 1.0])\n ax1.set_ylabel('accuracy/confidence', color=conf_color)\n\n # draw confidence and accuracy on the same (left) axis\n x = np.linspace(0.0, 1.0, 1000)\n line1, = ax1.plot(x, acc, '-.', color='black')\n line2, = ax1.plot(x, conf, '--', color=conf_color)\n ax1.tick_params('y', labelcolor=conf_color)\n\n # if uncertainty is given, compute average of variances over all bins and get std deviation by sqrt\n # compute credible interval afterwards\n # define lower and upper bound\n uncertainty = z_score * uncertainty\n lb = conf - uncertainty\n ub = conf + uncertainty\n\n # create second axis for miscalibration\n ax11 = ax1.twinx()\n miscal_color = 'tab:red'\n line3, = ax11.plot(x, miscalibration, '-', color=miscal_color)\n\n if self.metric == 'ace':\n ax11.set_ylabel('Average Calibration Error (ACE)', color=miscal_color)\n elif self.metric == 'ece':\n ax11.set_ylabel('Expected Calibration Error (ECE)', color=miscal_color)\n elif self.metric == 'mce':\n 
ax11.set_ylabel('Maximum Calibration Error (MCE)', color=miscal_color)\n\n ax11.tick_params('y', labelcolor=miscal_color)\n\n # set miscalibration limits if given\n if self.fmin is not None and self.fmax is not None:\n ax11.set_ylim([self.fmin, self.fmax])\n\n ax1.legend((line1, line2, line3),\n ('accuracy', 'confidence', '%s' % self.metric.upper()),\n loc='best')\n\n if title_suffix is not None:\n ax1.set_title('Accuracy, confidence and %s\\n- %s -' % (self.metric.upper(), title_suffix))\n else:\n ax1.set_title('Accuracy, confidence and %s' % self.metric.upper())\n\n ax1.grid(True)\n\n fig.tight_layout()\n return fig\n\n def __plot_2d(self, histograms: List[np.ndarray], bin_bounds: List[np.ndarray],\n title_suffix: str = None, feature_names: List[str] = None) -> plt.Figure:\n \"\"\" Plot 2D miscalibration reliability diagram heatmap. \"\"\"\n\n results = []\n for batch_hist in histograms:\n result = self._miscalibration.process(self.metric, *batch_hist)\n\n # interpolate 2D data inplace to avoid \"empty\" bins\n batch_samples = result[-1]\n for map in result[1:-1]:\n map[batch_samples == 0.0] = 0.0\n # TODO: check what to do here\n # map[batch_samples == 0.0] = np.nan\n # self.__interpolate_grid(map)\n\n # on interpolation, it is sometimes possible that empty bins have negative values\n # however, this is invalid for variance\n result[4][result[4] < 0] = 0.0\n results.append(result)\n\n # calculate mean over all batches and transpose\n # transpose is necessary. Miscalibration is calculated in the order given by the features\n # however, imshow expects arrays in format [rows, columns] or [height, width]\n # e.g., miscalibration with additional x/y (in this order) will be drawn [y, x] otherwise\n miscalibration = np.mean([result[1] for result in results], axis=0).T\n acc = np.mean([result[2] for result in results], axis=0).T\n conf = np.mean([result[3] for result in results], axis=0).T\n mean = np.mean([result[4] for result in results], axis=0).T\n uncertainty = np.sqrt(mean)\n\n # -----------------------------------------------------------------------------------------\n # draw routines\n\n def set_axis(ax, map, vmin=None, vmax=None):\n \"\"\" Generic function to set all subplots equally \"\"\"\n # TODO: set proper fmin, fmax values\n img = ax.imshow(map, origin='lower', interpolation=\"gaussian\", cmap='jet', aspect=1, vmin=vmin, vmax=vmax)\n\n # set correct x- and y-ticks\n ax.set_xticks(np.linspace(0., len(bin_bounds[0][1])-2, 5))\n ax.set_xticklabels(np.linspace(0., 1., 5))\n ax.set_yticks(np.linspace(0., len(bin_bounds[0][2])-2, 5))\n ax.set_yticklabels(np.linspace(0., 1., 5))\n ax.set_xlim([0.0, len(bin_bounds[0][1])-2])\n ax.set_ylim([0.0, len(bin_bounds[0][2])-2])\n\n # draw feature names on axes if given\n if feature_names is not None:\n ax.set_xlabel(feature_names[0])\n ax.set_ylabel(feature_names[1])\n\n fig.colorbar(img, ax=ax, fraction=0.046, pad=0.04)\n\n return ax, img\n\n # -----------------------------------\n\n # create only two subplots if no additional uncertainty is given\n if np.count_nonzero(uncertainty) == 0:\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 5))\n\n # process additional uncertainty if given\n else:\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, squeeze=True, figsize=(10, 10))\n ax4, img4 = set_axis(ax4, uncertainty)\n\n if title_suffix is not None:\n ax4.set_title(\"Confidence std deviation\\n- %s -\" % title_suffix)\n else:\n ax4.set_title(\"Confidence std deviation\")\n\n ax1, img1 = set_axis(ax1, acc, vmin=0, vmax=1)\n ax2, img2 = 
set_axis(ax2, conf, vmin=0, vmax=1)\n ax3, img3 = set_axis(ax3, miscalibration, vmin=self.fmin, vmax=self.fmax)\n\n # draw title if given\n if title_suffix is not None:\n ax1.set_title(\"Average accuracy\\n- %s -\" % title_suffix)\n ax2.set_title(\"Average confidence\\n- %s -\" % title_suffix)\n ax3.set_title(\"%s\\n- %s -\" % (self.metric.upper(), title_suffix))\n else:\n ax1.set_title(\"Average accuracy\")\n ax2.set_title(\"Average confidence\")\n ax3.set_title(\"%s\" % self.metric.upper())\n\n # -----------------------------------------------------------------------------------------\n\n return fig" }, { "identifier": "to_numpy", "path": "calibrate/utils/torch_helper.py", "snippet": "def to_numpy(x: torch.Tensor):\n return x.detach().cpu().numpy()" } ]
import logging import numpy as np import torch import torch.nn.functional as F from terminaltables import AsciiTable from torch import nn from .evaluator import DatasetEvaluator from .metrics import ECELoss, AdaptiveECELoss, ClasswiseECELoss from .reliability_diagram import ReliabilityDiagram from calibrate.utils.torch_helper import to_numpy
8,737
logger = logging.getLogger(__name__) class CalibrateEvaluator(DatasetEvaluator): def __init__(self, num_classes, num_bins=15, device="cuda:0") -> None: self.num_classes = num_classes self.num_bins = num_bins self.device = device self.reset() def reset(self) -> None: self.logits = None self.labels = None def num_samples(self): return ( self.labels.shape[0] if self.labels is not None else 0 ) def main_metric(self) -> None: return "ece" def update(self, logits: torch.Tensor, labels: torch.Tensor) -> None: """update Args: logits (torch.Tensor): n x num_classes label (torch.Tensor): n x 1 """ assert logits.shape[0] == labels.shape[0] if self.logits is None: self.logits = logits self.labels = labels else: self.logits = torch.cat((self.logits, logits), dim=0) self.labels = torch.cat((self.labels, labels), dim=0) def mean_score(self, print=False, all_metric=True): nll_criterion = nn.CrossEntropyLoss().to(self.device) ece_criterion = ECELoss(self.num_bins).to(self.device) aece_criterion = AdaptiveECELoss(self.num_bins).to(self.device) cece_criterion = ClasswiseECELoss(self.num_bins).to(self.device) nll = nll_criterion(self.logits, self.labels).item() ece = ece_criterion(self.logits, self.labels).item() aece = aece_criterion(self.logits, self.labels).item() cece = cece_criterion(self.logits, self.labels).item() metric = {"nll": nll, "ece": ece, "aece": aece, "cece": cece} columns = ["samples", "nll", "ece", "aece", "cece"] table_data = [columns] table_data.append( [ self.num_samples(), "{:.5f}".format(nll), "{:.5f}".format(ece), "{:.5f}".format(aece), "{:.5f}".format(cece), ] ) if print: table = AsciiTable(table_data) logger.info("\n" + table.table) if all_metric: return metric, table_data else: return metric[self.main_metric()] def plot_reliability_diagram(self, title=""):
logger = logging.getLogger(__name__) class CalibrateEvaluator(DatasetEvaluator): def __init__(self, num_classes, num_bins=15, device="cuda:0") -> None: self.num_classes = num_classes self.num_bins = num_bins self.device = device self.reset() def reset(self) -> None: self.logits = None self.labels = None def num_samples(self): return ( self.labels.shape[0] if self.labels is not None else 0 ) def main_metric(self) -> None: return "ece" def update(self, logits: torch.Tensor, labels: torch.Tensor) -> None: """update Args: logits (torch.Tensor): n x num_classes label (torch.Tensor): n x 1 """ assert logits.shape[0] == labels.shape[0] if self.logits is None: self.logits = logits self.labels = labels else: self.logits = torch.cat((self.logits, logits), dim=0) self.labels = torch.cat((self.labels, labels), dim=0) def mean_score(self, print=False, all_metric=True): nll_criterion = nn.CrossEntropyLoss().to(self.device) ece_criterion = ECELoss(self.num_bins).to(self.device) aece_criterion = AdaptiveECELoss(self.num_bins).to(self.device) cece_criterion = ClasswiseECELoss(self.num_bins).to(self.device) nll = nll_criterion(self.logits, self.labels).item() ece = ece_criterion(self.logits, self.labels).item() aece = aece_criterion(self.logits, self.labels).item() cece = cece_criterion(self.logits, self.labels).item() metric = {"nll": nll, "ece": ece, "aece": aece, "cece": cece} columns = ["samples", "nll", "ece", "aece", "cece"] table_data = [columns] table_data.append( [ self.num_samples(), "{:.5f}".format(nll), "{:.5f}".format(ece), "{:.5f}".format(aece), "{:.5f}".format(cece), ] ) if print: table = AsciiTable(table_data) logger.info("\n" + table.table) if all_metric: return metric, table_data else: return metric[self.main_metric()] def plot_reliability_diagram(self, title=""):
diagram = ReliabilityDiagram(bins=25, style="curve")
4
2023-10-23 09:55:13+00:00
12k
myshell-ai/AIlice
ailice/AIliceMain.py
[ { "identifier": "config", "path": "ailice/common/AConfig.py", "snippet": "class AConfig():\n def __init__(self):\n def Initialize(self, needOpenaiGPTKey = False):\n def Load(self, configFile: str) -> dict:\n def Store(self, configFile: str):" }, { "identifier": "AProcessor", "path": "ailice/core/AProcessor.py", "snippet": "class AProcessor():\n def __init__(self, name, modelID, promptName, outputCB, collection = None):\n self.name = name\n self.modelID = modelID\n self.llm = llmPool.GetModel(modelID)\n self.interpreter = AInterpreter()\n self.conversation = AConversations()\n self.subProcessors = dict()\n self.modules = {}\n \n self.RegisterModules([config.services['storage']['addr']])\n self.interpreter.RegisterAction(\"CALL\", {\"func\": self.EvalCall})\n self.interpreter.RegisterAction(\"RESPOND\", {\"func\": self.EvalRespond})\n self.interpreter.RegisterAction(\"COMPLETE\", {\"func\": self.EvalComplete})\n self.interpreter.RegisterAction(\"STORE\", {\"func\": self.EvalStore})\n self.interpreter.RegisterAction(\"QUERY\", {\"func\": self.EvalQuery})\n self.interpreter.RegisterAction(\"WAIT\", {\"func\": self.EvalWait})\n \n self.outputCB = outputCB\n self.collection = \"ailice\" + str(time.time()) if collection is None else collection\n self.prompt = promptsManager[promptName](processor=self, storage=self.modules['storage']['module'], collection=self.collection, conversations=self.conversation, formatter=self.llm.formatter, outputCB=self.outputCB)\n for nodeType, action in self.prompt.GetActions().items():\n self.interpreter.RegisterAction(nodeType, action)\n for nodeType, patterns in self.prompt.GetPatterns().items():\n for p in patterns:\n self.interpreter.RegisterPattern(nodeType, p[\"re\"], p[\"isEntry\"])\n self.result = \"None.\"\n return\n \n def RegisterAction(self, nodeType: str, action: dict):\n self.interpreter.RegisterAction(nodeType, action)\n return\n \n def RegisterModules(self, moduleAddrs):\n ret = []\n for moduleAddr in moduleAddrs:\n module = clientPool.GetClient(moduleAddr)\n if (not hasattr(module, \"ModuleInfo\")) or (not callable(getattr(module, \"ModuleInfo\"))):\n raise Exception(\"EXCEPTION: ModuleInfo() not found in module.\")\n info = module.ModuleInfo()\n if \"NAME\" not in info:\n raise Exception(\"EXCEPTION: 'NAME' is not found in module info.\")\n if \"ACTIONS\" not in info:\n raise Exception(\"EXCEPTION: 'ACTIONS' is not found in module info.\")\n \n self.modules[info['NAME']] = {'addr': moduleAddr, 'module': module}\n for actionName, actionMeta in info[\"ACTIONS\"].items():\n ret.append({\"action\": actionName, \"signature\": actionMeta[\"sig\"], \"prompt\": actionMeta[\"prompt\"]})\n actionFunc = actionMeta[\"sig\"][:actionMeta[\"sig\"].find(\"(\")]\n self.RegisterAction(nodeType=actionName, action={\"func\": self.CreateActionCB(actionName, module, actionFunc),\n \"signatureExpr\": actionMeta[\"sig\"]})\n return ret\n \n def CreateActionCB(self, actionName, module, actionFunc):\n def callback(*args,**kwargs):\n return f\"{actionName}_RESULT=[{getattr(module, actionFunc)(*args,**kwargs)}]\"\n return callback\n \n def GetPromptName(self) -> str:\n return self.prompt.PROMPT_NAME\n \n def __call__(self, txt: str) -> str:\n self.conversation.Add(role = \"USER\", msg = txt)\n self.EvalStore(txt)\n self.outputCB(\"<\")\n self.outputCB(f\"USER_{self.name}\", txt)\n\n while True:\n prompt = self.prompt.BuildPrompt()\n ret = self.llm.Generate(prompt, proc=partial(self.outputCB, \"ASSISTANT_\" + self.name), endchecker=self.interpreter.EndChecker, temperature = 
config.temperature)\n self.conversation.Add(role = \"ASSISTANT\", msg = ret)\n self.EvalStore(ret)\n self.result = ret\n \n resp = self.interpreter.EvalEntries(ret)\n \n if \"\" != resp:\n self.conversation.Add(role = \"SYSTEM\", msg = \"Function returned: {\" + resp + \"}\")\n self.EvalStore(\"Function returned: {\" + resp + \"}\")\n self.outputCB(f\"SYSTEM_{self.name}\", resp)\n else:\n self.outputCB(\">\")\n return self.result\n\n def EvalCall(self, agentType: str, agentName: str, msg: str) -> str:\n if agentType not in promptsManager:\n return f\"CALL FAILED. specified agentType {agentType} does not exist. This may be caused by using an agent type that does not exist or by getting the parameters in the wrong order.\"\n if (agentName not in self.subProcessors) or (agentType != self.subProcessors[agentName].GetPromptName()):\n self.subProcessors[agentName] = AProcessor(name=agentName, modelID=self.modelID, promptName=agentType, outputCB=self.outputCB, collection=self.collection)\n self.subProcessors[agentName].RegisterModules([self.modules[moduleName]['addr'] for moduleName in self.modules])\n resp = f\"Agent {agentName} returned: {self.subProcessors[agentName](msg)}\"\n return resp\n \n def EvalRespond(self, message: str):\n self.result = message\n return\n \n def EvalStore(self, txt: str):\n if not self.modules['storage']['module'].Store(self.collection, txt):\n return \"STORE FAILED, please check your input.\"\n return\n \n def EvalQuery(self, keywords: str) -> str:\n res = self.modules['storage']['module'].Query(self.collection, keywords)\n if (0 == len(res)) or (res[0][1] > 0.5):\n return \"Nothing found.\"\n return \"QUERY_RESULT={\" + res[0][0] +\"}\"\n \n def EvalComplete(self, result: str):\n self.result = result\n self.prompt.Reset()\n return\n \n def EvalWait(self, duration: int) -> str:\n time.sleep(duration)\n return f\"Waiting is over. 
It has been {duration} seconds.\"\n \n def ToJson(self) -> str:\n return {\"name\": self.name,\n \"modelID\": self.modelID,\n \"conversations\": self.conversation.ToJson(),\n \"subProcessors\": {k: p.ToJson() for k, p in self.subProcessors.items()},\n \"modules\": {k:{'addr': m['addr']} for k, m in self.modules.items()},\n \"collection\": self.collection}" }, { "identifier": "llmPool", "path": "ailice/core/llm/ALLMPool.py", "snippet": "class ALLMPool():\n def __init__(self):\n def ParseID(self, id):\n def Init(self, llmIDs: [str]):\n def GetModel(self, modelID: str):" }, { "identifier": "ALogger", "path": "ailice/common/utils/ALogger.py", "snippet": "class ALogger():\n def __init__(self, speech):\n self.colorMap = {'CONTEXT': 'blue', 'USER': 'green', 'ASSISTANT': 'green', 'SYSTEM': 'yellow', 'OUTPUT': 'green'}\n self.depth = -1\n self.speech = speech\n self.queue = queue.Queue()\n return\n \n def ParseChannel(self, channel: str) -> tuple[str]:\n l = channel.find(\"_\")\n channelType, agentName = channel[:l], channel[l+1:]\n return channelType, agentName\n \n def SinkPrint(self, channel: str, txt: str = None, action: str = ''):\n channelType, agentName = self.ParseChannel(channel)\n if 'open' == action:\n print(colored(channel + \": \", self.colorMap[channelType]), txt, end=\"\", flush=True)\n elif 'append' == action:\n print(txt, end=\"\", flush=True)\n elif 'close' == action:\n print(txt, end=\"\", flush=True)\n print(\"\")\n else:\n print(colored(channel + \": \", self.colorMap[channelType]), txt)\n return\n \n def SinkSpeech(self, channel: str, txt: str = None, action: str = ''):\n self.speech.Play(txt)\n return\n \n def SinkQueue(self, channel: str, txt: str = None, action: str = ''):\n if 'open' == action:\n self.txtBuf = {\"channel\": channel, \"txt\": txt}\n elif 'append' == action:\n assert self.txtBuf['channel'] == channel, \"assert self.txtBuf['channel'] == channel FAILED.\"\n self.txtBuf['txt'] += txt\n elif 'close' == action:\n assert self.txtBuf['channel'] == channel, \"assert self.txtBuf['channel'] == channel FAILED.\"\n self.txtBuf['txt'] += txt\n self.queue.put((channel, self.txtBuf['txt']))\n else:\n self.queue.put((channel, txt))\n return\n\n def Receiver(self, channel: str, txt: str = None, action: str = ''):\n braketMap = {\"<\": 1, \">\": -1}\n self.depth += (braketMap[channel] if channel in braketMap else 0)\n \n channelType, _ = self.ParseChannel(channel)\n if (channelType in [\"ASSISTANT\", \"SYSTEM\"]):\n self.SinkPrint(channel=channel, txt=txt, action=action)\n if config.speechOn and ((channelType in [\"ASSISTANT\"]) and (0 == self.depth)):\n self.SinkSpeech(channel=channel, txt=txt, action=action)\n if ((channelType in [\"OUTPUT\"]) and (1 == self.depth)) or\\\n (((channelType in [\"ASSISTANT\"]) and (0 == self.depth))):\n self.SinkQueue(channel=channel, txt=txt, action=action)\n if (channel in [\">\"]) and (-1 == self.depth):\n self.SinkQueue(channel=channel, txt=None, action=None)\n return" }, { "identifier": "clientPool", "path": "ailice/common/ARemoteAccessors.py", "snippet": "class AClientPool():\n def __init__(self):\n def Init(self):\n def GetClient(self, moduleAddr: str):" }, { "identifier": "StartServices", "path": "ailice/AServices.py", "snippet": "def StartServices():\n if config.localExecution:\n config.services['scripter'] = {\"cmd\": \"docker stop scripter; python3 -m ailice.modules.AScripter\", \"addr\": \"tcp://127.0.0.1:59000\"}\n else:\n try:\n subprocess.run(\"docker -v\", shell=True, check=True)\n except Exception:\n print(\"It looks like 
docker is not installed correctly. If you do not plan to use other virtual environments to execute scripts, please ensure that docker is installed correctly or use --localExecution to execute locally.\")\n \n for serviceName, cfg in config.services.items():\n if (\"speech\" == serviceName) and not config.speechOn:\n continue\n if (\"cmd\" not in cfg) or (\"\" == cfg['cmd'].strip()):\n print(f\"{serviceName}'s cmd is not configured and will attempt to connect {cfg['addr']} directly.\")\n continue\n p = subprocess.Popen(cfg['cmd'], shell=True, cwd=None, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n processes.append(p)\n print(serviceName,\" started.\")\n signal.signal(signal.SIGINT, TerminateSubprocess)\n signal.signal(signal.SIGTERM, TerminateSubprocess)" }, { "identifier": "promptsManager", "path": "ailice/common/APrompts.py", "snippet": "class APromptsManager():\n def __init__(self):\n def RegisterPrompt(self, promptClass):\n def __getitem__(self, promptName: str):\n def __iter__(self):" }, { "identifier": "APromptChat", "path": "ailice/prompts/APromptChat.py", "snippet": "class APromptChat():\n PROMPT_NAME = \"chat\"\n\n def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):\n self.processor = processor\n self.storage = storage\n self.collection = collection\n self.conversations = conversations\n self.formatter = formatter\n self.outputCB = outputCB\n self.prompt0 = \"You are a helpful assistant.\"\n self.PATTERNS = {}\n self.ACTIONS= {}\n return\n \n def Reset(self):\n return\n \n def GetPatterns(self):\n return self.PATTERNS\n \n def GetActions(self):\n return self.ACTIONS\n \n def ParameterizedBuildPrompt(self, n: int):\n prompt = f\"\"\"\n{self.prompt0}\n\"\"\"\n #prompt += \"\\nConversations:\"\n ret = self.formatter(prompt0 = prompt, conversations = self.conversations.GetConversations(frm = -n))\n return ret, self.formatter.Len(ret)\n \n def BuildPrompt(self):\n prompt, n = ConstructOptPrompt(self.ParameterizedBuildPrompt, low=1, high=len(self.conversations), maxLen=int(self.processor.llm.contextWindow * config.contextWindowRatio))\n if prompt is None:\n prompt = self.ParameterizedBuildPrompt(1)\n return prompt" }, { "identifier": "APromptMain", "path": "ailice/prompts/APromptMain.py", "snippet": "class APromptMain():\n PROMPT_NAME = \"main\"\n\n def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):\n self.processor = processor\n self.storage = storage\n self.collection = collection\n self.conversations = conversations\n self.formatter = formatter\n self.outputCB = outputCB\n self.prompt0 = read_text(\"ailice.prompts\", \"prompt_simple.txt\")\n self.PATTERNS = {\"CALL\": [{\"re\": GenerateRE4FunctionCalling(\"CALL<!|agentType: str, agentName: str, msg: str|!> -> str\"), \"isEntry\": True}]}\n self.ACTIONS= {}\n return\n \n def Recall(self, key: str):\n ret = self.storage.Query(self.collection, key)\n if (0 != len(ret)) and (ret[0][1] <= 0.5):\n return ret[0][0]\n else:\n return \"None.\"\n \n def Reset(self):\n return\n \n def GetPatterns(self):\n return self.PATTERNS\n \n def GetActions(self):\n return self.ACTIONS\n \n def ParameterizedBuildPrompt(self, n: int):\n context = str(self.formatter(prompt0 = \"\", conversations = self.conversations.GetConversations(frm = -1), encode = False))\n prompt = f\"\"\"\n{self.prompt0}\n\nEnd of general instructions.\n\nActive Agents: {[k+\": agentType \"+p.GetPromptName() for k,p in self.processor.subProcessors.items()]}\nRelevant 
Information:\n{self.Recall(context)}\n\"\"\"\n #prompt += \"\\nConversations:\"\n ret = self.formatter(prompt0 = prompt, conversations = self.conversations.GetConversations(frm = -n))\n return ret, self.formatter.Len(ret)\n \n def BuildPrompt(self):\n prompt, n = ConstructOptPrompt(self.ParameterizedBuildPrompt, low=1, high=len(self.conversations), maxLen=int(self.processor.llm.contextWindow * config.contextWindowRatio))\n if prompt is None:\n prompt = self.ParameterizedBuildPrompt(1)\n return prompt" }, { "identifier": "APromptSearchEngine", "path": "ailice/prompts/APromptSearchEngine.py", "snippet": "class APromptSearchEngine():\n PROMPT_NAME = \"search-engine\"\n\n def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):\n self.processor = processor\n self.conversations = conversations\n self.formatter = formatter\n self.outputCB = outputCB\n self.prompt0 = read_text(\"ailice.prompts\", \"prompt_searchengine.txt\")\n self.PATTERNS = {\"QUERY\": [{\"re\": GenerateRE4FunctionCalling(\"QUERY<!|request: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"ARXIV\": [{\"re\": GenerateRE4FunctionCalling(\"ARXIV<!|keywords: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"SCROLLDOWNARXIV\": [{\"re\": GenerateRE4FunctionCalling(\"SCROLLDOWNARXIV<!||!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"GOOGLE\": [{\"re\": GenerateRE4FunctionCalling(\"GOOGLE<!|keywords: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"SCROLLDOWNGOOGLE\": [{\"re\": GenerateRE4FunctionCalling(\"SCROLLDOWNGOOGLE<!||!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"DUCKDUCKGO\": [{\"re\": GenerateRE4FunctionCalling(\"DUCKDUCKGO<!|keywords: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"SCROLLDOWNDUCKDUCKGO\": [{\"re\": GenerateRE4FunctionCalling(\"SCROLLDOWNDUCKDUCKGO<!||!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"BROWSE\": [{\"re\": GenerateRE4FunctionCalling(\"BROWSE<!|url: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"SCROLLDOWN\": [{\"re\": GenerateRE4FunctionCalling(\"SCROLLDOWN<!||!> -> str\"), \"isEntry\": True}],\n \"RESPOND\": [{\"re\": GenerateRE4FunctionCalling(\"RESPOND<!|message: str|!> -> None\", faultTolerance = True), \"isEntry\": True}]}\n self.ACTIONS= {}\n return\n \n def Reset(self):\n return\n \n def GetPatterns(self):\n return self.PATTERNS\n \n def GetActions(self):\n return self.ACTIONS\n \n def ParameterizedBuildPrompt(self, n: int):\n prompt = f\"\"\"\n{self.prompt0}\n\nEnd of general instructions.\n\n\"\"\"\n #prompt += \"Conversations:\"\n ret = self.formatter(prompt0 = prompt, conversations = self.conversations.GetConversations(frm = -n))\n return ret, self.formatter.Len(ret)\n \n def BuildPrompt(self):\n prompt, n = ConstructOptPrompt(self.ParameterizedBuildPrompt, low=1, high=len(self.conversations), maxLen=int(self.processor.llm.contextWindow * config.contextWindowRatio))\n if prompt is None:\n prompt = self.ParameterizedBuildPrompt(1)\n return prompt" }, { "identifier": "APromptResearcher", "path": "ailice/prompts/APromptResearcher.py", "snippet": "class APromptResearcher():\n PROMPT_NAME = \"researcher\"\n \n def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):\n self.processor = processor\n self.storage = storage\n self.collection = collection\n self.conversations = conversations\n self.formatter = formatter\n self.outputCB = outputCB\n self.prompt0 = read_text(\"ailice.prompts\", 
\"prompt_researcher.txt\")\n self.PATTERNS = {\"CALL\": [{\"re\": GenerateRE4FunctionCalling(\"CALL<!|agentType: str, agentName: str, msg: str|!> -> str\"), \"isEntry\": True}],\n \"RESPOND\": [{\"re\": GenerateRE4FunctionCalling(\"RESPOND<!|message: str|!> -> None\", faultTolerance = True), \"isEntry\": True}],\n \"BROWSE\": [{\"re\": GenerateRE4FunctionCalling(\"BROWSE<!|url: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"SCROLLDOWN\": [{\"re\": GenerateRE4FunctionCalling(\"SCROLLDOWN<!||!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"BASH\": [{\"re\": GenerateRE4FunctionCalling(\"BASH<!|code: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"SCROLLUPBASH\": [{\"re\": GenerateRE4FunctionCalling(\"SCROLLUPBASH<!||!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"PYTHON\": [{\"re\": GenerateRE4FunctionCalling(\"PYTHON<!|code: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"SCROLLUPPY\": [{\"re\": GenerateRE4FunctionCalling(\"SCROLLUPPY<!||!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"STORE\": [{\"re\": GenerateRE4FunctionCalling(\"STORE<!|txt: str|!> -> None\", faultTolerance = True), \"isEntry\": True}],\n \"QUERY\": [{\"re\": GenerateRE4FunctionCalling(\"QUERY<!|keywords: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"VAR\": [{\"re\": GenerateRE4FunctionCalling(\"VAR<!|name: str, content: str|!> -> None\", faultTolerance = True), \"isEntry\": True}]}\n self.ACTIONS = {\"VAR\": {\"func\": self.Var}}\n self.variables = dict()\n return\n \n def Var(self, name: str, content: str):\n self.variables[name] = content\n return\n \n def Recall(self, key: str):\n ret = self.storage.Query(self.collection, key)\n if (0 != len(ret)): #and (ret[0][1] <= 0.5):\n return ret[0][0]\n else:\n return \"None.\"\n \n def GetPatterns(self):\n return self.PATTERNS\n \n def GetActions(self):\n return self.ACTIONS\n\n def ParameterizedBuildPrompt(self, n: int):\n context = str(self.formatter(prompt0 = \"\", conversations = self.conversations.GetConversations(frm = -1), encode = False))\n prompt = f\"\"\"\n{self.prompt0}\n\nEnd of general instructions.\n\nActive Agents: {[k+\": agentType \"+p.GetPromptName() for k,p in self.processor.subProcessors.items()]}\n\nVariables:\n{[f\"{varName}: {content}\" for varName, content in self.variables.items()]}\n\nRelevant Information: {self.Recall(context).strip()}\n\n\"\"\"\n #print(prompt)\n ret = self.formatter(prompt0 = prompt, conversations = self.conversations.GetConversations(frm = -n))\n return ret, self.formatter.Len(ret)\n \n def BuildPrompt(self):\n prompt, n = ConstructOptPrompt(self.ParameterizedBuildPrompt, low=1, high=len(self.conversations), maxLen=int(self.processor.llm.contextWindow * config.contextWindowRatio))\n if prompt is None:\n prompt = self.ParameterizedBuildPrompt(1)\n return prompt" }, { "identifier": "APromptCoder", "path": "ailice/prompts/APromptCoder.py", "snippet": "class APromptCoder():\n PROMPT_NAME = \"coder\"\n\n def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):\n self.processor = processor\n self.storage = storage\n self.collection = collection\n self.conversations = conversations\n self.formatter = formatter\n self.outputCB = outputCB\n self.prompt0 = read_text(\"ailice.prompts\", \"prompt_coder.txt\")\n self.PATTERNS = {}\n self.ACTIONS= {}\n\n return\n \n def Reset(self):\n return\n \n def GetPatterns(self):\n return self.PATTERNS\n \n def GetActions(self):\n return self.ACTIONS\n 
\n def ParameterizedBuildPrompt(self, n: int):\n prompt = f\"\"\"\n{self.prompt0}\n\"\"\"\n #prompt += \"\\nConversations:\"\n ret = self.formatter(prompt0 = prompt, conversations = self.conversations.GetConversations(frm = -n))\n return ret, self.formatter.Len(ret)\n \n def BuildPrompt(self):\n prompt, n = ConstructOptPrompt(self.ParameterizedBuildPrompt, low=1, high=len(self.conversations), maxLen=int(self.processor.llm.contextWindow * config.contextWindowRatio))\n if prompt is None:\n prompt = self.ParameterizedBuildPrompt(1)\n return prompt" }, { "identifier": "APromptModuleCoder", "path": "ailice/prompts/APromptModuleCoder.py", "snippet": "class APromptModuleCoder():\n PROMPT_NAME = \"module-coder\"\n\n def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):\n self.processor = processor\n self.storage = storage\n self.collection = collection\n self.conversations = conversations\n self.formatter = formatter\n self.outputCB = outputCB\n self.prompt0 = read_text(\"ailice.prompts\", \"prompt_module_coder.txt\")\n self.PATTERNS = {}\n self.ACTIONS= {}\n\n return\n \n def Reset(self):\n return\n \n def GetPatterns(self):\n return self.PATTERNS\n \n def GetActions(self):\n return self.ACTIONS\n \n def ParameterizedBuildPrompt(self, n: int):\n prompt = f\"\"\"\n{self.prompt0}\n\"\"\"\n #prompt += \"\\nConversations:\"\n ret = self.formatter(prompt0 = prompt, conversations = self.conversations.GetConversations(frm = -n))\n return ret, self.formatter.Len(ret)\n \n def BuildPrompt(self):\n prompt, n = ConstructOptPrompt(self.ParameterizedBuildPrompt, low=1, high=len(self.conversations), maxLen=int(self.processor.llm.contextWindow * config.contextWindowRatio))\n if prompt is None:\n prompt = self.ParameterizedBuildPrompt(1)\n return prompt" }, { "identifier": "APromptModuleLoader", "path": "ailice/prompts/APromptModuleLoader.py", "snippet": "class APromptModuleLoader():\n PROMPT_NAME = \"module-loader\"\n\n def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):\n self.processor = processor\n self.storage = storage\n self.collection = collection\n self.conversations = conversations\n self.formatter = formatter\n self.outputCB = outputCB\n self.prompt0 = read_text(\"ailice.prompts\", \"prompt_module_loader.txt\")\n self.memory = \"\"\n self.PATTERNS = {\"LOADMODULE\": [{\"re\": GenerateRE4FunctionCalling(\"LOADMODULE<!|addr: str|!> -> str\", faultTolerance = True), \"isEntry\": True}]}\n self.ACTIONS= {\"LOADMODULE\": {\"func\": self.LoadModule}}\n return\n \n def Reset(self):\n return\n \n def GetPatterns(self):\n return self.PATTERNS\n \n def GetActions(self):\n return self.ACTIONS\n \n def LoadModule(self, addr: str) -> str:\n try:\n ret = self.processor.RegisterModules([addr])\n prompts = []\n for r in ret:\n t = r['signature'].replace(r['signature'][:r['signature'].find('(')], r['action'], 1)\n newSig = t.replace('(', '<!|').replace(')', '|!>')\n self.processor.interpreter.RegisterPattern(nodeType=r['action'], pattern=GenerateRE4FunctionCalling(newSig, faultTolerance = True), isEntry=True)\n prompts.append(f\"{newSig}: {r['prompt']}\")\n self.memory = \"\\n\".join(prompts)\n ret = self.memory\n except Exception as e:\n ret = f\"Exception: {str(e)}\"\n return ret\n \n def ParameterizedBuildPrompt(self, n: int):\n prompt = f\"\"\"\n{self.prompt0}\n\nMODULE DETAILS:\n{self.memory}\n\n\"\"\"\n #prompt += \"\\nConversations:\"\n ret = self.formatter(prompt0 = prompt, conversations = 
self.conversations.GetConversations(frm = -n))\n return ret, self.formatter.Len(ret)\n \n def BuildPrompt(self):\n prompt, n = ConstructOptPrompt(self.ParameterizedBuildPrompt, low=1, high=len(self.conversations), maxLen=int(self.processor.llm.contextWindow * config.contextWindowRatio))\n if prompt is None:\n prompt = self.ParameterizedBuildPrompt(1)\n return prompt" }, { "identifier": "APromptCoderProxy", "path": "ailice/prompts/APromptCoderProxy.py", "snippet": "class APromptCoderProxy():\n PROMPT_NAME = \"coder-proxy\"\n\n def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):\n self.processor = processor\n self.storage = storage\n self.collection = collection\n self.conversations = conversations\n self.formatter = formatter\n self.outputCB = outputCB\n self.prompt0 = read_text(\"ailice.prompts\", \"prompt_coderproxy.txt\")\n self.PATTERNS = {\"CALL\": [{\"re\": GenerateRE4FunctionCalling(\"CALL<!|agentType: str, agentName: str, msg: str|!> -> str\"), \"isEntry\": True}],\n \"RESPOND\": [{\"re\": GenerateRE4FunctionCalling(\"RESPOND<!|message: str|!> -> None\", faultTolerance = True), \"isEntry\": True}],\n \"BASH\": [{\"re\": GenerateRE4FunctionCalling(\"BASH<!|code: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"SCROLLUPBASH\": [{\"re\": GenerateRE4FunctionCalling(\"SCROLLUPBASH<!||!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"PYTHON\": [{\"re\": GenerateRE4FunctionCalling(\"PYTHON<!|code: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"SCROLLUPPY\": [{\"re\": GenerateRE4FunctionCalling(\"SCROLLUPPY<!||!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"WAIT\": [{\"re\": GenerateRE4FunctionCalling(\"WAIT<!|duration: int|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"UpdateMemory\": [{\"re\": r\"UPDATED MEMORY(?P<newState>.*?)\", \"isEntry\": True}],\n \"SetVar\": [{\"re\": r\"(?P<varName>[a-zA-Z0-9_-]+)[ ]*=[ ]*<!\\|(?P<varValue>.*?)\\|!>\", \"isEntry\": True}],\n \"PrintVar\": [{\"re\": GenerateRE4FunctionCalling(\"PRINT<!|varName: str|!> -> str\", faultTolerance = True), \"isEntry\": True}]}\n self.ACTIONS= {\"UpdateMemory\": {\"func\": self.UpdateMemory},\n \"SetVar\": {\"func\": self.SetVar},\n \"PrintVar\": {\"func\": self.GetVar}}\n self.memory = \"\"\n self.vars = {}\n return\n \n def Reset(self):\n return\n \n def GetPatterns(self):\n return self.PATTERNS\n \n def GetActions(self):\n return self.ACTIONS\n \n def Recall(self, key: str):\n ret = self.storage.Query(self.collection, key)\n if (0 != len(ret)): #and (ret[0][1] <= 0.5):\n return ret[0][0]\n else:\n return \"None.\"\n \n def UpdateMemory(self, newMemory: str):\n self.memory = newMemory\n return\n \n def SetVar(self, varName: str, varValue: str):\n self.vars[varName] = varValue\n return\n \n def GetVar(self, varName: str) -> str:\n return self.vars.get(varName, f\"Variable {varName} NOT DEFINED. 
Only defined variable names are legal, this includes: {[k for k in self.vars]}\")\n \n def ParameterizedBuildPrompt(self, n: int):\n context = str(self.formatter(prompt0 = \"\", conversations = self.conversations.GetConversations(frm = -1), encode = False))\n prompt = f\"\"\"\n{self.prompt0}\n\nEnd of general instructions.\n\nActive Agents: {[k+\": agentType \"+p.GetPromptName() for k,p in self.processor.subProcessors.items()]}\n\nRelevant Information: {self.Recall(context).strip()}\n\n\"\"\"\n #prompt += \"\\nConversations:\"\n ret = self.formatter(prompt0 = prompt, conversations = self.conversations.GetConversations(frm = -n))\n return ret, self.formatter.Len(ret)\n \n def BuildPrompt(self):\n prompt, n = ConstructOptPrompt(self.ParameterizedBuildPrompt, low=1, high=len(self.conversations), maxLen=int(self.processor.llm.contextWindow * config.contextWindowRatio))\n if prompt is None:\n prompt = self.ParameterizedBuildPrompt(1)\n return prompt" }, { "identifier": "APromptArticleDigest", "path": "ailice/prompts/APromptArticleDigest.py", "snippet": "class APromptArticleDigest():\n PROMPT_NAME = \"article-digest\"\n \n def __init__(self, processor, storage, collection, conversations, formatter, outputCB = None):\n self.processor = processor\n self.storage = storage\n self.collection = collection\n self.conversations = conversations\n self.formatter = formatter\n self.outputCB = outputCB\n self.prompt0 = read_text('ailice.prompts', 'prompt_article_digest.txt')\n self.PATTERNS = {\"BROWSE\": [{\"re\": GenerateRE4FunctionCalling(\"BROWSE<!|url: str|!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"SCROLLDOWN\": [{\"re\": GenerateRE4FunctionCalling(\"SCROLLDOWN<!||!> -> str\", faultTolerance = True), \"isEntry\": True}],\n \"Output\": [{\"re\": r\"REPORT:(?P<txt>.*?)NOTEBOOK:\", \"isEntry\": True}],\n \"RESPOND\": [{\"re\": GenerateRE4FunctionCalling(\"RESPOND<!|message: str|!> -> None\", faultTolerance = True), \"isEntry\": True}]}\n self.ACTIONS = {\"Output\": {\"func\": self.Output}}\n return\n \n def Reset(self):\n return\n\n def Output(self, txt: str):\n txt = txt.strip()\n self.storage.Store(self.collection, txt)\n self.outputCB(f\"OUTPUT_{self.processor.name}\", txt)\n return\n \n def Recall(self, key: str):\n ret = self.storage.Query(self.collection, key)\n if (0 != len(ret)): #and (ret[0][1] <= 0.5):\n return ret[0][0]\n else:\n return \"None.\"\n \n def GetPatterns(self):\n return self.PATTERNS\n \n def GetActions(self):\n return self.ACTIONS\n\n def BuildPrompt(self):\n context = str(self.formatter(prompt0 = \"\", conversations = self.conversations.GetConversations(frm = -1), encode = False))\n prompt = f\"\"\"\n{self.prompt0}\n\nEnd of general instructions.\n\nRELEVANT INFORMATION: {self.Recall(context).strip()}\n\n\"\"\"\n #print(prompt)\n return self.formatter(prompt0 = prompt, conversations = self.conversations.GetConversations(frm = -2))" } ]
import time import simplejson as json import argparse from termcolor import colored from ailice.common.AConfig import config from ailice.core.AProcessor import AProcessor from ailice.core.llm.ALLMPool import llmPool from ailice.common.utils.ALogger import ALogger from ailice.common.ARemoteAccessors import clientPool from ailice.AServices import StartServices from ailice.common.APrompts import promptsManager from ailice.prompts.APromptChat import APromptChat from ailice.prompts.APromptMain import APromptMain from ailice.prompts.APromptSearchEngine import APromptSearchEngine from ailice.prompts.APromptResearcher import APromptResearcher from ailice.prompts.APromptCoder import APromptCoder from ailice.prompts.APromptModuleCoder import APromptModuleCoder from ailice.prompts.APromptModuleLoader import APromptModuleLoader from ailice.prompts.APromptCoderProxy import APromptCoderProxy from ailice.prompts.APromptArticleDigest import APromptArticleDigest
8,820
def GetInput(speech) -> str: if config.speechOn: print(colored("USER: ", "green"), end="", flush=True) inp = speech.GetAudio() print(inp, end="", flush=True) print("") else: inp = input(colored("USER: ", "green")) return inp def mainLoop(modelID: str, quantization: str, maxMemory: dict, prompt: str, temperature: float, flashAttention2: bool, speechOn: bool, ttsDevice: str, sttDevice: str, contextWindowRatio: float, localExecution: bool, trace: str): config.Initialize(needOpenaiGPTKey = ("oai:" in modelID)) config.quantization = quantization config.maxMemory = maxMemory config.temperature = temperature config.flashAttention2 = flashAttention2 config.speechOn = speechOn config.contextWindowRatio = contextWindowRatio config.localExecution = localExecution print(colored("The port range of the ext-modules has been changed from 2005-2016 to 59000-59200. If you are using an old version, startup failure will occur after updating the code. Please modify the port number in config.json and rebuild the docker image.", "yellow")) StartServices() clientPool.Init() if speechOn: speech = clientPool.GetClient(config.services['speech']['addr']) if (ttsDevice not in {'cpu','cuda'}) or (sttDevice not in {'cpu','cuda'}): print("the value of ttsDevice and sttDevice should be one of cpu or cuda, the default is cpu.") exit(-1) else: speech.SetDevices({"tts": ttsDevice, "stt": sttDevice}) else: speech = None
def GetInput(speech) -> str: if config.speechOn: print(colored("USER: ", "green"), end="", flush=True) inp = speech.GetAudio() print(inp, end="", flush=True) print("") else: inp = input(colored("USER: ", "green")) return inp def mainLoop(modelID: str, quantization: str, maxMemory: dict, prompt: str, temperature: float, flashAttention2: bool, speechOn: bool, ttsDevice: str, sttDevice: str, contextWindowRatio: float, localExecution: bool, trace: str): config.Initialize(needOpenaiGPTKey = ("oai:" in modelID)) config.quantization = quantization config.maxMemory = maxMemory config.temperature = temperature config.flashAttention2 = flashAttention2 config.speechOn = speechOn config.contextWindowRatio = contextWindowRatio config.localExecution = localExecution print(colored("The port range of the ext-modules has been changed from 2005-2016 to 59000-59200. If you are using an old version, startup failure will occur after updating the code. Please modify the port number in config.json and rebuild the docker image.", "yellow")) StartServices() clientPool.Init() if speechOn: speech = clientPool.GetClient(config.services['speech']['addr']) if (ttsDevice not in {'cpu','cuda'}) or (sttDevice not in {'cpu','cuda'}): print("the value of ttsDevice and sttDevice should be one of cpu or cuda, the default is cpu.") exit(-1) else: speech.SetDevices({"tts": ttsDevice, "stt": sttDevice}) else: speech = None
for promptCls in [APromptChat, APromptMain, APromptSearchEngine, APromptResearcher, APromptCoder, APromptModuleCoder, APromptModuleLoader, APromptCoderProxy, APromptArticleDigest]:
10
2023-10-16 01:51:14+00:00
12k
city96/ComfyUI_ExtraModels
PixArt/models/PixArtMS.py
[ { "identifier": "auto_grad_checkpoint", "path": "PixArt/models/utils.py", "snippet": "def _ntuple(n):\n def parse(x):\ndef set_grad_checkpoint(model, use_fp32_attention=False, gc_step=1):\n def set_attr(module):\ndef auto_grad_checkpoint(module, *args, **kwargs):\ndef checkpoint_sequential(functions, step, input, *args, **kwargs):\n def run_function(start, end, functions):\n def forward(input):\ndef get_rel_pos(q_size, k_size, rel_pos):\ndef add_decomposed_rel_pos(attn, q, rel_pos_h, rel_pos_w, q_size, k_size):" }, { "identifier": "t2i_modulate", "path": "PixArt/models/PixArt_blocks.py", "snippet": "def t2i_modulate(x, shift, scale):\n return x * (1 + scale) + shift" }, { "identifier": "CaptionEmbedder", "path": "PixArt/models/PixArt_blocks.py", "snippet": "class CaptionEmbedder(nn.Module):\n \"\"\"\n Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance.\n \"\"\"\n\n def __init__(self, in_channels, hidden_size, uncond_prob, act_layer=nn.GELU(approximate='tanh'), token_num=120):\n super().__init__()\n self.y_proj = Mlp(in_features=in_channels, hidden_features=hidden_size, out_features=hidden_size, act_layer=act_layer, drop=0)\n self.register_buffer(\"y_embedding\", nn.Parameter(torch.randn(token_num, in_channels) / in_channels ** 0.5))\n self.uncond_prob = uncond_prob\n\n def token_drop(self, caption, force_drop_ids=None):\n \"\"\"\n Drops labels to enable classifier-free guidance.\n \"\"\"\n if force_drop_ids is None:\n drop_ids = torch.rand(caption.shape[0]).cuda() < self.uncond_prob\n else:\n drop_ids = force_drop_ids == 1\n caption = torch.where(drop_ids[:, None, None, None], self.y_embedding, caption)\n return caption\n\n def forward(self, caption, train, force_drop_ids=None):\n if train:\n assert caption.shape[2:] == self.y_embedding.shape\n use_dropout = self.uncond_prob > 0\n if (train and use_dropout) or (force_drop_ids is not None):\n caption = self.token_drop(caption, force_drop_ids)\n caption = self.y_proj(caption)\n return caption" }, { "identifier": "WindowAttention", "path": "PixArt/models/PixArt_blocks.py", "snippet": "class WindowAttention(Attention_):\n \"\"\"Multi-head Attention block with relative position embeddings.\"\"\"\n\n def __init__(\n self,\n dim,\n num_heads=8,\n qkv_bias=True,\n use_rel_pos=False,\n rel_pos_zero_init=True,\n input_size=None,\n **block_kwargs,\n ):\n \"\"\"\n Args:\n dim (int): Number of input channels.\n num_heads (int): Number of attention heads.\n qkv_bias (bool: If True, add a learnable bias to query, key, value.\n rel_pos (bool): If True, add relative positional embeddings to the attention map.\n rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.\n input_size (int or None): Input resolution for calculating the relative positional\n parameter size.\n \"\"\"\n super().__init__(dim, num_heads=num_heads, qkv_bias=qkv_bias, **block_kwargs)\n\n self.use_rel_pos = use_rel_pos\n if self.use_rel_pos:\n # initialize relative positional embeddings\n self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, self.head_dim))\n self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, self.head_dim))\n\n if not rel_pos_zero_init:\n nn.init.trunc_normal_(self.rel_pos_h, std=0.02)\n nn.init.trunc_normal_(self.rel_pos_w, std=0.02)\n\n def forward(self, x, mask=None):\n B, N, C = x.shape # 2 4096 1152\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)\n\n if model_management.xformers_enabled():\n q, k, v = qkv.unbind(2)\n\n if 
getattr(self, 'fp32_attention', False):\n q, k, v = q.float(), k.float(), v.float()\n\n attn_bias = None\n if mask is not None:\n attn_bias = torch.zeros([B * self.num_heads, q.shape[1], k.shape[1]], dtype=q.dtype, device=q.device)\n attn_bias.masked_fill_(mask.squeeze(1).repeat(self.num_heads, 1, 1) == 0, float('-inf'))\n # Switch between torch / xformers attention\n x = xformers.ops.memory_efficient_attention(q, k, v, p=self.attn_drop.p, attn_bias=attn_bias)\n x = x.view(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n else:\n q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(0)\n\n q = q * self.scale\n attn = q @ k.transpose(-2, -1)\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n x = attn @ v\n\n x = x.transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x" }, { "identifier": "MultiHeadCrossAttention", "path": "PixArt/models/PixArt_blocks.py", "snippet": "class MultiHeadCrossAttention(nn.Module):\n def __init__(self, d_model, num_heads, attn_drop=0., proj_drop=0., **block_kwargs):\n super(MultiHeadCrossAttention, self).__init__()\n assert d_model % num_heads == 0, \"d_model must be divisible by num_heads\"\n\n self.d_model = d_model\n self.num_heads = num_heads\n self.head_dim = d_model // num_heads\n\n self.q_linear = nn.Linear(d_model, d_model)\n self.kv_linear = nn.Linear(d_model, d_model*2)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(d_model, d_model)\n self.proj_drop = nn.Dropout(proj_drop)\n\n def forward(self, x, cond, mask=None):\n # query/value: img tokens; key: condition; mask: if padding tokens\n B, N, C = x.shape\n\n if model_management.xformers_enabled():\n q = self.q_linear(x).view(1, -1, self.num_heads, self.head_dim)\n kv = self.kv_linear(cond).view(1, -1, 2, self.num_heads, self.head_dim)\n k, v = kv.unbind(2)\n attn_bias = None\n if mask is not None:\n attn_bias = xformers.ops.fmha.BlockDiagonalMask.from_seqlens([N] * B, mask)\n x = xformers.ops.memory_efficient_attention(q, k, v, p=self.attn_drop.p, attn_bias=attn_bias)\n x = x.view(B, -1, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n else:\n global competent_attention_implementation\n if not competent_attention_implementation:\n print(\"\"\"\\nYou should REALLY consider installing/enabling xformers.\\nAlternatively, open up ExtraModels/PixArt/models/PixArt_blocks.py and\\n- Fix the attention map on line 77 if you know how to\\n- Add scaled_dot_product_attention on line 150\\n- Send a PR and remove this message on line 32/66-69\\n\"\"\")\n competent_attention_implementation = True\n\n q = self.q_linear(x).view(1, -1, self.num_heads, self.head_dim)\n kv = self.kv_linear(cond).view(1, -1, 2, self.num_heads, self.head_dim)\n k, v = kv.unbind(2)\n q, k, v = map(lambda t: t.permute(0, 2, 1, 3),(q, k, v),)\n \n attn_mask = None\n if mask is not None and len(mask) > 1:\n # This is probably wrong\n attn_mask = torch.zeros(\n [1, q.shape[1], q.shape[2], v.shape[2]],\n dtype=q.dtype,\n device=q.device\n )\n attn_mask[:, :, (q.shape[2]//2):, mask[0]:] = True\n attn_mask[:, :, :(q.shape[2]//2), :mask[1]] = True\n\n x = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask, dropout_p=self.attn_drop.p)\n x = x.permute(0, 2, 1, 3).contiguous()\n x = x.view(B, -1, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x" }, { "identifier": "T2IFinalLayer", "path": "PixArt/models/PixArt_blocks.py", "snippet": "class T2IFinalLayer(nn.Module):\n \"\"\"\n The final layer of PixArt.\n \"\"\"\n\n def __init__(self, hidden_size, 
patch_size, out_channels):\n super().__init__()\n self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)\n self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)\n self.scale_shift_table = nn.Parameter(torch.randn(2, hidden_size) / hidden_size ** 0.5)\n self.out_channels = out_channels\n\n def forward(self, x, t):\n shift, scale = (self.scale_shift_table[None] + t[:, None]).chunk(2, dim=1)\n x = t2i_modulate(self.norm_final(x), shift, scale)\n x = self.linear(x)\n return x" }, { "identifier": "TimestepEmbedder", "path": "PixArt/models/PixArt_blocks.py", "snippet": "class TimestepEmbedder(nn.Module):\n \"\"\"\n Embeds scalar timesteps into vector representations.\n \"\"\"\n\n def __init__(self, hidden_size, frequency_embedding_size=256):\n super().__init__()\n self.mlp = nn.Sequential(\n nn.Linear(frequency_embedding_size, hidden_size, bias=True),\n nn.SiLU(),\n nn.Linear(hidden_size, hidden_size, bias=True),\n )\n self.frequency_embedding_size = frequency_embedding_size\n\n @staticmethod\n def timestep_embedding(t, dim, max_period=10000):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param t: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an (N, D) Tensor of positional embeddings.\n \"\"\"\n # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=t.device) / half)\n args = t[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n return embedding\n\n def forward(self, t):\n t_freq = self.timestep_embedding(t, self.frequency_embedding_size)\n t_emb = self.mlp(t_freq.to(t.dtype))\n return t_emb" }, { "identifier": "SizeEmbedder", "path": "PixArt/models/PixArt_blocks.py", "snippet": "class SizeEmbedder(TimestepEmbedder):\n \"\"\"\n Embeds scalar timesteps into vector representations.\n \"\"\"\n\n def __init__(self, hidden_size, frequency_embedding_size=256):\n super().__init__(hidden_size=hidden_size, frequency_embedding_size=frequency_embedding_size)\n self.mlp = nn.Sequential(\n nn.Linear(frequency_embedding_size, hidden_size, bias=True),\n nn.SiLU(),\n nn.Linear(hidden_size, hidden_size, bias=True),\n )\n self.frequency_embedding_size = frequency_embedding_size\n self.outdim = hidden_size\n\n def forward(self, s, bs):\n if s.ndim == 1:\n s = s[:, None]\n assert s.ndim == 2\n if s.shape[0] != bs:\n s = s.repeat(bs//s.shape[0], 1)\n assert s.shape[0] == bs\n b, dims = s.shape[0], s.shape[1]\n s = rearrange(s, \"b d -> (b d)\")\n s_freq = self.timestep_embedding(s, self.frequency_embedding_size)\n s_emb = self.mlp(s_freq.to(s.dtype))\n s_emb = rearrange(s_emb, \"(b d) d2 -> b (d d2)\", b=b, d=dims, d2=self.outdim)\n return s_emb" }, { "identifier": "PixArt", "path": "PixArt/models/PixArt.py", "snippet": "class PixArt(nn.Module):\n \"\"\"\n Diffusion model with a Transformer backbone.\n \"\"\"\n\n def __init__(\n self,\n input_size=32,\n patch_size=2,\n in_channels=4,\n hidden_size=1152,\n depth=28,\n num_heads=16,\n mlp_ratio=4.0,\n class_dropout_prob=0.1,\n pred_sigma=True,\n drop_path: float = 0.,\n window_size=0,\n window_block_indexes=[],\n use_rel_pos=False,\n caption_channels=4096,\n 
lewei_scale=1.0,\n config=None,\n **kwargs,\n ):\n super().__init__()\n self.pred_sigma = pred_sigma\n self.in_channels = in_channels\n self.out_channels = in_channels * 2 if pred_sigma else in_channels\n self.patch_size = patch_size\n self.num_heads = num_heads\n self.lewei_scale = lewei_scale,\n self.dtype = torch.get_default_dtype()\n\n self.x_embedder = PatchEmbed(input_size, patch_size, in_channels, hidden_size, bias=True)\n self.t_embedder = TimestepEmbedder(hidden_size)\n num_patches = self.x_embedder.num_patches\n self.base_size = input_size // self.patch_size\n # Will use fixed sin-cos embedding:\n self.register_buffer(\"pos_embed\", torch.zeros(1, num_patches, hidden_size))\n\n approx_gelu = lambda: nn.GELU(approximate=\"tanh\")\n self.t_block = nn.Sequential(\n nn.SiLU(),\n nn.Linear(hidden_size, 6 * hidden_size, bias=True)\n )\n self.y_embedder = CaptionEmbedder(in_channels=caption_channels, hidden_size=hidden_size, uncond_prob=class_dropout_prob, act_layer=approx_gelu)\n drop_path = [x.item() for x in torch.linspace(0, drop_path, depth)] # stochastic depth decay rule\n self.blocks = nn.ModuleList([\n PixArtBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio, drop_path=drop_path[i],\n input_size=(input_size // patch_size, input_size // patch_size),\n window_size=window_size if i in window_block_indexes else 0,\n use_rel_pos=use_rel_pos if i in window_block_indexes else False)\n for i in range(depth)\n ])\n self.final_layer = T2IFinalLayer(hidden_size, patch_size, self.out_channels)\n\n self.initialize_weights()\n\n print(f'Warning: lewei scale: {self.lewei_scale}, base size: {self.base_size}')\n\n def forward_raw(self, x, t, y, mask=None, data_info=None):\n \"\"\"\n Original forward pass of PixArt.\n x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)\n t: (N,) tensor of diffusion timesteps\n y: (N, 1, 120, C) tensor of class labels\n \"\"\"\n self.h, self.w = x.shape[-2]//self.patch_size, x.shape[-1]//self.patch_size\n x = self.x_embedder(x) + self.pos_embed # (N, T, D), where T = H * W / patch_size ** 2\n t = self.t_embedder(t) # (N, D)\n t0 = self.t_block(t)\n y = self.y_embedder(y, self.training) # (N, 1, L, D)\n if mask is not None:\n if mask.shape[0] != y.shape[0]:\n mask = mask.repeat(y.shape[0] // mask.shape[0], 1)\n mask = mask.squeeze(1).squeeze(1)\n y = y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, x.shape[-1])\n y_lens = mask.sum(dim=1).tolist()\n else:\n y_lens = [y.shape[2]] * y.shape[0]\n y = y.squeeze(1).view(1, -1, x.shape[-1])\n for block in self.blocks:\n x = auto_grad_checkpoint(block, x, y, t0, y_lens) # (N, T, D) #support grad checkpoint\n x = self.final_layer(x, t) # (N, T, patch_size ** 2 * out_channels)\n x = self.unpatchify(x) # (N, out_channels, H, W)\n return x\n\n def forward(self, x, timesteps, context, y=None, **kwargs):\n \"\"\"\n Forward pass that adapts comfy input to original forward function\n x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)\n timesteps: (N,) tensor of diffusion timesteps\n context: (N, 1, 120, C) conditioning\n y: extra conditioning.\n \"\"\"\n ## Still accepts the input w/o that dim but returns garbage\n if len(context.shape) == 3:\n context = context.unsqueeze(1)\n\n ## run original forward pass\n out = self.forward_raw(\n x = x.to(self.dtype),\n t = timesteps.to(self.dtype),\n y = context.to(self.dtype),\n )\n\n ## only return EPS\n out = out.to(torch.float)\n eps, rest = out[:, :self.in_channels], out[:, self.in_channels:]\n return 
eps\n\n def forward_with_dpmsolver(self, x, t, y, mask=None, **kwargs):\n \"\"\"\n dpm solver donnot need variance prediction\n \"\"\"\n # https://github.com/openai/glide-text2im/blob/main/notebooks/text2im.ipynb\n model_out = self.forward(x, t, y, mask)\n return model_out.chunk(2, dim=1)[0]\n\n def forward_with_cfg(self, x, t, y, cfg_scale, **kwargs):\n \"\"\"\n Forward pass of PixArt, but also batches the unconditional forward pass for classifier-free guidance.\n \"\"\"\n # https://github.com/openai/glide-text2im/blob/main/notebooks/text2im.ipynb\n half = x[: len(x) // 2]\n combined = torch.cat([half, half], dim=0)\n model_out = self.forward(combined, t, y, kwargs)\n model_out = model_out['x'] if isinstance(model_out, dict) else model_out\n eps, rest = model_out[:, :3], model_out[:, 3:]\n cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)\n half_eps = uncond_eps + cfg_scale * (cond_eps - uncond_eps)\n eps = torch.cat([half_eps, half_eps], dim=0)\n return torch.cat([eps, rest], dim=1)\n\n def unpatchify(self, x):\n \"\"\"\n x: (N, T, patch_size**2 * C)\n imgs: (N, H, W, C)\n \"\"\"\n c = self.out_channels\n p = self.x_embedder.patch_size[0]\n h = w = int(x.shape[1] ** 0.5)\n assert h * w == x.shape[1]\n\n x = x.reshape(shape=(x.shape[0], h, w, p, p, c))\n x = torch.einsum('nhwpqc->nchpwq', x)\n imgs = x.reshape(shape=(x.shape[0], c, h * p, h * p))\n return imgs\n\n def initialize_weights(self):\n # Initialize transformer layers:\n def _basic_init(module):\n if isinstance(module, nn.Linear):\n torch.nn.init.xavier_uniform_(module.weight)\n if module.bias is not None:\n nn.init.constant_(module.bias, 0)\n\n self.apply(_basic_init)\n\n # Initialize (and freeze) pos_embed by sin-cos embedding:\n pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], int(self.x_embedder.num_patches ** 0.5), lewei_scale=self.lewei_scale, base_size=self.base_size)\n self.pos_embed.data.copy_(torch.from_numpy(pos_embed).unsqueeze(0).to(self.dtype))\n\n # Initialize patch_embed like nn.Linear (instead of nn.Conv2d):\n w = self.x_embedder.proj.weight.data\n nn.init.xavier_uniform_(w.view([w.shape[0], -1]))\n\n # Initialize timestep embedding MLP:\n nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)\n nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)\n nn.init.normal_(self.t_block[1].weight, std=0.02)\n\n # Initialize caption embedding MLP:\n nn.init.normal_(self.y_embedder.y_proj.fc1.weight, std=0.02)\n nn.init.normal_(self.y_embedder.y_proj.fc2.weight, std=0.02)\n\n # Zero-out adaLN modulation layers in PixArt blocks:\n for block in self.blocks:\n nn.init.constant_(block.cross_attn.proj.weight, 0)\n nn.init.constant_(block.cross_attn.proj.bias, 0)\n\n # Zero-out output layers:\n nn.init.constant_(self.final_layer.linear.weight, 0)\n nn.init.constant_(self.final_layer.linear.bias, 0)" }, { "identifier": "get_2d_sincos_pos_embed", "path": "PixArt/models/PixArt.py", "snippet": "def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False, extra_tokens=0, lewei_scale=1.0, base_size=16):\n \"\"\"\n grid_size: int of the grid height and width\n return:\n pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)\n \"\"\"\n if isinstance(grid_size, int):\n grid_size = to_2tuple(grid_size)\n grid_h = np.arange(grid_size[0], dtype=np.float32) / (grid_size[0]/base_size) / lewei_scale\n grid_w = np.arange(grid_size[1], dtype=np.float32) / (grid_size[1]/base_size) / lewei_scale\n grid = np.meshgrid(grid_w, grid_h) # here w goes first\n grid = 
np.stack(grid, axis=0)\n grid = grid.reshape([2, 1, grid_size[1], grid_size[0]])\n\n pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)\n if cls_token and extra_tokens > 0:\n pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0)\n return pos_embed" } ]
import torch import torch.nn as nn from tqdm import tqdm from timm.models.layers import DropPath from timm.models.vision_transformer import Mlp from .utils import auto_grad_checkpoint, to_2tuple from .PixArt_blocks import t2i_modulate, CaptionEmbedder, WindowAttention, MultiHeadCrossAttention, T2IFinalLayer, TimestepEmbedder, SizeEmbedder from .PixArt import PixArt, get_2d_sincos_pos_embed
7,634
use_rel_pos=use_rel_pos, **block_kwargs) self.cross_attn = MultiHeadCrossAttention(hidden_size, num_heads, **block_kwargs) self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) # to be compatible with lower version pytorch approx_gelu = lambda: nn.GELU(approximate="tanh") self.mlp = Mlp(in_features=hidden_size, hidden_features=int(hidden_size * mlp_ratio), act_layer=approx_gelu, drop=0) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.window_size = window_size self.scale_shift_table = nn.Parameter(torch.randn(6, hidden_size) / hidden_size ** 0.5) def forward(self, x, y, t, mask=None, **kwargs): B, N, C = x.shape shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (self.scale_shift_table[None] + t.reshape(B, 6, -1)).chunk(6, dim=1) x = x + self.drop_path(gate_msa * self.attn(t2i_modulate(self.norm1(x), shift_msa, scale_msa))) x = x + self.cross_attn(x, y, mask) x = x + self.drop_path(gate_mlp * self.mlp(t2i_modulate(self.norm2(x), shift_mlp, scale_mlp))) return x ############################################################################# # Core PixArt Model # ################################################################################# class PixArtMS(PixArt): """ Diffusion model with a Transformer backbone. """ def __init__( self, input_size=32, patch_size=2, in_channels=4, hidden_size=1152, depth=28, num_heads=16, mlp_ratio=4.0, class_dropout_prob=0.1, learn_sigma=True, pred_sigma=True, drop_path: float = 0., window_size=0, window_block_indexes=[], use_rel_pos=False, caption_channels=4096, lewei_scale=1., config=None, **kwargs, ): super().__init__( input_size=input_size, patch_size=patch_size, in_channels=in_channels, hidden_size=hidden_size, depth=depth, num_heads=num_heads, mlp_ratio=mlp_ratio, class_dropout_prob=class_dropout_prob, learn_sigma=learn_sigma, pred_sigma=pred_sigma, drop_path=drop_path, window_size=window_size, window_block_indexes=window_block_indexes, use_rel_pos=use_rel_pos, lewei_scale=lewei_scale, config=config, **kwargs, ) self.dtype = torch.get_default_dtype() self.h = self.w = 0 approx_gelu = lambda: nn.GELU(approximate="tanh") self.t_block = nn.Sequential( nn.SiLU(), nn.Linear(hidden_size, 6 * hidden_size, bias=True) ) self.x_embedder = PatchEmbed(patch_size, in_channels, hidden_size, bias=True) self.y_embedder = CaptionEmbedder(in_channels=caption_channels, hidden_size=hidden_size, uncond_prob=class_dropout_prob, act_layer=approx_gelu) self.csize_embedder = SizeEmbedder(hidden_size//3) # c_size embed self.ar_embedder = SizeEmbedder(hidden_size//3) # aspect ratio embed drop_path = [x.item() for x in torch.linspace(0, drop_path, depth)] # stochastic depth decay rule self.blocks = nn.ModuleList([ PixArtMSBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio, drop_path=drop_path[i], input_size=(input_size // patch_size, input_size // patch_size), window_size=window_size if i in window_block_indexes else 0, use_rel_pos=use_rel_pos if i in window_block_indexes else False) for i in range(depth) ]) self.final_layer = T2IFinalLayer(hidden_size, patch_size, self.out_channels) self.training = False self.initialize() def forward_raw(self, x, t, y, mask=None, data_info=None, **kwargs): """ Original forward pass of PixArt. 
x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images) t: (N,) tensor of diffusion timesteps y: (N, 1, 120, C) tensor of class labels """ bs = x.shape[0] c_size, ar = data_info['img_hw'], data_info['aspect_ratio'] self.h, self.w = x.shape[-2]//self.patch_size, x.shape[-1]//self.patch_size pos_embed = torch.from_numpy(get_2d_sincos_pos_embed(self.pos_embed.shape[-1], (self.h, self.w), lewei_scale=self.lewei_scale, base_size=self.base_size)).unsqueeze(0).to(x.device).to(self.dtype) x = self.x_embedder(x) + pos_embed # (N, T, D), where T = H * W / patch_size ** 2 t = self.t_embedder(t) # (N, D) csize = self.csize_embedder(c_size, bs) # (N, D) ar = self.ar_embedder(ar, bs) # (N, D) t = t + torch.cat([csize, ar], dim=1) t0 = self.t_block(t) y = self.y_embedder(y, self.training) # (N, D) if mask is not None: if mask.shape[0] != y.shape[0]: mask = mask.repeat(y.shape[0] // mask.shape[0], 1) mask = mask.squeeze(1).squeeze(1) y = y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, x.shape[-1]) y_lens = mask.sum(dim=1).tolist() else: y_lens = [y.shape[2]] * y.shape[0] y = y.squeeze(1).view(1, -1, x.shape[-1]) for block in self.blocks:
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # -------------------------------------------------------- # References: # GLIDE: https://github.com/openai/glide-text2im # MAE: https://github.com/facebookresearch/mae/blob/main/models_mae.py # -------------------------------------------------------- class PatchEmbed(nn.Module): """ 2D Image to Patch Embedding """ def __init__( self, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True, bias=True, ): super().__init__() patch_size = to_2tuple(patch_size) self.patch_size = patch_size self.flatten = flatten self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias) self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() def forward(self, x): x = self.proj(x) if self.flatten: x = x.flatten(2).transpose(1, 2) # BCHW -> BNC x = self.norm(x) return x class PixArtMSBlock(nn.Module): """ A PixArt block with adaptive layer norm zero (adaLN-Zero) conditioning. """ def __init__(self, hidden_size, num_heads, mlp_ratio=4.0, drop_path=0., window_size=0, input_size=None, use_rel_pos=False, **block_kwargs): super().__init__() self.hidden_size = hidden_size self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) self.attn = WindowAttention(hidden_size, num_heads=num_heads, qkv_bias=True, input_size=input_size if window_size == 0 else (window_size, window_size), use_rel_pos=use_rel_pos, **block_kwargs) self.cross_attn = MultiHeadCrossAttention(hidden_size, num_heads, **block_kwargs) self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) # to be compatible with lower version pytorch approx_gelu = lambda: nn.GELU(approximate="tanh") self.mlp = Mlp(in_features=hidden_size, hidden_features=int(hidden_size * mlp_ratio), act_layer=approx_gelu, drop=0) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.window_size = window_size self.scale_shift_table = nn.Parameter(torch.randn(6, hidden_size) / hidden_size ** 0.5) def forward(self, x, y, t, mask=None, **kwargs): B, N, C = x.shape shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (self.scale_shift_table[None] + t.reshape(B, 6, -1)).chunk(6, dim=1) x = x + self.drop_path(gate_msa * self.attn(t2i_modulate(self.norm1(x), shift_msa, scale_msa))) x = x + self.cross_attn(x, y, mask) x = x + self.drop_path(gate_mlp * self.mlp(t2i_modulate(self.norm2(x), shift_mlp, scale_mlp))) return x ############################################################################# # Core PixArt Model # ################################################################################# class PixArtMS(PixArt): """ Diffusion model with a Transformer backbone. 
""" def __init__( self, input_size=32, patch_size=2, in_channels=4, hidden_size=1152, depth=28, num_heads=16, mlp_ratio=4.0, class_dropout_prob=0.1, learn_sigma=True, pred_sigma=True, drop_path: float = 0., window_size=0, window_block_indexes=[], use_rel_pos=False, caption_channels=4096, lewei_scale=1., config=None, **kwargs, ): super().__init__( input_size=input_size, patch_size=patch_size, in_channels=in_channels, hidden_size=hidden_size, depth=depth, num_heads=num_heads, mlp_ratio=mlp_ratio, class_dropout_prob=class_dropout_prob, learn_sigma=learn_sigma, pred_sigma=pred_sigma, drop_path=drop_path, window_size=window_size, window_block_indexes=window_block_indexes, use_rel_pos=use_rel_pos, lewei_scale=lewei_scale, config=config, **kwargs, ) self.dtype = torch.get_default_dtype() self.h = self.w = 0 approx_gelu = lambda: nn.GELU(approximate="tanh") self.t_block = nn.Sequential( nn.SiLU(), nn.Linear(hidden_size, 6 * hidden_size, bias=True) ) self.x_embedder = PatchEmbed(patch_size, in_channels, hidden_size, bias=True) self.y_embedder = CaptionEmbedder(in_channels=caption_channels, hidden_size=hidden_size, uncond_prob=class_dropout_prob, act_layer=approx_gelu) self.csize_embedder = SizeEmbedder(hidden_size//3) # c_size embed self.ar_embedder = SizeEmbedder(hidden_size//3) # aspect ratio embed drop_path = [x.item() for x in torch.linspace(0, drop_path, depth)] # stochastic depth decay rule self.blocks = nn.ModuleList([ PixArtMSBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio, drop_path=drop_path[i], input_size=(input_size // patch_size, input_size // patch_size), window_size=window_size if i in window_block_indexes else 0, use_rel_pos=use_rel_pos if i in window_block_indexes else False) for i in range(depth) ]) self.final_layer = T2IFinalLayer(hidden_size, patch_size, self.out_channels) self.training = False self.initialize() def forward_raw(self, x, t, y, mask=None, data_info=None, **kwargs): """ Original forward pass of PixArt. x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images) t: (N,) tensor of diffusion timesteps y: (N, 1, 120, C) tensor of class labels """ bs = x.shape[0] c_size, ar = data_info['img_hw'], data_info['aspect_ratio'] self.h, self.w = x.shape[-2]//self.patch_size, x.shape[-1]//self.patch_size pos_embed = torch.from_numpy(get_2d_sincos_pos_embed(self.pos_embed.shape[-1], (self.h, self.w), lewei_scale=self.lewei_scale, base_size=self.base_size)).unsqueeze(0).to(x.device).to(self.dtype) x = self.x_embedder(x) + pos_embed # (N, T, D), where T = H * W / patch_size ** 2 t = self.t_embedder(t) # (N, D) csize = self.csize_embedder(c_size, bs) # (N, D) ar = self.ar_embedder(ar, bs) # (N, D) t = t + torch.cat([csize, ar], dim=1) t0 = self.t_block(t) y = self.y_embedder(y, self.training) # (N, D) if mask is not None: if mask.shape[0] != y.shape[0]: mask = mask.repeat(y.shape[0] // mask.shape[0], 1) mask = mask.squeeze(1).squeeze(1) y = y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, x.shape[-1]) y_lens = mask.sum(dim=1).tolist() else: y_lens = [y.shape[2]] * y.shape[0] y = y.squeeze(1).view(1, -1, x.shape[-1]) for block in self.blocks:
x = auto_grad_checkpoint(block, x, y, t0, y_lens, **kwargs) # (N, T, D) #support grad checkpoint
0
2023-10-20 21:19:44+00:00
12k
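The PixArtMS record above conditions each transformer block with adaLN-Zero: a learned scale_shift_table is added to the timestep embedding and split into six (shift, scale, gate) chunks. The t2i_modulate helper is imported from PixArt_blocks and not reproduced in the record; the sketch below is a minimal, self-contained illustration of that conditioning step, assuming the usual DiT-style definition x * (1 + scale) + shift (an assumption, since the record does not show the function body).

import torch

def t2i_modulate(x, shift, scale):
    # Assumed DiT-style modulation; scale/shift are (B, 1, C), x is (B, N, C).
    return x * (1 + scale) + shift

# Toy shapes mirroring PixArtMSBlock.forward.
B, N, C = 2, 16, 1152
x = torch.randn(B, N, C)
t = torch.randn(B, 6 * C)                           # output of t_block(t)
scale_shift_table = torch.randn(6, C) / C ** 0.5    # as in the record

shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
    scale_shift_table[None] + t.reshape(B, 6, -1)
).chunk(6, dim=1)

x_mod = t2i_modulate(x, shift_msa, scale_msa)       # input to self-attention
print(x_mod.shape)                                  # torch.Size([2, 16, 1152])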
apple/ml-nvas3d
demo/generate_demo_video.py
[ { "identifier": "convolve_moving_receiver", "path": "nvas3d/utils/dynamic_utils.py", "snippet": "def convolve_moving_receiver(\n source_audio: np.ndarray,\n rirs: np.ndarray,\n interp_index: T.List[int],\n interp_weight: T.List[float]\n) -> np.ndarray:\n \"\"\"\n Apply convolution between an audio signal and moving impulse responses (IRs).\n\n Args:\n - source_audio: Source audio of shape (audio_len,)\n - rirs: RIRs of shape (num_positions, num_channels, ir_length)\n - interp_index: Indices representing the start positions for interpolation of shape (audio_len,).\n - interp_weight: Weight values for linear interpolation of shape (audio_len,).\n\n Returns:\n - Convolved audio signal of shape (num_channels, audio_len)\n \"\"\"\n\n num_channels = rirs.shape[1]\n audio_len = source_audio.shape[0]\n\n # Perform convolution for each position and channel\n convolved_audios = oaconvolve(source_audio[None, None, :], rirs, axes=-1)[..., :audio_len]\n\n # NumPy fancy indexing and broadcasting for interpolation\n start_audio = convolved_audios[interp_index, np.arange(num_channels)[:, None], np.arange(audio_len)]\n end_audio = convolved_audios[interp_index + 1, np.arange(num_channels)[:, None], np.arange(audio_len)]\n interp_weight = interp_weight[None, :]\n\n # Apply linear interpolation\n moving_audio = (1 - interp_weight) * start_audio + interp_weight * end_audio\n\n return moving_audio" }, { "identifier": "setup_dynamic_interp", "path": "nvas3d/utils/dynamic_utils.py", "snippet": "def setup_dynamic_interp(\n receiver_position: np.ndarray,\n total_samples: int,\n) -> T.Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Setup moving path with a constant speed for a receiver, given its positions in 3D space.\n\n Args:\n - receiver_position: Receiver positions in 3D space of shape (num_positions, 3).\n - total_samples: Total number of samples in the audio.\n\n Returns:\n - interp_index: Indices representing the start positions for interpolation.\n - interp_weight: Weight values for linear interpolation.\n \"\"\"\n\n # Calculate the number of samples per interval\n distance = np.linalg.norm(np.diff(receiver_position, axis=0), axis=1)\n speed_per_sample = distance.sum() / total_samples\n samples_per_interval = np.round(distance / speed_per_sample).astype(int)\n\n # Distribute rounding errors\n error = total_samples - samples_per_interval.sum()\n for i in np.random.choice(len(samples_per_interval), abs(error)):\n samples_per_interval[i] += np.sign(error)\n\n # Calculate indices and weights for linear interpolation\n interp_index = np.repeat(np.arange(len(distance)), samples_per_interval)\n interp_weight = np.concatenate([np.linspace(0, 1, num, endpoint=False) for num in samples_per_interval])\n\n return interp_index, interp_weight.astype(np.float32)" }, { "identifier": "clip_two", "path": "nvas3d/utils/audio_utils.py", "snippet": "def clip_two(audio1, audio2):\n \"\"\"\n Clips two audio signals to the same length.\n\n Args:\n audio1: First audio signal.\n audio2: Second audio signal.\n\n Returns: \n - Two audio signals of the same length.\n \"\"\"\n\n length_diff = audio1.shape[-1] - audio2.shape[-1]\n\n if length_diff == 0:\n return audio1, audio2\n elif length_diff > 0:\n audio1 = audio1[..., :audio2.shape[-1]]\n elif length_diff < 0:\n audio2 = audio2[..., :audio1.shape[-1]]\n\n return audio1, audio2" }, { "identifier": "clip_all", "path": "nvas3d/utils/audio_utils.py", "snippet": "def clip_all(audio_list):\n \"\"\"\n Clips all audio signals in a list to the same length.\n\n Args: \n audio_list: List of audio 
signals.\n\n Returns: \n - List of audio signals of the same length.\n \"\"\"\n\n min_length = min(audio.shape[-1] for audio in audio_list)\n clipped_audio_list = []\n for audio in audio_list:\n clipped_audio = audio[..., :min_length]\n clipped_audio_list.append(clipped_audio)\n\n return clipped_audio_list" }, { "identifier": "create_scene", "path": "soundspaces_nvas3d/utils/ss_utils.py", "snippet": "def create_scene(room: str,\n receiver_position: T.Tuple[float, float, float] = [0.0, 0.0, 0.0],\n sample_rate: float = 48000,\n image_size: T.Tuple[int, int] = (512, 256),\n include_visual_sensor: bool = True,\n hfov: float = 90.0\n ) -> Scene:\n \"\"\"\n Create a soundspaces scene to render IR.\n \"\"\"\n\n # Note: Make sure mp3d room is downloaded\n with suppress_stdout_and_stderr():\n # Create a receiver\n receiver = Receiver(\n position=receiver_position,\n rotation=0,\n sample_rate=sample_rate\n )\n\n scene = Scene(\n room,\n [None], # placeholder for source class\n receiver=receiver,\n include_visual_sensor=include_visual_sensor,\n add_source_mesh=False,\n device=torch.device('cpu'),\n add_source=False,\n image_size=image_size,\n hfov=hfov\n )\n\n return scene" }, { "identifier": "render_rir_parallel", "path": "soundspaces_nvas3d/utils/ss_utils.py", "snippet": "def render_rir_parallel(room_list: T.List[str],\n source_position_list: T.List[T.Tuple[float, float, float]],\n receiver_position_list: T.List[T.Tuple[float, float, float]],\n filename_list: T.List[str] = None,\n receiver_rotation_list: T.List[float] = None,\n batch_size: int = 64,\n sample_rate: float = 48000,\n use_default_material: bool = False,\n channel_type: str = 'Ambisonics',\n channel_order: int = 1\n ) -> T.List[torch.Tensor]:\n \"\"\"\n Run render_ir parallely for all elements of zip(source_position_list, receiver_position_list).\n \"\"\"\n\n assert len(room_list) == len(source_position_list)\n assert len(source_position_list) == len(receiver_position_list)\n\n if filename_list is None:\n is_return = True\n else:\n is_return = False\n\n if receiver_rotation_list is None:\n receiver_rotation_list = [0] * len(receiver_position_list)\n\n # Note: Make sure all rooms are downloaded\n\n # Calculate the number of batches\n num_points = len(source_position_list)\n num_batches = (num_points + batch_size - 1) // batch_size\n\n # Use tqdm to display the progress bar\n progress_bar = tqdm(total=num_points)\n\n def update_progress(*_):\n progress_bar.update()\n\n ir_list = []\n # Process the tasks in batches\n for batch_idx in range(num_batches):\n # Calculate the start and end indices of the current batch\n start_idx = batch_idx * batch_size\n end_idx = min(start_idx + batch_size, num_points)\n if is_return:\n batch = [(room_list[i], source_position_list[i], receiver_position_list[i], None, receiver_rotation_list[i]) for i in range(start_idx, end_idx)]\n else:\n batch = [(room_list[i], source_position_list[i], receiver_position_list[i], filename_list[i], receiver_rotation_list[i]) for i in range(start_idx, end_idx)]\n\n # Create a multiprocessing Pool for the current batch\n with multiprocessing.Pool() as pool:\n tasks = []\n for room, source_position, receiver_position, filename, receiver_rotation in batch:\n # Apply async mapping of process_ir function\n task = pool.apply_async(render_ir, args=(room, source_position, receiver_position, filename, receiver_rotation, sample_rate, use_default_material, channel_type, channel_order), callback=update_progress)\n tasks.append(task)\n\n # Wait for all tasks in the batch to complete and 
collect results\n for task in tasks:\n if is_return:\n ir = task.get() # Block until the result is ready\n ir_list.append(ir) # Append the result to the list\n else:\n task.get()\n if is_return:\n return ir_list" }, { "identifier": "load_room_grid", "path": "soundspaces_nvas3d/utils/aihabitat_utils.py", "snippet": "def load_room_grid(\n room: str,\n grid_distance: float\n) -> T.Dict:\n \"\"\"\n Load grid data for a specified room. If the grid data does not exist, it generates one.\n\n Args:\n - room: Name of the room.\n - grid_distance: The spacing between grid points.\n\n Returns:\n - A dictionary containing grid information for the specified room.\n \"\"\"\n\n grid_distance_str = str(grid_distance).replace(\".\", \"_\")\n dirname_grid = f'data/scene_datasets/metadata/mp3d/grid_{grid_distance_str}'\n filename_grid = f'{dirname_grid}/grid_{room}.npy'\n if not os.path.exists(filename_grid):\n os.makedirs(dirname_grid, exist_ok=True)\n print(f'Computing grid_{room}...')\n from soundspaces_nvas3d.rir_generation.generate_grid import save_xy_grid_points\n grid_info = save_xy_grid_points(room, grid_distance, dirname_grid)\n\n # load grid\n grid_info = np.load(filename_grid, allow_pickle=True).item()\n\n return grid_info" }, { "identifier": "Receiver", "path": "soundspaces_nvas3d/soundspaces_nvas3d.py", "snippet": "class Receiver:\n \"\"\"\n Receiver for SoundSpaces\n \"\"\"\n\n def __init__(self,\n position: T.Tuple[float, float, float],\n rotation: float,\n sample_rate: float = 48000,\n ):\n\n self.position = position\n self.rotation = rotation\n self.sample_rate = sample_rate" }, { "identifier": "Source", "path": "soundspaces_nvas3d/soundspaces_nvas3d.py", "snippet": "class Source:\n \"\"\"\n Source for Soundspaces\n \"\"\"\n\n def __init__(self,\n position: T.Tuple[float, float, float],\n rotation: float,\n dry_sound: str,\n mesh: str,\n device: torch.device\n ):\n\n self.position = position\n self.rotation = rotation\n self.device = device # where to store dry_sound\n self.dry_sound = dry_sound\n self.mesh = mesh" }, { "identifier": "Scene", "path": "soundspaces_nvas3d/soundspaces_nvas3d.py", "snippet": "class Scene:\n \"\"\"\n Soundspaces scene including room, receiver, and source list\n \"\"\"\n\n def __init__(self,\n room: str,\n source_name_list: T.List[str],\n receiver: Receiver = None,\n source_list: T.List[Source] = None,\n include_visual_sensor: bool = True,\n add_source_mesh: bool = True,\n device: torch.device = torch.device('cpu'),\n add_source: bool = True,\n image_size: T.Tuple[int, int] = (512, 256),\n hfov: float = 90.0,\n use_default_material: bool = False,\n channel_type: str = 'Ambisonics',\n channel_order: int = 1\n ):\n\n # Set scene\n self.room = room\n self.n_sources = len(source_name_list)\n assert self.n_sources > 0\n self.receiver = receiver\n self.source_list = source_list\n self.source_current = None\n self.include_visual_sensor = include_visual_sensor\n self.add_source_mesh = add_source_mesh\n self.device = device # where to store IR\n\n # Set channel config for soundspaces\n self.channel = {}\n self.channel['type'] = channel_type\n self.channel['order'] = channel_order\n if channel_type == 'Ambisonics':\n self.channel_count = (self.channel['order'] + 1)**2\n elif channel_type == 'Binaural':\n self.channel_count = 2\n\n # Set aihabitat config for soundspaces\n self.aihabitat = {}\n self.aihabitat['default_agent'] = 0\n self.aihabitat['sensor_height'] = 1.5\n self.aihabitat['height'] = image_size[0]\n self.aihabitat['width'] = image_size[1]\n self.aihabitat['hfov'] 
= hfov\n\n # Set acoustics config for soundspaces\n self.acoustic_config = {}\n self.acoustic_config['sampleRate'] = 48000\n self.acoustic_config['direct'] = True\n self.acoustic_config['indirect'] = True\n self.acoustic_config['diffraction'] = True\n self.acoustic_config['transmission'] = True\n self.acoustic_config['directSHOrder'] = 5\n self.acoustic_config['indirectSHOrder'] = 3\n self.acoustic_config['unitScale'] = 1\n self.acoustic_config['frequencyBands'] = 32\n self.acoustic_config['indirectRayCount'] = 50000\n\n # Set audio material\n if use_default_material:\n self.audio_material = './data/material/mp3d_material_config_default.json'\n else:\n self.audio_material = './data/material/mp3d_material_config.json'\n\n # Create simulation\n self.create_scene()\n\n # Randomly set source and receiver position\n source_position, source_rotation = None, None\n receiver_position, receiver_rotation = None, None\n\n # Create receiver (inside the room)\n if self.receiver is None:\n # random receiver\n self.create_receiver(receiver_position, receiver_rotation)\n else:\n # input receiver\n self.update_receiver(self.receiver)\n\n if add_source:\n # Create source\n if self.source_list is None:\n # random source\n self.source_list = [None] * self.n_sources\n for source_id, source_name in enumerate(source_name_list):\n self.create_source(source_name, source_id, source_position, source_rotation)\n else:\n # input source\n for source_id, _ in enumerate(source_name_list):\n self.update_source(self.source_list[source_id], source_id)\n\n def create_scene(self):\n \"\"\"\n Given the configuration, create a scene for soundspaces\n \"\"\"\n\n # Set backend configuration\n backend_cfg = habitat_sim.SimulatorConfiguration()\n backend_cfg.scene_id = f'./data/scene_datasets/mp3d/{self.room}/{self.room}.glb'\n backend_cfg.scene_dataset_config_file = './data/scene_datasets/mp3d/mp3d.scene_dataset_config.json'\n backend_cfg.load_semantic_mesh = True\n backend_cfg.enable_physics = False\n\n # Set agent configuration\n agent_config = habitat_sim.AgentConfiguration()\n\n if self.include_visual_sensor:\n # Set color sensor\n rgb_sensor_spec = habitat_sim.CameraSensorSpec()\n rgb_sensor_spec.uuid = \"color_sensor\"\n rgb_sensor_spec.sensor_type = habitat_sim.SensorType.COLOR\n rgb_sensor_spec.resolution = [self.aihabitat['height'], self.aihabitat['width']]\n rgb_sensor_spec.position = [0.0, self.aihabitat[\"sensor_height\"], 0.0]\n rgb_sensor_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE\n rgb_sensor_spec.hfov = self.aihabitat[\"hfov\"]\n agent_config.sensor_specifications = [rgb_sensor_spec]\n\n # Set depth sensor\n depth_sensor_spec = habitat_sim.CameraSensorSpec()\n depth_sensor_spec.uuid = \"depth_sensor\"\n depth_sensor_spec.sensor_type = habitat_sim.SensorType.DEPTH\n depth_sensor_spec.resolution = [self.aihabitat[\"height\"], self.aihabitat[\"width\"]]\n depth_sensor_spec.position = [0.0, self.aihabitat[\"sensor_height\"], 0.0]\n depth_sensor_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE\n depth_sensor_spec.hfov = self.aihabitat[\"hfov\"]\n agent_config.sensor_specifications.append(depth_sensor_spec)\n\n # # Set semantic sensor\n # semantic_sensor_spec = habitat_sim.CameraSensorSpec()\n # semantic_sensor_spec.uuid = \"semantic_sensor\"\n # semantic_sensor_spec.sensor_type = habitat_sim.SensorType.SEMANTIC\n # semantic_sensor_spec.resolution = [self.aihabitat[\"height\"], self.aihabitat[\"width\"]]\n # semantic_sensor_spec.position = [0.0, self.aihabitat[\"sensor_height\"], 0.0]\n # 
semantic_sensor_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE\n # semantic_sensor_spec.hfov = self.aihabitat[\"hfov\"]\n # agent_config.sensor_specifications.append(semantic_sensor_spec)\n\n # Set simulator configuration\n cfg = habitat_sim.Configuration(backend_cfg, [agent_config])\n\n # Set simulator\n sim = habitat_sim.Simulator(cfg)\n\n # set navmesh path for searching for navigatable points\n navmesh = f'./data/scene_datasets/mp3d/{self.room}/{self.room}.navmesh'\n sim.pathfinder.load_nav_mesh(navmesh)\n\n # seed for navmesh\n sim.seed(random.randint(0, 1024))\n\n # Set simulation\n self.sim = sim\n print('Scene created!')\n\n return self\n\n import torch\n\n def add_audio_sensor(self):\n \"\"\"\n Add audio sensor to the scene\n \"\"\"\n\n # set audio sensor\n audio_sensor_spec = habitat_sim.AudioSensorSpec()\n audio_sensor_spec.uuid = \"audio_sensor\"\n audio_sensor_spec.enableMaterials = True # make sure _semantic.ply file is in the scene folder\n audio_sensor_spec.channelLayout.type = getattr(habitat_sim.sensor.RLRAudioPropagationChannelLayoutType, self.channel['type'])\n audio_sensor_spec.channelLayout.channelCount = self.channel_count # ambisonics\n\n # Set acoustic configuration\n audio_sensor_spec.acousticsConfig.sampleRate = self.acoustic_config['sampleRate']\n audio_sensor_spec.acousticsConfig.direct = self.acoustic_config['direct']\n audio_sensor_spec.acousticsConfig.indirect = self.acoustic_config['indirect']\n audio_sensor_spec.acousticsConfig.diffraction = self.acoustic_config['diffraction']\n audio_sensor_spec.acousticsConfig.transmission = self.acoustic_config['transmission']\n audio_sensor_spec.acousticsConfig.directSHOrder = self.acoustic_config['directSHOrder']\n audio_sensor_spec.acousticsConfig.indirectSHOrder = self.acoustic_config['indirectSHOrder']\n audio_sensor_spec.acousticsConfig.unitScale = self.acoustic_config['unitScale']\n audio_sensor_spec.acousticsConfig.frequencyBands = self.acoustic_config['frequencyBands']\n audio_sensor_spec.acousticsConfig.indirectRayCount = self.acoustic_config['indirectRayCount']\n # audio_sensor_spec.acousticsConfig.maxIRLength = 40.0\n # audio_sensor_spec.acousticsConfig.sourceRayCount = 2000\n # audio_sensor_spec.acousticsConfig.meshSimplification = False\n\n # Initialize receiver\n audio_sensor_spec.position = [0.0, self.aihabitat['sensor_height'], 0.0] # audio sensor has a height of 1.5m\n self.sim.add_sensor(audio_sensor_spec)\n\n audio_sensor = self.sim.get_agent(self.aihabitat['default_agent'])._sensors['audio_sensor']\n audio_sensor.setAudioMaterialsJSON(self.audio_material)\n\n return self\n\n def create_receiver(self,\n position: T.Tuple[float, float, float] = None,\n rotation: float = None\n ):\n \"\"\"\n Randomly sample receiver position and rotation\n \"\"\"\n\n if position is None:\n # Randomly set receiver position in the room\n position = self.sim.pathfinder.get_random_navigable_point()\n rotation = random.uniform(0, 360)\n\n # Set sample rate\n sample_rate = self.acoustic_config['sampleRate']\n\n # Set receiver\n receiver = Receiver(position, rotation, sample_rate)\n\n # Update receiver\n self.update_receiver(receiver)\n\n return self\n\n def update_receiver(self,\n receiver: Receiver\n ):\n \"\"\"\n Update receiver\n \"\"\"\n\n agent = self.sim.get_agent(self.aihabitat[\"default_agent\"])\n new_state = self.sim.get_agent(self.aihabitat[\"default_agent\"]).get_state()\n new_state.position = np.array(receiver.position + np.array([0, 0.0, 0])) # agent height is already applied in 
audio_sensor_spec.position\n new_state.rotation = quat_from_angle_axis(math.radians(receiver.rotation), np.array([0, 1.0, 0])) # + -> left\n # new_state.rotation *= quat_from_angle_axis(math.radians(-30), np.array([1.0, 0, 0])) # + -> up\n new_state.sensor_states = {}\n agent.set_state(new_state, True)\n\n self.receiver = receiver # for reference\n\n return self\n\n def update_receiver_position(self,\n receiver_position: T.Tuple[float, float, float]\n ):\n \"\"\"\n Update receiver position\n \"\"\"\n\n self.receiver.position = receiver_position\n\n agent = self.sim.get_agent(self.aihabitat[\"default_agent\"])\n new_state = self.sim.get_agent(self.aihabitat[\"default_agent\"]).get_state()\n new_state.position = np.array(receiver_position + np.array([0, 0.0, 0])) # agent height is already applied in audio_sensor_spec.position\n new_state.sensor_states = {}\n agent.set_state(new_state, True)\n\n return self\n\n def create_source(self,\n source_name: str,\n source_id: int,\n position: T.Tuple[float, float, float] = None,\n rotation: float = None\n ):\n \"\"\"\n Set source given the source name, position, and rotation\n \"\"\"\n\n if position is None:\n # Randomly set source position in the room\n position = self.sim.pathfinder.get_random_navigable_point()\n rotation = random.uniform(0, 360) # only for mesh as source sound is omnidirectional\n\n # Randomly set source sound\n dry_sound, mesh = sample_dry_sound_and_mesh(source_name)\n\n # Set source\n source = Source(position, rotation, dry_sound, mesh, device=self.device)\n\n # Save source\n self.update_source(source, source_id)\n\n return self\n\n def update_source(self,\n source: Source,\n source_id: int = None\n ):\n \"\"\"\n Update source\n \"\"\"\n\n if source_id is not None:\n # update source list\n self.source_list[source_id] = source\n\n # Add mesh\n if self.add_source_mesh:\n ########## Add mesh (source.position, source.rotation) ##########\n obj_templates_mgr = self.sim.get_object_template_manager()\n rigid_obj_mgr = self.sim.get_rigid_object_manager()\n\n # Load the object template from the configuration file\n obj_templates_mgr.load_configs(str(os.path.join(\"data/objects\")))\n\n # Insert the object relative to the agent\n object_ids = []\n object_orientation = mn.Quaternion.rotation(mn.Deg(source.rotation), mn.Vector3.y_axis())\n object_template_handle = obj_templates_mgr.get_template_handles(f'data/objects/{source.mesh}')[0] # debug\n if source.mesh == 'male':\n scale = 0.5\n height_offset = 0.935\n elif source.mesh == 'female':\n scale = 1.0\n height_offset = 0.85\n elif source.mesh == 'guitar':\n scale = 1 / 1239.1628 * 2\n height_offset = 1.5\n object_orientation *= mn.Quaternion.rotation(mn.Deg(-90), mn.Vector3.x_axis())\n elif source.mesh == 'drum':\n scale = 1 / 1.8\n height_offset = 0.6\n elif source.mesh == 'classic_microphone':\n scale = 1 / 1.15\n height_offset = 0.67\n elif source.mesh == 'bluetooth_speaker':\n scale = 1 / 70\n height_offset = 1.0\n\n # Scale the object to fit the scene\n scaled_object_template = obj_templates_mgr.get_template_by_handle(object_template_handle)\n scaled_object_template.scale = np.array([scale, scale, scale])\n obj_templates_mgr.register_template(scaled_object_template, \"scaled\")\n object = rigid_obj_mgr.add_object_by_template_handle(\"scaled\")\n object.translation = np.array(source.position) + np.array([0, height_offset, 0])\n object.rotation = object_orientation\n\n object_ids.append(object.object_id)\n\n # rigid_obj_mgr.remove_all_objects()\n\n else:\n # update current source\n 
audio_sensor = self.sim.get_agent(self.aihabitat['default_agent'])._sensors['audio_sensor']\n audio_sensor.setAudioSourceTransform(source.position + np.array([0, self.aihabitat[\"sensor_height\"], 0])) # add 1.5m to the height calculation\n\n self.source_current = source # for reference\n\n return self\n\n def update_source_position(self,\n source_position\n ):\n \"\"\"\n Update Source position\n \"\"\"\n\n audio_sensor = self.sim.get_agent(self.aihabitat['default_agent'])._sensors['audio_sensor']\n audio_sensor.setAudioSourceTransform(source_position + np.array([0, self.aihabitat[\"sensor_height\"], 0])) # add 1.5m to the height calculation\n\n def render_ir(self,\n source_id: int\n ) -> torch.Tensor:\n \"\"\"\n Render IR given the source ID\n \"\"\"\n\n source = self.source_list[source_id]\n self.update_source(source)\n ir = torch.tensor(self.sim.get_sensor_observations()['audio_sensor'], device=self.device)\n\n return ir\n\n def render_ir_simple(self,\n source_position: T.Tuple[float, float, float],\n receiver_position: T.Tuple[float, float, float],\n ) -> torch.Tensor:\n \"\"\"\n Render IR given the source ID\n \"\"\"\n\n # source\n self.update_source_position(source_position)\n\n # receiver\n self.update_receiver_position(receiver_position)\n\n # render ir\n ir = torch.tensor(self.sim.get_sensor_observations()['audio_sensor'], device=self.device)\n\n return ir\n\n def render_ir_all(self) -> T.List[torch.Tensor]:\n \"\"\"\n Render IR for all sources\n \"\"\"\n\n ir_list = []\n for source_id in range(self.n_sources):\n print(f'Rendering IR {source_id}/{self.n_sources}...')\n ir = self.render_ir(source_id)\n ir_list.append(ir)\n\n return ir_list\n\n def render_image(self,\n is_instance=False\n ):\n \"\"\"\n Render image including rgb, depth, and semantic\n \"\"\"\n\n observation = self.sim.get_sensor_observations()\n rgb = observation[\"color_sensor\"]\n depth = observation[\"depth_sensor\"]\n\n # Semantic\n # semantic = sim.get_sensor_observations()[\"semantic_sensor\"]\n # is_valid = (depth != 0)\n # semantic[~is_valid] = semantic.max() + 1\n\n # if is_instance:\n # # Display instance id\n # aihabitat_utils.display_sample(rgb, semantic, depth, filename=f'{dir_results}/view/view_instance.png')\n # else:\n # # Display category id\n # category = aihabitat_utils.semantic_id_to_category_id(semantic, sim.semantic_scene.objects)\n # void_id = 0\n # category[~is_valid] = void_id\n # aihabitat_utils.display_sample(rgb, category, depth, filename=f'{dir_results}/view/view_category.png')\n\n return rgb, depth\n\n def render_envmap(self):\n \"\"\"\n Render environment map in *** format\n \"\"\"\n\n with suppress_stdout_and_stderr():\n angles = [0, 270, 180, 90]\n rgb_panorama = []\n depth_panorama = []\n\n for angle_offset in angles:\n angle = self.receiver.rotation + angle_offset\n agent = self.sim.get_agent(self.aihabitat[\"default_agent\"])\n new_state = self.sim.get_agent(self.aihabitat[\"default_agent\"]).get_state()\n new_state.rotation = quat_from_angle_axis(\n math.radians(angle), np.array([0, 1.0, 0])\n ) * quat_from_angle_axis(math.radians(0), np.array([1.0, 0, 0]))\n new_state.sensor_states = {}\n agent.set_state(new_state, True)\n\n observation = self.sim.get_sensor_observations()\n rgb_panorama.append(observation[\"color_sensor\"])\n depth_panorama.append((observation['depth_sensor']))\n envmap_rgb = np.concatenate(rgb_panorama, axis=1)\n envmap_depth = np.concatenate(depth_panorama, axis=1)\n\n # rotate receiver to original angle\n self.update_receiver(self.receiver)\n\n return 
envmap_rgb, envmap_depth\n\n def generate_xy_grid_points(self,\n grid_distance: float,\n height: float = None,\n filename_png: str = None,\n meters_per_pixel: float = 0.005\n ) -> torch.Tensor:\n \"\"\"\n Generate the 3D positions of grid points at the given height\n \"\"\"\n\n pathfinder = self.sim.pathfinder\n assert pathfinder.is_loaded\n # agent_height = pathfinder.nav_mesh_settings.agent_height # to be navigable, full body of the agent should be inside\n if height is None: # height of the agent foot\n height = 0\n # height = pathfinder.get_bounds()[0][1] # floor height\n\n # Sample grid\n bounds = pathfinder.get_bounds()\n x_points = torch.arange(bounds[0][0], bounds[1][0] + grid_distance, grid_distance)\n z_points = torch.arange(bounds[0][2], bounds[1][2] + grid_distance, grid_distance)\n x_grid, z_grid = torch.meshgrid(x_points, z_points)\n y_value = height * torch.ones_like(x_grid.reshape(-1))\n\n # Combine x, y, and z coordinates into a single tensor of points\n points = torch.stack([x_grid.reshape(-1), y_value.reshape(-1), z_grid.reshape(-1)], dim=-1)\n is_points_navigable = []\n for point in points:\n is_points_navigable.append(pathfinder.is_navigable(point)) # navigable points\n torch.tensor(is_points_navigable).sum()\n\n # Flatten the tensor of points into a list\n grid_points = points[is_points_navigable]\n\n # assert len(grid_points) > 0\n # save image\n if filename_png is not None:\n aihabitat_utils.save_town_map_grid(filename_png, pathfinder, grid_points, meters_per_pixel=meters_per_pixel)\n\n return grid_points\n\n def generate_data(self, use_dry_sound: bool = False):\n \"\"\"\n Generate all data including IR, envmap, audio, image\n \"\"\"\n\n # env map\n if self.include_visual_sensor:\n envmap_rgb, envmap_depth = self.render_image()\n else:\n envmap_rgb, envmap_depth = None, None\n\n # IR\n self.add_audio_sensor() # add audio_sensor after image rendering for faster image rendering\n ir_list = self.render_ir_all()\n # ir_total = sum_arrays_with_different_length(ir_list).detach().cpu()\n\n # audio_list\n dry_sound_list = []\n audio_list = []\n # audio_total = None\n if use_dry_sound:\n for source_id, source in enumerate(self.source_list):\n # load dry sound\n dry_sound = source.dry_sound\n if isinstance(dry_sound, str):\n dry_sound, sample_rate = torchaudio.load(dry_sound)\n self.dry_sound = dry_sound.to(self.device)\n self.sample_rate = sample_rate\n\n ir = ir_list[source_id]\n audio = torch.stack([audio_utils.fft_conv(dry_sound[0], ir_channel, is_cpu=True) for ir_channel in ir])\n dry_sound_list.append(dry_sound.detach().cpu())\n audio_list.append(audio.detach().cpu())\n\n # audio_total\n # audio_total = sum_arrays_with_different_length(audio_list)\n\n # cpu\n ir_list = [tensor.detach().cpu() for tensor in ir_list]\n\n # dirname = '.'\n # with open(f'{dirname}/debug.txt', 'w') as f:\n # f.write(f'NavMesh area: {self.sim.pathfinder.navigable_area}\\n')\n # f.write(f'NavMesh bounds: {self.sim.pathfinder.get_bounds()}\\n')\n # f.write(f'Receiver position: {self.receiver.position}\\n')\n # for s, source in enumerate(self.source_list):\n # f.write(f'Source {s} position: {source.position}\\n')\n # f.write(f'\\n')\n\n return dict(\n ir_list=ir_list,\n sample_rate=self.receiver.sample_rate,\n envmap=[envmap_rgb, envmap_depth],\n audio_list=audio_list,\n dry_sound_list=dry_sound_list,\n )" } ]
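One pattern worth noting in the context above: render_rir_parallel dispatches IR rendering in fixed-size batches, creating a fresh multiprocessing.Pool per batch, submitting each (room, source, receiver) tuple with apply_async, and updating a tqdm bar from the completion callback. The sketch below reproduces only that dispatch skeleton with a toy stand-in for render_ir (no habitat-sim rendering); the function and variable names in the sketch are illustrative, not part of the repository.

import multiprocessing
from tqdm import tqdm

def render_one(task_id):
    # Stand-in for render_ir: any picklable, CPU-bound unit of work.
    return task_id * task_id

def run_batched(num_tasks, batch_size=64):
    progress = tqdm(total=num_tasks)
    results = []
    for start in range(0, num_tasks, batch_size):
        batch = range(start, min(start + batch_size, num_tasks))
        with multiprocessing.Pool() as pool:
            tasks = [
                pool.apply_async(render_one, args=(i,), callback=lambda _: progress.update())
                for i in batch
            ]
            # Collect inside the with-block so workers stay alive until every task returns.
            results.extend(task.get() for task in tasks)
    progress.close()
    return results

if __name__ == "__main__":
    print(len(run_batched(200)))  # 200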
import os
import json
import argparse
import itertools
import subprocess
import typing as T
import torch
import imageio
import torchaudio
import numpy as np
import matplotlib.pyplot as plt
from moviepy.editor import *
from nvas3d.utils.dynamic_utils import convolve_moving_receiver, setup_dynamic_interp
from nvas3d.utils.audio_utils import clip_two, clip_all
from soundspaces_nvas3d.utils.ss_utils import create_scene, render_rir_parallel
from soundspaces_nvas3d.utils.aihabitat_utils import load_room_grid
from soundspaces_nvas3d.soundspaces_nvas3d import Receiver, Source, Scene
9,182
# # For licensing see accompanying LICENSE file. # Copyright (C) 2023 Apple Inc. All Rights Reserved. # def normalize(input: torch.Tensor) -> torch.Tensor: output = (input - input.min()) / (input.max() - input.min()) output = 2 * output - 1 return output def configure_scene_from_metadata( metadata: T.Dict[str, T.Any], image_size: T.Tuple[int, int] = (1000, 1000), hfov: float = 90.0, use_placeholder_mesh: bool = False ) -> Scene: """ Configures a scene using the provided metadata. Args: - metadata: Dictionary containing room and grid point information. - image_size: The size of the rendered image. - hfov: Horizontal field of view. - use_placeholder_mesh: Flag to determine if placeholder meshes should be used. Returns: - Configured scene object. """ room = metadata['room'][0] grid_points_source = metadata['grid_points'][0] source_idx_list = [metadata['source1_idx'][0].item(), metadata['source2_idx'][0].item()] receiver_idx_list_original = torch.tensor(metadata['receiver_idx_list'])[:4] scene = create_scene(room, image_size=image_size, hfov=hfov) if use_placeholder_mesh: # Add placeholder mesh for sources and receivers to the scene # Download the following mesh objects and locate it under data/objects/{mesh_name}.glb: # - "Bluetooth Speaker" (https://skfb.ly/6VLyL) by Ramanan is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/). # - “Classic Microphone” (https://skfb.ly/6Aryq) by urbanmasque is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/) # - "Standard Drum Set" (https://skfb.ly/owroB) by Heataker is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/). # - "3D Posed People" (https://renderpeople.com/free-3d-people/) by Renderpeople: The licensing for our Renderpeople products includes that customers are allowed to use the data for rendering still images and animations for commercial or private purposes, such as video production, broadcasting, print, movies, advertising, illustrations and presentations (https://renderpeople.com/faq/) ss_source1 = Source( position=grid_points_source[source_idx_list[0]], rotation=0, dry_sound='', mesh='bluetooth_speaker', # Need mesh object device=torch.device('cpu') ) ss_source2 = Source( position=grid_points_source[source_idx_list[1]], rotation=-90, dry_sound='', mesh='bluetooth_speaker', # Need mesh object device=torch.device('cpu') ) ss_mic_list = [ Source( position=grid_points_source[idx], rotation=180, dry_sound='', mesh='classic_microphone', # Need mesh object device=torch.device('cpu') ) for idx in receiver_idx_list_original ] scene.add_source_mesh = True scene.source_list = [None] * (len(source_idx_list) + len(receiver_idx_list_original)) scene.update_source(ss_source1, 0) scene.update_source(ss_source2, 1) for m, mic in enumerate(ss_mic_list): scene.update_source(mic, m + 2) return scene def interpolate_moving_audio( source1_audio: torch.Tensor, source2_audio: torch.Tensor, ir1_list: T.List[torch.Tensor], ir2_list: T.List[torch.Tensor], receiver_position: torch.Tensor ) -> T.Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Interpolates audio for a moving receiver. Args: - source1_audio: First source audio. - source2_audio: Second source audio. - ir1_list: List of impulse responses for source 1. - ir2_list: List of impulse responses for source 2. - receiver_position: Positions of the moving receiver. Returns: - Tuple containing combined audio, interpolated audio from source 1, and interpolated audio from source 2. 
""" # Prepare for interpolation audio_len = source1_audio.shape[-1] interp_index, interp_weight = setup_dynamic_interp(receiver_position.numpy(), audio_len) # Generate audio for moving receiver
# # For licensing see accompanying LICENSE file. # Copyright (C) 2023 Apple Inc. All Rights Reserved. # def normalize(input: torch.Tensor) -> torch.Tensor: output = (input - input.min()) / (input.max() - input.min()) output = 2 * output - 1 return output def configure_scene_from_metadata( metadata: T.Dict[str, T.Any], image_size: T.Tuple[int, int] = (1000, 1000), hfov: float = 90.0, use_placeholder_mesh: bool = False ) -> Scene: """ Configures a scene using the provided metadata. Args: - metadata: Dictionary containing room and grid point information. - image_size: The size of the rendered image. - hfov: Horizontal field of view. - use_placeholder_mesh: Flag to determine if placeholder meshes should be used. Returns: - Configured scene object. """ room = metadata['room'][0] grid_points_source = metadata['grid_points'][0] source_idx_list = [metadata['source1_idx'][0].item(), metadata['source2_idx'][0].item()] receiver_idx_list_original = torch.tensor(metadata['receiver_idx_list'])[:4] scene = create_scene(room, image_size=image_size, hfov=hfov) if use_placeholder_mesh: # Add placeholder mesh for sources and receivers to the scene # Download the following mesh objects and locate it under data/objects/{mesh_name}.glb: # - "Bluetooth Speaker" (https://skfb.ly/6VLyL) by Ramanan is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/). # - “Classic Microphone” (https://skfb.ly/6Aryq) by urbanmasque is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/) # - "Standard Drum Set" (https://skfb.ly/owroB) by Heataker is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/). # - "3D Posed People" (https://renderpeople.com/free-3d-people/) by Renderpeople: The licensing for our Renderpeople products includes that customers are allowed to use the data for rendering still images and animations for commercial or private purposes, such as video production, broadcasting, print, movies, advertising, illustrations and presentations (https://renderpeople.com/faq/) ss_source1 = Source( position=grid_points_source[source_idx_list[0]], rotation=0, dry_sound='', mesh='bluetooth_speaker', # Need mesh object device=torch.device('cpu') ) ss_source2 = Source( position=grid_points_source[source_idx_list[1]], rotation=-90, dry_sound='', mesh='bluetooth_speaker', # Need mesh object device=torch.device('cpu') ) ss_mic_list = [ Source( position=grid_points_source[idx], rotation=180, dry_sound='', mesh='classic_microphone', # Need mesh object device=torch.device('cpu') ) for idx in receiver_idx_list_original ] scene.add_source_mesh = True scene.source_list = [None] * (len(source_idx_list) + len(receiver_idx_list_original)) scene.update_source(ss_source1, 0) scene.update_source(ss_source2, 1) for m, mic in enumerate(ss_mic_list): scene.update_source(mic, m + 2) return scene def interpolate_moving_audio( source1_audio: torch.Tensor, source2_audio: torch.Tensor, ir1_list: T.List[torch.Tensor], ir2_list: T.List[torch.Tensor], receiver_position: torch.Tensor ) -> T.Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Interpolates audio for a moving receiver. Args: - source1_audio: First source audio. - source2_audio: Second source audio. - ir1_list: List of impulse responses for source 1. - ir2_list: List of impulse responses for source 2. - receiver_position: Positions of the moving receiver. Returns: - Tuple containing combined audio, interpolated audio from source 1, and interpolated audio from source 2. 
""" # Prepare for interpolation audio_len = source1_audio.shape[-1] interp_index, interp_weight = setup_dynamic_interp(receiver_position.numpy(), audio_len) # Generate audio for moving receiver
receiver_audio_1 = convolve_moving_receiver(source1_audio.numpy()[0], ir1_list.numpy(), interp_index, interp_weight)
0
2023-10-19 05:35:54+00:00
12k
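The demo record above renders audio for a moving receiver by combining setup_dynamic_interp and convolve_moving_receiver: every RIR along the path is convolved with the dry source once, and each output sample is a linear blend of the two nearest path positions. The sketch below folds both steps into one function with synthetic data so the indexing is easy to follow; it mirrors the implementations shown in the context field rather than calling the package itself.

import numpy as np
from scipy.signal import oaconvolve

def interpolate_at_moving_receiver(source_audio, rirs, positions):
    # source_audio: (audio_len,); rirs: (num_positions, num_channels, ir_len);
    # positions: (num_positions, 3) receiver path points.
    audio_len = source_audio.shape[0]
    num_channels = rirs.shape[1]

    # Constant-speed parameterization of the path (same idea as setup_dynamic_interp).
    dist = np.linalg.norm(np.diff(positions, axis=0), axis=1)
    samples_per_interval = np.round(dist / dist.sum() * audio_len).astype(int)
    samples_per_interval[-1] += audio_len - samples_per_interval.sum()  # absorb rounding error
    interp_index = np.repeat(np.arange(len(dist)), samples_per_interval)
    interp_weight = np.concatenate(
        [np.linspace(0, 1, n, endpoint=False) for n in samples_per_interval]
    )

    # Convolve once per (position, channel), then blend the two nearest positions per sample.
    convolved = oaconvolve(source_audio[None, None, :], rirs, axes=-1)[..., :audio_len]
    start = convolved[interp_index, np.arange(num_channels)[:, None], np.arange(audio_len)]
    end = convolved[interp_index + 1, np.arange(num_channels)[:, None], np.arange(audio_len)]
    return (1 - interp_weight) * start + interp_weight * end

# Synthetic example: 3 path points, 2 channels, 1 s of noise at 48 kHz.
audio = np.random.randn(48000).astype(np.float32)
rirs = np.random.randn(3, 2, 4000) * np.exp(-np.linspace(0, 8, 4000))
path = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 1.0]])
print(interpolate_at_moving_receiver(audio, rirs, path).shape)  # (2, 48000)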
tiejundong/FlexPose
FlexPose/utils/prediction.py
[ { "identifier": "FlexPose", "path": "FlexPose/model/layers.py", "snippet": "class FlexPose(torch.nn.Module):\n def __init__(self, args=None, param_path=None):\n super(FlexPose, self).__init__()\n if args is not None:\n self.init_param(args)\n else:\n self.init_param_with_save(param_path)\n\n def forward(self, complex_graph, explicit_cycle=False, cycle_i=0, args=None, epoch=1e+5):\n # for pregen data\n if self.do_pregen_data:\n complex_graph = self.pretrain(complex_graph)\n return complex_graph\n\n # with explicit cycle\n # if explicit_cycle:\n # if cycle_i == 0:\n # complex_graph = self.pretrain(complex_graph)\n # complex_graph = self.init_embed(complex_graph)\n # complex_graph = self.run_cycle(complex_graph, cycle_i)\n # tup_pred = self.pred_label(complex_graph)\n # return tup_pred\n\n # first embed\n complex_graph = self.init_embed(complex_graph)\n\n # MC cycle\n complex_graph = self.run_cycling(complex_graph)\n\n # prediction\n tup_pred = self.pred_label(complex_graph)\n\n return tup_pred\n\n def run_cycling(self, complex_graph):\n # MC cycle\n if self.training:\n cycle_num = random.sample(range(1, self.n_cycle + 1), 1)[0]\n for cycle_i in range(cycle_num - 1):\n with torch.no_grad():\n complex_graph = self.run_single_cycle(complex_graph, cycle_i)\n if self.use_min and cycle_i > 0:\n complex_graph = self.energy_min(complex_graph)\n complex_graph = self.run_single_cycle(complex_graph, cycle_num - 1)\n else:\n for cycle_i in range(self.n_cycle):\n complex_graph = self.run_single_cycle(complex_graph, cycle_i)\n if self.use_min and cycle_i > 0 and cycle_i < self.n_cycle-1:\n complex_graph = self.energy_min(complex_graph)\n return complex_graph\n\n def init_param(self, args):\n self.args = args\n self.n_cycle = args.n_cycle\n self.use_pretrain = args.use_pretrain\n self.do_pregen_data = args.do_pregen_data # do pre-generation\n self.use_pregen_data = args.use_pregen_data # use pre-generated data\n self.add_l_dismap = args.add_l_dismap\n self.coor_scale = args.coor_scale\n\n # pretrained\n if self.use_pretrain:\n self.p_encoder = PocketEncoder(args)\n self.l_feat_encoder = LigandFeatEncoder(args)\n self.load_encoder(args)\n\n # decoder\n self.c_decoder = ComplexDecoder(args)\n\n # E min\n self.use_min = args.MMFF_min\n if self.use_min:\n self.coor_min_object = CoorMin(args)\n\n # embedding\n # extra embedding for encoder (pretrain) and decoder\n if args.use_pretrain:\n # ligand embed\n self.l_extra_embed = True if args.l_x_sca_hidden != args.c_x_sca_hidden else False\n if self.l_extra_embed:\n self.l_x_sca_embed = make_embed(args.l_x_sca_hidden, args.c_x_sca_hidden)\n if self.add_l_dismap:\n self.l_edge_sca_embed = make_embed(args.l_edge_sca_hidden + 1, args.c_edge_sca_hidden)\n else:\n self.l_edge_sca_embed = make_embed(args.l_edge_sca_hidden, args.c_edge_sca_hidden)\n self.l_x_vec_embed = VecExpansion(args.l_x_vec_indim, args.c_x_vec_hidden)\n self.l_edge_vec_embed = VecExpansion(args.l_edge_vec_indim, args.c_edge_vec_hidden)\n\n # pocekt embed\n self.p_extra_embed = True if args.p_x_sca_hidden != args.c_x_sca_hidden else False\n self.p_x_sca_embed = make_embed(args.p_x_sca_hidden + 12, args.c_x_sca_hidden) # explicit torsion\n if self.p_extra_embed:\n self.p_edge_sca_embed = make_embed(args.p_edge_sca_hidden, args.c_edge_sca_hidden)\n self.p_x_vec_embed = VNL(args.p_x_vec_hidden, args.c_x_vec_hidden, leaky_relu=False)\n self.p_edge_vec_embed = VNL(args.p_edge_vec_hidden, args.c_edge_vec_hidden, leaky_relu=False)\n else:\n # ligand embed\n self.l_x_sca_embed = 
make_embed(args.l_x_sca_indim + 1, args.c_x_sca_hidden)\n if self.add_l_dismap:\n self.l_edge_sca_embed = make_embed(args.l_edge_sca_indim + 1 + 1, args.c_edge_sca_hidden)\n else:\n self.l_edge_sca_embed = make_embed(args.l_edge_sca_indim + 1, args.c_edge_sca_hidden)\n self.l_x_vec_embed = VecExpansion(args.l_x_vec_indim, args.c_x_vec_hidden)\n self.l_edge_vec_embed = VecExpansion(args.l_edge_vec_indim, args.c_edge_vec_hidden)\n\n # pocekt embed\n self.p_x_sca_embed = make_embed(args.p_x_sca_indim + 12 + 1, args.c_x_sca_hidden) # explicit torsion input\n self.p_edge_sca_embed = make_embed(args.p_edge_sca_indim, args.c_edge_sca_hidden)\n self.p_x_vec_embed = VNL(args.p_x_vec_indim, args.c_x_vec_hidden, leaky_relu=False)\n self.p_edge_vec_embed = VNL(args.p_edge_vec_indim, args.c_edge_vec_hidden, leaky_relu=False)\n\n\n # cycle\n self.x_gate = GVGateResidue(args.c_x_sca_hidden, args.c_x_vec_hidden, full_gate=True)\n self.edge_gate = GVGateResidue(args.c_edge_sca_hidden, args.c_edge_vec_hidden, full_gate=True)\n\n # for additional tasks\n self.pred_CB_layer = torch.nn.Sequential(\n GVP(args.c_x_sca_hidden, args.c_x_vec_hidden, args.c_x_sca_hidden, args.c_x_vec_hidden),\n GVL(args.c_x_sca_hidden, args.c_x_vec_hidden, args.c_x_sca_hidden, 1)\n )\n self.pred_tor_layer = torch.nn.Sequential(\n torch.nn.Linear(args.c_x_sca_hidden + args.c_x_vec_hidden,\n args.c_x_sca_hidden + args.c_x_vec_hidden),\n torch.nn.LeakyReLU(),\n torch.nn.Linear(args.c_x_sca_hidden + args.c_x_vec_hidden, 8),\n )\n self.pred_aff_layer = torch.nn.Sequential(\n torch.nn.Linear(args.c_x_sca_hidden + args.c_x_vec_hidden + args.c_edge_sca_hidden + args.c_edge_vec_hidden,\n args.c_x_sca_hidden + args.c_x_vec_hidden + args.c_edge_sca_hidden + args.c_edge_vec_hidden),\n torch.nn.LeakyReLU(),\n torch.nn.Linear(args.c_x_sca_hidden + args.c_x_vec_hidden + args.c_edge_sca_hidden + args.c_edge_vec_hidden, 1)\n )\n\n def init_param_with_save(self, param_path):\n if isinstance(param_path, str) and os.path.isfile(param_path):\n chk = torch.load(param_path, map_location='cpu')\n else:\n chk = load_FlexPose(param_path)\n self.init_param(chk['args'])\n self.load_state_dict(chk['model_state_dict'], strict=True)\n del chk\n\n def pred_label(self, complex_graph):\n # ligand coor\n l_coor_pred = rearrange(complex_graph.coor_hidden, 'b n h c -> (b n) h c')[complex_graph.ligand_node_loc_in_complex_flat]\n\n # CA\n CA_pred = rearrange(complex_graph.coor_hidden, 'b n h c -> (b n) h c')[complex_graph.p_partial_select_mask]\n\n # CB\n CB_pred = rearrange(complex_graph.coor + self.pred_CB_layer(complex_graph.x_sca_vec)[1].squeeze(-2),\n 'b n c -> (b n) c')[complex_graph.p_partial_select_mask]\n\n # aff\n x_sca_vec_cat = torch.cat([complex_graph.x_sca, complex_graph.x_vec.norm(p=2, dim=-1)], dim=-1)\n x_pooling = (x_sca_vec_cat * complex_graph.x_mask.float().unsqueeze(-1)).sum(dim=-2) / complex_graph.x_mask.float().sum(dim=-1, keepdims=True)\n edge_sca_vec_cat = torch.cat([complex_graph.edge_sca, complex_graph.edge_vec.norm(p=2, dim=-1)], dim=-1)\n edge_pooling = torch.einsum('b i j d -> b d', edge_sca_vec_cat * complex_graph.edge_mask.float().unsqueeze(-1)) / \\\n torch.einsum('b i j -> b', complex_graph.edge_mask.float()).unsqueeze(-1)\n x_edge_pooling = torch.cat([x_pooling, edge_pooling], dim=-1)\n aff_pred = self.pred_aff_layer(x_edge_pooling).squeeze(dim=-1)\n if not self.training:\n aff_pred = F.relu(aff_pred)\n\n # tor\n x_sca_vec_cat = rearrange(x_sca_vec_cat, 'b n d -> (b n) d')[complex_graph.p_partial_select_mask]\n # x_sca_vec_cat = 
torch.cat([x_sca_vec_cat, complex_graph.sc_in_partial_select], dim=-1)\n SC_pred = rearrange(self.pred_tor_layer(x_sca_vec_cat), '... (m s) -> ... m s', s=2)\n # if not self.training:\n # SC_pred = SC_pred.clamp(min=-1, max=1)\n\n return (l_coor_pred, CA_pred, CB_pred, aff_pred, SC_pred)\n\n @torch.no_grad()\n def infer(self, complex_graph):\n complex_graph = self.init_embed(complex_graph)\n complex_graph = self.run_cycling(complex_graph)\n\n # ligand coor\n coor_pred = complex_graph.coor_hidden\n\n CB_pred = complex_graph.coor + self.pred_CB_layer(complex_graph.x_sca_vec)[1].squeeze(-2)\n\n # aff\n x_sca_vec_cat = torch.cat([complex_graph.x_sca, complex_graph.x_vec.norm(p=2, dim=-1)], dim=-1)\n x_pooling = (x_sca_vec_cat * complex_graph.x_mask.float().unsqueeze(-1)).sum(\n dim=-2) / complex_graph.x_mask.float().sum(dim=-1, keepdims=True)\n edge_sca_vec_cat = torch.cat([complex_graph.edge_sca, complex_graph.edge_vec.norm(p=2, dim=-1)], dim=-1)\n edge_pooling = torch.einsum('b i j d -> b d',\n edge_sca_vec_cat * complex_graph.edge_mask.float().unsqueeze(-1)) / \\\n torch.einsum('b i j -> b', complex_graph.edge_mask.float()).unsqueeze(-1)\n x_edge_pooling = torch.cat([x_pooling, edge_pooling], dim=-1)\n aff_pred = self.pred_aff_layer(x_edge_pooling).squeeze(dim=-1)\n aff_pred = F.relu(aff_pred)\n\n # tor\n SC_pred = rearrange(self.pred_tor_layer(x_sca_vec_cat), '... (m s) -> ... m s', s=2)\n\n return (coor_pred, CB_pred, aff_pred, SC_pred)\n\n def load_encoder(self, args):\n if 'pretrain_protein_encoder' in args.__dict__.keys():\n if isinstance(args.pretrain_protein_encoder, str) and os.path.isfile(args.pretrain_protein_encoder):\n print('Loading pre-trained protein encoder ...')\n p_param = torch.load(args.pretrain_protein_encoder, map_location='cpu')\n else:\n p_param = load_pretrained_protein_encoder(args.pretrain_protein_encoder)\n self.p_encoder.load_state_dict(p_param['model_state_dict'], strict=True)\n del p_param\n else:\n pass\n # print('Skip loading pre-trained protein encoder parameters')\n\n if 'pretrain_ligand_encoder' in args.__dict__.keys():\n if isinstance(args.pretrain_ligand_encoder, str) and os.path.isfile(args.pretrain_ligand_encoder):\n print('Loading pre-trained ligand encoder ...')\n l_param = torch.load(args.pretrain_ligand_encoder, map_location='cpu')\n else:\n l_param = load_pretrained_ligand_encoder(args.pretrain_ligand_encoder)\n self.l_feat_encoder.load_state_dict(l_param['model_state_dict'], strict=True)\n del l_param\n else:\n pass\n # print('Skip loading pre-trained ligand encoder parameters')\n\n def pretrain(self, complex_graph):\n # pretrain\n cur_state = self.training\n self.train(False)\n with torch.no_grad():\n complex_graph = self.p_encoder(complex_graph)\n complex_graph = self.l_feat_encoder(complex_graph)\n self.train(cur_state)\n\n complex_graph.p_x_sca_vec_pretrained = complex_graph.p_x_sca_vec\n complex_graph.p_edge_sca_vec_pretrained = complex_graph.p_edge_sca_vec\n complex_graph.l_x_sca_pretrained = complex_graph.l_x_sca\n complex_graph.l_edge_sca_pretrained = complex_graph.l_edge_sca\n return complex_graph\n\n def init_embed(self, complex_graph):\n if self.use_pretrain:\n if not self.use_pregen_data:\n complex_graph = self.pretrain(complex_graph)\n\n # pocket\n p_x_sca, p_x_vec = complex_graph.p_x_sca_vec_pretrained\n p_edge_sca, p_edge_vec = complex_graph.p_edge_sca_vec_pretrained\n p_x_sca = torch.cat([p_x_sca, complex_graph.sc_in], dim=-1) # explicit torsion input\n p_x_sca = self.p_x_sca_embed(p_x_sca)\n if self.p_extra_embed:\n p_edge_sca = 
self.p_edge_sca_embed(p_edge_sca)\n p_x_vec = self.p_x_vec_embed(p_x_vec)\n p_edge_vec = self.p_edge_vec_embed(p_edge_vec)\n\n # ligand\n l_x_sca = complex_graph.l_x_sca_pretrained\n l_edge_sca = complex_graph.l_edge_sca_pretrained\n if self.l_extra_embed:\n l_x_sca = self.l_x_sca_embed(l_x_sca)\n if self.add_l_dismap:\n l_edge_sca = self.l_edge_sca_embed(torch.cat([l_edge_sca, complex_graph.l_dismap.unsqueeze(-1)], dim=-1))\n else:\n l_edge_sca = self.l_edge_sca_embed(l_edge_sca)\n l_x_vec = self.l_x_vec_embed(complex_graph.l_x_vec_init)\n l_edge_vec = self.l_edge_vec_embed(complex_graph.l_edge_vec_init)\n else:\n # pocket\n # explicit torsion input\n p_x_sca = torch.cat([complex_graph.p_x_sca_init, complex_graph.sc_in], dim=-1)\n p_x_sca = self.p_x_sca_embed(p_x_sca)\n p_edge_sca = self.p_edge_sca_embed(complex_graph.p_edge_sca_init)\n p_x_vec = self.p_x_vec_embed(complex_graph.p_x_vec_init)\n p_edge_vec = self.p_edge_vec_embed(complex_graph.p_edge_vec_init)\n\n # ligand\n l_x_sca = self.l_x_sca_embed(complex_graph.l_x_sca_init)\n if self.add_l_dismap:\n l_edge_sca = self.l_edge_sca_embed(torch.cat([complex_graph.l_edge_sca_init, complex_graph.l_dismap.unsqueeze(-1)], dim=-1))\n else:\n l_edge_sca = self.l_edge_sca_embed(complex_graph.l_edge_sca_init)\n l_x_vec = self.l_x_vec_embed(complex_graph.l_x_vec_init)\n l_edge_vec = self.l_edge_vec_embed(complex_graph.l_edge_vec_init)\n\n # merge\n complex_graph.x_sca_init = torch.cat([p_x_sca, l_x_sca], dim=1)\n complex_graph.x_vec_init = torch.cat([p_x_vec, l_x_vec], dim=1)\n complex_graph.x_sca_vec_init = (complex_graph.x_sca_init, complex_graph.x_vec_init)\n complex_graph.x_mask = torch.cat([complex_graph.p_x_mask, complex_graph.l_x_mask], dim=1)\n complex_graph.edge_sca_init = self.cat_edge(p_edge_sca, l_edge_sca)\n complex_graph.edge_vec_init = self.cat_edge(p_edge_vec, l_edge_vec)\n complex_graph.edge_sca_vec_init = (complex_graph.edge_sca_init, complex_graph.edge_vec_init)\n complex_graph.edge_mask = self.cat_edge(complex_graph.p_edge_mask, complex_graph.l_edge_mask)\n complex_graph.coor_init = torch.cat([complex_graph.p_coor_init, complex_graph.l_coor_init], dim=1)\n\n return complex_graph\n\n def run_single_cycle(self, complex_graph, cycle_i=0):\n if cycle_i == 0:\n complex_graph.x_sca_vec = complex_graph.x_sca_vec_init\n complex_graph.edge_sca_vec = complex_graph.edge_sca_vec_init\n complex_graph.coor = complex_graph.coor_init\n else:\n complex_graph.x_sca_vec = self.x_gate(complex_graph.x_sca_vec, complex_graph.x_sca_vec_init)\n complex_graph.edge_sca_vec = self.edge_gate(complex_graph.edge_sca_vec, complex_graph.edge_sca_vec_init)\n complex_graph = self.c_decoder(complex_graph)\n complex_graph.x_sca, complex_graph.x_vec = complex_graph.x_sca_vec\n complex_graph.edge_sca, complex_graph.edge_vec = complex_graph.edge_sca_vec\n return complex_graph\n\n def energy_min(self, complex_graph, loop=None, constraint=None, show_state=False, min_type='GD'):\n if self.use_min:\n coor_flat = rearrange(complex_graph.coor, 'b n c -> (b n) c')\n l_coor_pred = coor_flat[complex_graph.ligand_node_loc_in_complex_flat]\n l_coor_min = self.coor_min_object(l_coor_pred * self.coor_scale, complex_graph,\n loop=loop, constraint=constraint,\n show_state=show_state, min_type=min_type)\n coor_flat[complex_graph.ligand_node_loc_in_complex_flat] = l_coor_min / self.coor_scale\n complex_graph.coor = rearrange(coor_flat, '(b n) c -> b n c', b=complex_graph.coor.size(0))\n return complex_graph\n\n def cat_edge(self, edge_1, edge_2):\n d_1 = edge_1.size(1)\n d_2 = 
edge_2.size(1)\n if len(edge_1.size()) == 3:\n edge_1_pad = (0, d_2)\n edge_2_pad = (d_1, 0)\n elif len(edge_1.size()) == 4:\n edge_1_pad = (0, 0, 0, d_2)\n edge_2_pad = (0, 0, d_1, 0)\n elif len(edge_1.size()) == 5:\n edge_1_pad = (0, 0, 0, 0, 0, d_2)\n edge_2_pad = (0, 0, 0, 0, d_1, 0)\n else:\n assert len(edge_1.size()) in [3, 4, 5]\n edge_1 = F.pad(edge_1, edge_1_pad, 'constant', 0)\n edge_2 = F.pad(edge_2, edge_2_pad, 'constant', 0)\n edge = torch.cat([edge_1, edge_2], dim=1)\n return edge" }, { "identifier": "try_prepare_task", "path": "FlexPose/preprocess/prepare_for_training.py", "snippet": "def try_prepare_task(intup):\n f, task = intup\n try:\n f(task)\n return True\n except:\n return False" }, { "identifier": "pred_ens", "path": "FlexPose/utils/APOPDBbind_data.py", "snippet": "@torch.no_grad()\ndef pred_ens(coor_pred, dic_data, return_raw=False):\n # coor_pred: [ens, n_atom, 3]\n ens = coor_pred.shape[0]\n l_match = dic_data.l_match.reshape(ens, -1)[0]\n\n if ens > 1:\n ens_pred = coor_pred[0]\n first_pred = coor_pred[0]\n\n rest_pred = coor_pred[1:]\n\n rmsd_match_ens, tmp_pred, n_match = calc_rmsd(rest_pred,\n repeat(first_pred, 'n c -> e n c', e=rest_pred.size(0)),\n match=l_match) # return [match, ens]\n min_index = rmsd_match_ens.min(dim=0, keepdims=True)[1]\n rest_ens_matched_pred = torch.gather(tmp_pred, dim=0,\n index=repeat(min_index, 'm e -> m n e c', n=rest_pred.size(1),\n c=3)).squeeze(0) # to [n_atom, ens-1, 3]\n ens_pred = torch.cat([first_pred.unsqueeze(1), rest_ens_matched_pred], dim=1)\n if return_raw:\n return ens_pred\n else:\n ens_pred = ens_pred.mean(dim=1)\n else:\n ens_pred = coor_pred[0]\n\n return ens_pred" }, { "identifier": "MMFF_keys", "path": "FlexPose/model/MMFF.py", "snippet": "def pad_zero_param(index, param, index_dim, param_dim):\ndef get_BS_param(mol, props, dic_MMFF_param, dic_tmp):\ndef get_angle_by_bond(bond_list):\ndef get_AB_param(mol, props, dic_MMFF_param, dic_tmp):\ndef filter_SB(mol, props, SB_index):\ndef get_SB_param(mol, props, dic_MMFF_param, dic_tmp):\ndef get_dict_bond(bond_list):\ndef get_oop(mol, props, dic_bond, angle_list):\ndef get_OOP_param(mol, props, dic_MMFF_param, dic_tmp):\ndef get_torsion(mol, props, dic_bond, angle_list):\ndef get_TOR_param(mol, props, dic_MMFF_param, dic_tmp):\ndef get_14(mol):\ndef get_noncov_pair(mol, bond_list, angle_list, pair_14_list):\ndef get_VDW_param(mol, props, dic_MMFF_param, dic_tmp, select_pair_index=None):\ndef get_ELE_param(mol, props, dic_MMFF_param, dic_tmp):\ndef add_batch_info(dic_MMFF_param):\ndef get_MMFF_param(mol, props=None, strict=False):\ndef get_MMFF_param_for_complex(protein_mol, ligand_mol):\n def __init__(self, split_interact=False, warm=False):\n def __call__(self, coor, MMFF_param, return_sum=True):\n def get_BS(self, coor, BS_index, BS_param, BS_batch):\n def get_AB(self, coor, AB_index, AB_param, AB_batch):\n def get_SB(self, coor, SB_index, SB_param, SB_batch):\n def get_OOP(self, coor, OOP_index, OOP_param, OOP_batch):\n def get_TOR(self, coor, TOR_index, TOR_param, TOR_batch):\n def get_VDW(self, coor, VDW_index, VDW_param, VDW_batch):\n def get_ELE(self, coor, ELE_index, ELE_param, ELE_batch):\n def get_angle(self, coor, angle_index):\n def get_oop(self, coor, oop_index):\n def get_dihedral_angle(self, coor, dihedral_index):\n def get_cross(self, v1, v2):\n def split_to_single_dim(self, x):\n A = np.array([143.9325*kb/2 for kb, r0 in BS_param])\n B = np.array([1 for kb, r0 in BS_param])\n C = np.array([-2 for kb, r0 in BS_param])\n D = np.array([7/12*(-2)**2 for 
kb, r0 in BS_param])\n A = np.array([0.043844*ka/2 for lin, ka, theta0 in AB_param])\n B = np.array([1 for lin, ka, theta0 in AB_param])\n C = np.array([-0.006981317 for lin, ka, theta0 in AB_param])\n D = np.array([143.9325*ka for lin, ka, theta0 in AB_param])\n E = np.array([143.9325*ka for lin, ka, theta0 in AB_param])\n A = np.array([2.51210*kbaIJK for kbaIJK, kbaKJI, r0ij, r0kj, theta0 in SB_param])\n B = np.array([2.51210*kbaKJI for kbaIJK, kbaKJI, r0ij, r0kj, theta0 in SB_param])\n A = np.array([0.043844*koop/2 for koop in OOP_param])\n A = np.array([0.5*(V1+V2+V3) for V1, V2, V3 in TOR_param])\n B = np.array([0.5*V1 for V1, V2, V3 in TOR_param])\n C = np.array([0.5*(-V2) for V1, V2, V3 in TOR_param])\n D = np.array([0.5*V3 for V1, V2, V3 in TOR_param])\n A = np.array([epsilon*(1.07*R_ij_star)**7 for R_ij_star, epsilon in VDW_param])\n B = np.array([0.07*R_ij_star for R_ij_star, epsilon in VDW_param])\n C = np.array([1.12*R_ij_star**7 for R_ij_star, epsilon in VDW_param])\n D = np.array([0.12*R_ij_star**7 for R_ij_star, epsilon in VDW_param])\n E = np.array([-2 for R_ij_star, epsilon in VDW_param])\n A = np.array([332.07169*flag_14*qi*qj for flag_14, qi, qj in ELE_param])\n B = np.array([0.05 for flag_14, qi, qj in ELE_param])\n A = OOP_param\n A, B, C, D = TOR_param.split(1, dim=-1)\n A, B, C, D, E = VDW_param.split(1, dim=-1)\n A, B = ELE_param.split(1, dim=-1)\nclass MMFFLoss():" } ]
import os
import shutil
import sys
import argparse
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
import pyrosetta
import pyrosetta
from biopandas.pdb import PandasPdb
from collections import defaultdict
from ray.util.multiprocessing import Pool
from rdkit import Chem
from rdkit.Chem import AllChem, Descriptors
from einops import rearrange, repeat
from torch_scatter import scatter_min, scatter_add
from FlexPose.model.layers import FlexPose
from FlexPose.utils.common import *
from FlexPose.preprocess.prepare_for_training import try_prepare_task
from FlexPose.utils.APOPDBbind_data import pred_ens
from FlexPose.utils.pdbbind_preprocess import *
from FlexPose.utils.data_utils import *
from FlexPose.model.MMFF import MMFF_keys, MMFF_pad_dim, get_MMFF_param
from tqdm.notebook import tqdm, trange
from tqdm import tqdm, trange
from modeller import Environ
from modeller.scripts import complete_pdb
7,996
torch.cuda.set_device(device) def get_torsion_from_pose(pose): bb_torsion = [] sc_torsion = [] for i in range(1, pose.size() + 1): try: res = pose.residue(i) assert res.name3() in ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET', 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL'] phi_psi = [pose.phi(i), pose.psi(i)] chi = [c for c in res.chi()] bb_torsion.append(phi_psi) sc_torsion.append(chi) except: bb_torsion.append([None]) sc_torsion.append([None]) return {'bb_torsion': bb_torsion, 'sc_torsion': sc_torsion} def prepare_single_input(tupin): f_name_list, idx, cache_path = tupin p_path, l_path, ref_path = f_name_list max_len_ligand = 150 max_len_pocket = 150 # =========== ligand encoding =========== ligand_mol = read_rdkit_mol(l_path) if l_path.endswith('mol2'): ligand_template = ligand_mol else: mol2 = '.'.join(l_path.split('.')[:-1]) + '.mol2' if os.path.exists(mol2): try: ligand_template = Chem.MolFromMol2File(mol2) ligand_mol = AllChem.AssignBondOrdersFromTemplate(ligand_template, ligand_mol) print(f'Found mol2 {mol2} as input.') except: ligand_template = ligand_mol else: ligand_template = ligand_mol if ligand_mol.GetNumConformers() == 0: AllChem.EmbedMolecule(ligand_mol, maxAttempts=10, useRandomCoords=True, clearConfs=False) ff = Chem.rdForceFieldHelpers.MMFFGetMoleculeForceField( ligand_mol, Chem.rdForceFieldHelpers.MMFFGetMoleculeProperties(ligand_mol)) for atom_i in range(ligand_mol.GetNumAtoms()): ff.MMFFAddPositionConstraint(atom_i, 1, 100) # maxDispl: maximum displacement ff.Minimize(maxIts=20) try: dic_MMFF_param = get_MMFF_param(ligand_template) except: dic_MMFF_param = None ligand_node_features = get_node_feature(ligand_template, 'ligand') ligand_edge, ligand_edge_features = get_ligand_edge_feature(ligand_template) ligand_match = get_ligand_match(ligand_template) ligand_dismap = get_ligand_unrotable_distance(ligand_template) # not use in our model ligand_coor_true = get_true_posi(ligand_mol) ligand_coor_true = ligand_coor_true[get_ligand_match(ligand_mol, ligand_template)[0]] ligand_data = [ligand_node_features, ligand_edge_features, ligand_coor_true, ligand_match, ligand_dismap] assert len(ligand_node_features) <= max_len_ligand, 'ligand atoms need less than 150' # =========== protein encoding =========== # load modeller again for ray with suppress_stdout_stderr(): env_ = Environ() env_.libs.topology.read(file='$(LIB)/top_heav.lib') env_.libs.parameters.read(file='$(LIB)/par.lib') fixed_protein_path = cache_path + f'/{idx}_protein_tmp.pdb' pdb_m = complete_pdb(env_, p_path) pdb_m.write(fixed_protein_path) opts = '-mute true -ignore_unrecognized_res true' pyrosetta.distributed.init(opts) pose = pyrosetta.io.pose_from_pdb(fixed_protein_path) dic_tor = get_torsion_from_pose(pose) ref_mol = read_rdkit_mol(ref_path, silence=True) ref_coor = get_true_posi(ref_mol) biodf_protein = PandasPdb().read_pdb(fixed_protein_path) df_protein = biodf_protein.df['ATOM'] df_protein['chain_resi'] = df_protein['chain_id'].astype(str) + '_' + df_protein['residue_number'].astype(str) df_pocket, sele_res = get_pocket(df_protein, ref_coor, max_len_protein=max_len_pocket) SCtorsion_data = get_torsion(dic_tor, df_protein, df_pocket) protein_data = encode_pocket(df_pocket) + [SCtorsion_data] assert protein_data[0].shape[0] == SCtorsion_data[0].shape[0] assert protein_data[0].shape[0] <= max_len_pocket, 'pocket residues need less than 150' # os.remove(fixed_protein_path) dic_data = dict( ligand_data=ligand_data, protein_data=protein_data, 
protein_path=fixed_protein_path, ligand_path=l_path, sele_res=sele_res, dic_MMFF_param=dic_MMFF_param, ) pickle.dump(dic_data, open(cache_path + '/{}.pkl'.format(idx), 'wb')) return True def preprare_input_data(input_list, cache_path, prepare_data_with_multi_cpu): delmkdir(cache_path) tasks = [] for idx, f_name_list in enumerate(input_list): tasks.append((prepare_single_input, (f_name_list, idx, cache_path))) fail = 0 if prepare_data_with_multi_cpu: pool = Pool() print('Preparing input data...')
sys.path.append('/'.join(os.path.abspath(__file__).split('/')[:-2])) opts = '-mute true -ignore_unrecognized_res true' pyrosetta.distributed.init(opts) if is_notebook(): else: def set_device(device): if device == 'cpu': torch.set_num_threads(16) else: torch.cuda.set_device(device) def get_torsion_from_pose(pose): bb_torsion = [] sc_torsion = [] for i in range(1, pose.size() + 1): try: res = pose.residue(i) assert res.name3() in ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET', 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL'] phi_psi = [pose.phi(i), pose.psi(i)] chi = [c for c in res.chi()] bb_torsion.append(phi_psi) sc_torsion.append(chi) except: bb_torsion.append([None]) sc_torsion.append([None]) return {'bb_torsion': bb_torsion, 'sc_torsion': sc_torsion} def prepare_single_input(tupin): f_name_list, idx, cache_path = tupin p_path, l_path, ref_path = f_name_list max_len_ligand = 150 max_len_pocket = 150 # =========== ligand encoding =========== ligand_mol = read_rdkit_mol(l_path) if l_path.endswith('mol2'): ligand_template = ligand_mol else: mol2 = '.'.join(l_path.split('.')[:-1]) + '.mol2' if os.path.exists(mol2): try: ligand_template = Chem.MolFromMol2File(mol2) ligand_mol = AllChem.AssignBondOrdersFromTemplate(ligand_template, ligand_mol) print(f'Found mol2 {mol2} as input.') except: ligand_template = ligand_mol else: ligand_template = ligand_mol if ligand_mol.GetNumConformers() == 0: AllChem.EmbedMolecule(ligand_mol, maxAttempts=10, useRandomCoords=True, clearConfs=False) ff = Chem.rdForceFieldHelpers.MMFFGetMoleculeForceField( ligand_mol, Chem.rdForceFieldHelpers.MMFFGetMoleculeProperties(ligand_mol)) for atom_i in range(ligand_mol.GetNumAtoms()): ff.MMFFAddPositionConstraint(atom_i, 1, 100) # maxDispl: maximum displacement ff.Minimize(maxIts=20) try: dic_MMFF_param = get_MMFF_param(ligand_template) except: dic_MMFF_param = None ligand_node_features = get_node_feature(ligand_template, 'ligand') ligand_edge, ligand_edge_features = get_ligand_edge_feature(ligand_template) ligand_match = get_ligand_match(ligand_template) ligand_dismap = get_ligand_unrotable_distance(ligand_template) # not use in our model ligand_coor_true = get_true_posi(ligand_mol) ligand_coor_true = ligand_coor_true[get_ligand_match(ligand_mol, ligand_template)[0]] ligand_data = [ligand_node_features, ligand_edge_features, ligand_coor_true, ligand_match, ligand_dismap] assert len(ligand_node_features) <= max_len_ligand, 'ligand atoms need less than 150' # =========== protein encoding =========== # load modeller again for ray with suppress_stdout_stderr(): env_ = Environ() env_.libs.topology.read(file='$(LIB)/top_heav.lib') env_.libs.parameters.read(file='$(LIB)/par.lib') fixed_protein_path = cache_path + f'/{idx}_protein_tmp.pdb' pdb_m = complete_pdb(env_, p_path) pdb_m.write(fixed_protein_path) opts = '-mute true -ignore_unrecognized_res true' pyrosetta.distributed.init(opts) pose = pyrosetta.io.pose_from_pdb(fixed_protein_path) dic_tor = get_torsion_from_pose(pose) ref_mol = read_rdkit_mol(ref_path, silence=True) ref_coor = get_true_posi(ref_mol) biodf_protein = PandasPdb().read_pdb(fixed_protein_path) df_protein = biodf_protein.df['ATOM'] df_protein['chain_resi'] = df_protein['chain_id'].astype(str) + '_' + df_protein['residue_number'].astype(str) df_pocket, sele_res = get_pocket(df_protein, ref_coor, max_len_protein=max_len_pocket) SCtorsion_data = get_torsion(dic_tor, df_protein, df_pocket) protein_data = encode_pocket(df_pocket) + [SCtorsion_data] assert 
protein_data[0].shape[0] == SCtorsion_data[0].shape[0] assert protein_data[0].shape[0] <= max_len_pocket, 'pocket residues need less than 150' # os.remove(fixed_protein_path) dic_data = dict( ligand_data=ligand_data, protein_data=protein_data, protein_path=fixed_protein_path, ligand_path=l_path, sele_res=sele_res, dic_MMFF_param=dic_MMFF_param, ) pickle.dump(dic_data, open(cache_path + '/{}.pkl'.format(idx), 'wb')) return True def preprare_input_data(input_list, cache_path, prepare_data_with_multi_cpu): delmkdir(cache_path) tasks = [] for idx, f_name_list in enumerate(input_list): tasks.append((prepare_single_input, (f_name_list, idx, cache_path))) fail = 0 if prepare_data_with_multi_cpu: pool = Pool() print('Preparing input data...')
for r in pool.map(try_prepare_task, tasks):
1
2023-10-19 22:03:51+00:00
12k
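Note on the MMFF.py context snippet in the record above: the coefficient arrays it builds (A = 143.9325*kb/2, B = 1, C = -2, D = 7/12*(-2)**2) correspond to the MMFF94 quartic bond-stretch expansion. A minimal illustrative sketch follows, assuming Angstrom bond lengths and kcal/mol force constants; the helper name and the example numbers are hypothetical and are not part of the dataset or of the FlexPose code.

def bond_stretch_energy(r, r0, kb):
    """MMFF94-style bond stretch term, matching the packed coefficients in the
    snippet above: A = 143.9325*kb/2, B = 1, C = -2, D = 7/12*(-2)**2."""
    A = 143.9325 * kb / 2.0
    B, C, D = 1.0, -2.0, 7.0 / 12.0 * (-2.0) ** 2
    dr = r - r0  # deviation from the reference bond length (Angstrom)
    return A * dr ** 2 * (B + C * dr + D * dr ** 2)

# Example with made-up numbers: a slightly stretched bond gives a small positive energy.
print(bond_stretch_energy(r=1.55, r0=1.52, kb=4.3))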
openvpi/SingingVocoders
training/univnet.py
[ { "identifier": "UnivNet", "path": "models/univnet/univnet.py", "snippet": "class UnivNet(torch.nn.Module):\n \"\"\"Parallel WaveGAN Generator module.\"\"\"\n\n def __init__(self, h, use_weight_norm=True):\n\n super().__init__()\n\n in_channels = h['model_args']['cond_in_channels']\n out_channels = h['model_args']['out_channels']\n inner_channels = h['model_args']['cg_channels']\n cond_channels = h['audio_num_mel_bins']\n upsample_ratios = h['model_args']['upsample_rates']\n lvc_layers_each_block = h['model_args']['num_lvc_blocks']\n lvc_kernel_size = h['model_args']['lvc_kernels']\n kpnet_hidden_channels = h['model_args']['lvc_hidden_channels']\n kpnet_conv_size = h['model_args']['lvc_conv_size']\n dropout = h['model_args']['dropout']\n\n upmel=h['model_args'].get('upmel')\n self.upblocke=torch.nn.Sequential(*[Upspamper() for i in range(upmel//2)]) if upmel is not None or upmel==1 else torch.nn.Identity()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.cond_channels = cond_channels\n self.lvc_block_nums = len(upsample_ratios)\n\n # define first convolution\n self.first_conv = torch.nn.Conv1d(in_channels, inner_channels,\n kernel_size=7, padding=(7 - 1) // 2,\n dilation=1, bias=True)\n\n # define residual blocks\n self.lvc_blocks = torch.nn.ModuleList()\n cond_hop_length = 1\n for n in range(self.lvc_block_nums):\n cond_hop_length = cond_hop_length * upsample_ratios[n]\n lvcb = LVCBlock(\n in_channels=inner_channels,\n cond_channels=cond_channels,\n upsample_ratio=upsample_ratios[n],\n conv_layers=lvc_layers_each_block,\n conv_kernel_size=lvc_kernel_size,\n cond_hop_length=cond_hop_length,\n kpnet_hidden_channels=kpnet_hidden_channels,\n kpnet_conv_size=kpnet_conv_size,\n kpnet_dropout=dropout,\n )\n self.lvc_blocks += [lvcb]\n\n # define output layers\n self.last_conv_layers = torch.nn.ModuleList([\n torch.nn.Conv1d(inner_channels, out_channels, kernel_size=7, padding=(7 - 1) // 2,\n dilation=1, bias=True),\n\n ])\n\n # apply weight norm\n if use_weight_norm:\n self.apply_weight_norm()\n\n def forward(self, x, c):\n \"\"\"Calculate forward propagation.\n Args:\n x (Tensor): Input noise signal (B, 1, T).\n c (Tensor): Local conditioning auxiliary features (B, C ,T').\n Returns:\n Tensor: Output tensor (B, out_channels, T)\n \"\"\"\n\n x = self.first_conv(x)\n c=self.upblocke(c)\n\n for n in range(self.lvc_block_nums):\n x = self.lvc_blocks[n](x, c)\n\n # apply final layers\n for f in self.last_conv_layers:\n x = F.leaky_relu(x, LRELU_SLOPE)\n x = f(x)\n x = torch.tanh(x)\n return x\n\n def remove_weight_norm(self):\n \"\"\"Remove weight normalization module from all of the layers.\"\"\"\n def _remove_weight_norm(m):\n try:\n logging.debug(f\"Weight norm is removed from {m}.\")\n torch.nn.utils.remove_weight_norm(m)\n except ValueError: # this module didn't have weight norm\n return\n\n self.apply(_remove_weight_norm)\n\n def apply_weight_norm(self):\n \"\"\"Apply weight normalization module from all of the layers.\"\"\"\n def _apply_weight_norm(m):\n if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d):\n torch.nn.utils.weight_norm(m)\n logging.debug(f\"Weight norm is applied to {m}.\")\n\n self.apply(_apply_weight_norm)\n\n @staticmethod\n def _get_receptive_field_size(layers, stacks, kernel_size,\n dilation=lambda x: 2 ** x):\n assert layers % stacks == 0\n layers_per_cycle = layers // stacks\n dilations = [dilation(i % layers_per_cycle) for i in range(layers)]\n return (kernel_size - 1) * sum(dilations) + 1\n\n @property\n def 
receptive_field_size(self):\n \"\"\"Return receptive field size.\"\"\"\n return self._get_receptive_field_size(self.layers, self.stacks, self.kernel_size)\n\n def inference(self, c=None, x=None):\n \"\"\"Perform inference.\n Args:\n c (Union[Tensor, ndarray]): Local conditioning auxiliary features (T' ,C).\n x (Union[Tensor, ndarray]): Input noise signal (T, 1).\n Returns:\n Tensor: Output tensor (T, out_channels)\n \"\"\"\n if x is not None:\n if not isinstance(x, torch.Tensor):\n x = torch.tensor(x, dtype=torch.float).to(next(self.parameters()).device)\n x = x.transpose(1, 0).unsqueeze(0)\n else:\n assert c is not None\n x = torch.randn(1, 1, len(c) * self.upsample_factor).to(next(self.parameters()).device)\n if c is not None:\n if not isinstance(c, torch.Tensor):\n c = torch.tensor(c, dtype=torch.float).to(next(self.parameters()).device)\n c = c.transpose(1, 0).unsqueeze(0)\n c = torch.nn.ReplicationPad1d(self.aux_context_window)(c)\n return self.forward(x, c).squeeze(0).transpose(1, 0)" }, { "identifier": "univloss", "path": "modules/loss/univloss.py", "snippet": "class univloss(nn.Module):\n def __init__(self,config:dict):\n super().__init__()\n self.mel=PitchAdjustableMelSpectrogram( sample_rate=config['audio_sample_rate'],\n n_fft=config['fft_size'],\n win_length=config['win_size'],\n hop_length=config['hop_size'],\n f_min=config['fmin'],\n f_max=config['fmax_for_loss'],\n n_mels=config['audio_num_mel_bins'],)\n self.L1loss=nn.L1Loss()\n self.labauxloss=config.get('lab_aux_loss',45)\n # self.stft=warp_stft({'fft_sizes':[1024, 2048, 512,],'hop_sizes':[120, 240, 50,],'win_lengths':[600, 1200, 240,]})\n\n self.stft = warp_stft({'fft_sizes': config['loss_fft_sizes'], 'hop_sizes': config['loss_hop_sizes'],\n 'win_lengths': config['loss_win_lengths']})\n # fft_sizes = [2048, 4096, 1024, 512, 256, 128],\n # hop_sizes = [240, 480, 100, 50, 25, 12],\n # win_lengths = [1200, 2400, 480, 240, 120, 60]\n\n def discriminator_loss(self,disc_real_outputs, disc_generated_outputs):\n loss = 0\n rlosses=0\n glosses=0\n r_losses = []\n g_losses = []\n\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg ** 2)\n loss += r_loss + g_loss\n rlosses+=r_loss.item()\n glosses +=g_loss.item()\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, rlosses,glosses,r_losses, g_losses\n\n\n def Dloss(self,Dfake, Dtrue):\n\n (Fmrd_out, _), (Fmpd_out, _)=Dfake\n (Tmrd_out, _), (Tmpd_out, _)=Dtrue\n mrdloss, mrdrlosses, mrdglosses, _, _=self.discriminator_loss(Tmrd_out,Fmrd_out)\n mpdloss, mpdrlosses, mpdglosses, _, _ = self.discriminator_loss(Tmpd_out, Fmpd_out)\n loss=mrdloss+mpdloss\n return loss,{'DmrdlossF':mrdglosses,'DmrdlossT':mrdrlosses,'DmpdlossT':mpdrlosses,'DmpdlossF':mpdglosses}\n\n def feature_loss(self,fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2\n\n def GDloss(self,GDfake,GDtrue):\n loss = 0\n gen_losses = []\n mrd_losses=0\n mpd_losses = 0\n (mrd_out, Fmrd_feature), (mpd_out, Fmpd_feature)=GDfake\n (_, Tmrd_feature), (_, Tmpd_feature) = GDtrue\n for dg in mrd_out:\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l.item())\n # loss += l\n mrd_losses=l+mrd_losses\n\n for dg in mpd_out:\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l.item())\n # loss += l\n mpd_losses=l+mpd_losses\n\n mrd_feature_loss=self.feature_loss(Tmrd_feature,Fmrd_feature)\n mpd_feature_loss = 
self.feature_loss(Tmpd_feature, Fmpd_feature)\n # loss +=msd_feature_loss\n # loss +=mpd_feature_loss\n loss= mrd_feature_loss+mpd_feature_loss+mpd_losses+mrd_losses\n # (msd_losses, mpd_losses), (msd_feature_loss, mpd_feature_loss), gen_losses\n return loss, {'Gmrdloss':mrd_losses,'Gmpdloss':mpd_losses,'Gmrd_feature_loss':mrd_feature_loss,'Gmpd_feature_loss':mpd_feature_loss}\n\n # def Auxloss(self,Goutput, sample):\n #\n # Gmel=self.mel.dynamic_range_compression_torch(self.mel(Goutput['audio'].squeeze(1)))\n # # Rmel=sample['mel']\n # Rmel = self.mel.dynamic_range_compression_torch(self.mel(sample['audio'].squeeze(1)))\n # loss=self.L1loss(Gmel, Rmel)*self.labauxloss\n # return loss,{'auxloss':loss}\n\n def Auxloss(self,Goutput, sample):\n\n # Gmel=self.mel.dynamic_range_compression_torch(self.mel(Goutput['audio'].squeeze(1)))\n # # Rmel=sample['mel']\n # Rmel = self.mel.dynamic_range_compression_torch(self.mel(sample['audio'].squeeze(1)))\n sc_loss, mag_loss=self.stft.stft(Goutput['audio'].squeeze(1), sample['audio'].squeeze(1))\n loss=(sc_loss+ mag_loss)*self.labauxloss\n return loss,{'auxloss':loss,'auxloss_sc_loss':sc_loss,'auxloss_mag_loss':mag_loss}" }, { "identifier": "MultiPeriodDiscriminator", "path": "modules/univ_D/discriminator.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self,periods=[2,3,5,7,11]):\n super(MultiPeriodDiscriminator, self).__init__()\n # self.discriminators = nn.ModuleList([\n # DiscriminatorP(2),\n # DiscriminatorP(3),\n # DiscriminatorP(5),\n # DiscriminatorP(7),\n # DiscriminatorP(11),\n # ])\n self.discriminators = nn.ModuleList([\n DiscriminatorP(i) for i in periods\n\n ])\n\n def forward(self, y,):\n y_d_rs = []\n\n fmap_rs = []\n\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n\n\n return y_d_rs, fmap_rs" }, { "identifier": "MultiResSpecDiscriminator", "path": "modules/univ_D/discriminator.py", "snippet": "class MultiResSpecDiscriminator(torch.nn.Module):\n\n def __init__(self,\n fft_sizes=[1024, 2048, 512],\n hop_sizes=[120, 240, 50],\n win_lengths=[600, 1200, 240],\n window=\"hann_window\"):\n\n super(MultiResSpecDiscriminator, self).__init__()\n # self.discriminators = nn.ModuleList([\n # SpecDiscriminator(fft_sizes[0], hop_sizes[0], win_lengths[0], window),\n # SpecDiscriminator(fft_sizes[1], hop_sizes[1], win_lengths[1], window),\n # SpecDiscriminator(fft_sizes[2], hop_sizes[2], win_lengths[2], window)\n # ])\n self.discriminators = nn.ModuleList([\n SpecDiscriminator(i[0], i[1], i[2], window) for i in zip(fft_sizes,hop_sizes,win_lengths)\n\n ])\n\n def forward(self, y,):\n y_d_rs = []\n\n fmap_rs = []\n\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n\n return y_d_rs, fmap_rs" }, { "identifier": "GanBaseTask", "path": "training/base_task_gan.py", "snippet": "class GanBaseTask(pl.LightningModule):\n \"\"\"\n Base class for training tasks.\n 1. *load_ckpt*:\n load checkpoint;\n 2. *training_step*:\n record and log the loss;\n 3. *optimizer_step*:\n run backwards step;\n 4. *start*:\n load training configs, backup code, log to tensorboard, start training;\n 5. *configure_ddp* and *init_ddp_connection*:\n start parallel training.\n\n Subclasses should define:\n 1. *build_model*, *build_optimizer*, *build_scheduler*:\n how to build the model, the optimizer and the training scheduler;\n 2. *_training_step*:\n one training step of the model;\n 3. 
*on_validation_end* and *_on_validation_end*:\n postprocess the validation output.\n \"\"\"\n\n def __init__(self, config: dict, *args, **kwargs):\n # dataset configs\n super().__init__(*args, **kwargs)\n self.dataset_cls = None\n self.config = config\n # self.max_batch_frames = self.config['max_batch_frames']\n # self.max_batch_size = self.config['max_batch_size']\n # self.max_val_batch_frames = self.config['max_val_batch_frames']\n # self.max_val_batch_size = self.config['max_val_batch_size']\n\n # self.accumulate_grad_batches = self.config['accumulate_grad_batches']\n self.clip_grad_norm = self.config['clip_grad_norm']\n\n self.training_sampler = None\n self.model = None\n self.generator = None\n self.discriminator = None\n self.skip_immediate_validation = False\n self.skip_immediate_ckpt_save = False\n\n self.valid_losses: Dict[str, Metric] = {\n 'total_loss': MeanMetric()\n }\n self.valid_metric_names = set()\n self.mix_loss = None\n\n self.automatic_optimization = False\n self.skip_immediate_validations = 0\n\n self.aux_step = self.config.get('aux_step')\n self.train_dataset = None\n self.valid_dataset = None\n\n ###########\n\n # Training, validation and testing\n ###########\n def setup(self, stage):\n self.model = self.build_model()\n self.unfreeze_all_params()\n if self.config['freezing_enabled']:\n self.freeze_params()\n if self.config['finetune_enabled'] and get_latest_checkpoint_path(\n pathlib.Path(self.config['work_dir'])) is None:\n self.load_finetune_ckpt(self.load_pre_train_model())\n self.print_arch()\n self.build_losses_and_metrics()\n self.build_dataset()\n # self.train_dataset = self.dataset_cls(\n # config=self.config, data_dir=self.config['binary_data_dir'],\n # prefix=self.config['train_set_name'], allow_aug=True\n # )\n # self.valid_dataset = self.dataset_cls(\n # config=self.config, data_dir=self.config['binary_data_dir'],\n # prefix=self.config['valid_set_name'], allow_aug=False\n # )\n\n def build_dataset(self):\n raise NotImplementedError()\n\n def get_need_freeze_state_dict_key(self, model_state_dict) -> list:\n key_list = []\n for i in self.config['frozen_params']:\n for j in model_state_dict:\n if j.startswith(i):\n key_list.append(j)\n return list(set(key_list))\n\n def freeze_params(self) -> None:\n model_state_dict = self.state_dict().keys()\n freeze_key = self.get_need_freeze_state_dict_key(model_state_dict=model_state_dict)\n\n for i in freeze_key:\n params = self.get_parameter(i)\n\n params.requires_grad = False\n\n def unfreeze_all_params(self) -> None:\n for i in self.parameters():\n i.requires_grad = True\n\n def load_finetune_ckpt(\n self, state_dict\n ) -> None:\n\n adapt_shapes = self.config['finetune_strict_shapes']\n if not adapt_shapes:\n cur_model_state_dict = self.state_dict()\n unmatched_keys = []\n for key, param in state_dict.items():\n if key in cur_model_state_dict:\n new_param = cur_model_state_dict[key]\n if new_param.shape != param.shape:\n unmatched_keys.append(key)\n print('| Unmatched keys: ', key, new_param.shape, param.shape)\n for key in unmatched_keys:\n del state_dict[key]\n self.load_state_dict(state_dict, strict=False)\n\n def load_pre_train_model(self):\n\n pre_train_ckpt_path = self.config.get('finetune_ckpt_path')\n blacklist = self.config.get('finetune_ignored_params')\n if blacklist is None:\n blacklist = []\n # if whitelist is None:\n # raise RuntimeError(\"\")\n\n if pre_train_ckpt_path is not None:\n ckpt = torch.load(pre_train_ckpt_path)\n\n state_dict = {}\n for i in ckpt['state_dict']:\n # if 'diffusion' in i:\n 
# if i in rrrr:\n # continue\n skip = False\n for b in blacklist:\n if i.startswith(b):\n skip = True\n break\n\n if skip:\n continue\n\n state_dict[i] = ckpt['state_dict'][i]\n print(i)\n return state_dict\n else:\n raise RuntimeError(\"\")\n\n def build_model(self):\n raise NotImplementedError()\n\n @rank_zero_only\n def print_arch(self):\n utils.print_arch(self)\n\n def build_losses_and_metrics(self):\n raise NotImplementedError()\n\n def register_metric(self, name: str, metric: Metric):\n assert isinstance(metric, Metric)\n setattr(self, name, metric)\n self.valid_metric_names.add(name)\n\n # def run_model(self, sample, infer=False):\n # \"\"\"\n # steps:\n # 1. run the full model\n # 2. calculate losses if not infer\n # \"\"\"\n # raise NotImplementedError()\n\n def Gforward(self, sample, infer=False):\n \"\"\"\n steps:\n 1. run the full model\n 2. calculate losses if not infer\n \"\"\"\n raise NotImplementedError()\n\n def Dforward(self, Goutput):\n \"\"\"\n steps:\n 1. run the full model\n 2. calculate losses if not infer\n \"\"\"\n raise NotImplementedError()\n\n # def on_train_epoch_start(self):\n # if self.training_sampler is not None:\n # self.training_sampler.set_epoch(self.current_epoch)\n\n def _training_step(self, sample, batch_idx):\n \"\"\"\n :return: total loss: torch.Tensor, loss_log: dict, other_log: dict\n\n \"\"\"\n aux_only = False\n if self.aux_step is not None:\n if self.aux_step > self.global_step:\n aux_only = True\n\n log_diet = {}\n opt_g, opt_d = self.optimizers()\n Goutput = self.Gforward(sample=sample)\n if not aux_only:\n Dfake = self.Dforward(Goutput=Goutput['audio'].detach())\n Dtrue = self.Dforward(Goutput=sample['audio'])\n Dloss, Dlog = self.mix_loss.Dloss(Dfake=Dfake, Dtrue=Dtrue)\n log_diet.update(Dlog)\n # if self.clip_grad_norm is not None:\n # self.manual_backward(Dloss/self.clip_grad_norm)\n # else:\n opt_d.zero_grad()\n self.manual_backward(Dloss)\n if self.clip_grad_norm is not None:\n self.clip_gradients(opt_d, gradient_clip_val=self.clip_grad_norm, gradient_clip_algorithm=\"norm\")\n opt_d.step()\n opt_d.zero_grad()\n if not aux_only:\n GDfake = self.Dforward(Goutput=Goutput['audio'])\n GDtrue = self.Dforward(Goutput=sample['audio'])\n GDloss, GDlog = self.mix_loss.GDloss(GDfake=GDfake,GDtrue=GDtrue)\n log_diet.update(GDlog)\n Auxloss, Auxlog = self.mix_loss.Auxloss(Goutput=Goutput, sample=sample)\n\n log_diet.update(Auxlog)\n if not aux_only:\n Gloss=GDloss + Auxloss\n else:\n Gloss=Auxloss\n\n # if self.clip_grad_norm is not None:\n # self.manual_backward(Gloss / self.clip_grad_norm)\n # else:\n # self.manual_backward(Gloss)\n # if (batch_idx + 1) % self.accumulate_grad_batches == 0:\n opt_g.zero_grad()\n self.manual_backward(Gloss)\n if self.clip_grad_norm is not None:\n self.clip_gradients(opt_g, gradient_clip_val=self.clip_grad_norm, gradient_clip_algorithm=\"norm\")\n opt_g.step()\n\n\n\n return log_diet\n\n def training_step(self, sample, batch_idx, ): # todo\n log_outputs = self._training_step(sample, batch_idx)\n\n # logs to progress bar\n self.log_dict({'loss':sum(log_outputs.values())}, prog_bar=True, logger=False, on_step=True, on_epoch=False)\n # self.log('lr', self.lr_schedulers().get_last_lr()[0], prog_bar=True, logger=False, on_step=True, on_epoch=False)\n # logs to tensorboard\n if self.global_step % self.config['log_interval'] == 0:\n tb_log = {f'training/{k}': v for k, v in log_outputs.items()}\n # tb_log['training/lr'] = self.lr_schedulers().get_last_lr()[0]\n self.logger.log_metrics(tb_log, step=self.global_step)\n #\n # 
return total_loss\n\n # def on_before_optimizer_step(self, *args, **kwargs):\n # self.log_dict(grad_norm(self, norm_type=2))\n\n def _on_validation_start(self):\n pass\n\n def on_validation_start(self):\n self._on_validation_start()\n for metric in self.valid_losses.values():\n metric.to(self.device)\n metric.reset()\n\n def _validation_step(self, sample, batch_idx):\n \"\"\"\n\n :param sample:\n :param batch_idx:\n :return: loss_log: dict, weight: int\n \"\"\"\n raise NotImplementedError()\n\n def validation_step(self, sample, batch_idx):\n \"\"\"\n\n :param sample:\n :param batch_idx:\n\n \"\"\"\n\n # if self.skip_immediate_validations == 0 and self.global_step != 0:\n # self.skip_immediate_validation = True\n # self.skip_immediate_validations = 1\n # if self.global_step == 0:\n # self.skip_immediate_validations = 1\n\n if self.skip_immediate_validation:\n rank_zero_debug(f\"Skip validation {batch_idx}\")\n return {}\n with torch.autocast(self.device.type, enabled=False):\n losses, weight = self._validation_step(sample, batch_idx)\n losses = {\n 'total_loss': sum(losses.values()),\n **losses\n }\n for k, v in losses.items():\n if k not in self.valid_losses:\n self.valid_losses[k] = MeanMetric().to(self.device)\n self.valid_losses[k].update(v, weight=weight) # weight=1\n return losses\n\n def on_validation_epoch_end(self):\n if self.skip_immediate_validation:\n self.skip_immediate_validation = False\n self.skip_immediate_ckpt_save = True\n return\n loss_vals = {k: v.compute() for k, v in self.valid_losses.items()}\n self.log('val_loss', loss_vals['total_loss'], on_epoch=True, prog_bar=True, logger=False, sync_dist=True)\n self.logger.log_metrics({f'validation/{k}': v for k, v in loss_vals.items()}, step=self.global_step)\n for metric in self.valid_losses.values():\n metric.reset()\n metric_vals = {k: getattr(self, k).compute() for k in self.valid_metric_names}\n self.logger.log_metrics({f'metrics/{k}': v for k, v in metric_vals.items()}, step=self.global_step)\n for metric_name in self.valid_metric_names:\n getattr(self, metric_name).reset()\n\n # noinspection PyMethodMayBeStatic\n def build_scheduler(self, optimizer):\n from utils import build_lr_scheduler_from_config\n\n scheduler_args = self.config['lr_scheduler_args']\n assert scheduler_args['scheduler_cls'] != ''\n scheduler = build_lr_scheduler_from_config(optimizer, scheduler_args)\n return scheduler\n\n # noinspection PyMethodMayBeStatic\n def build_optimizer(self, model, optimizer_args):\n from utils import build_object_from_class_name\n\n assert optimizer_args['optimizer_cls'] != ''\n if 'beta1' in optimizer_args and 'beta2' in optimizer_args and 'betas' not in optimizer_args:\n optimizer_args['betas'] = (optimizer_args['beta1'], optimizer_args['beta2'])\n\n if isinstance(model, nn.ModuleList):\n parameterslist = []\n for i in model:\n parameterslist = parameterslist + list(i.parameters())\n optimizer = build_object_from_class_name(\n optimizer_args['optimizer_cls'],\n torch.optim.Optimizer,\n parameterslist,\n **optimizer_args\n )\n elif isinstance(model, nn.ModuleDict):\n parameterslist = []\n for i in model:\n # parameterslist = parameterslist + list(model[i].parameters())\n parameterslist.append({'params': model[i].parameters()})\n optimizer = build_object_from_class_name(\n optimizer_args['optimizer_cls'],\n torch.optim.Optimizer,\n parameterslist,\n **optimizer_args\n )\n elif isinstance(model, nn.Module):\n\n optimizer = build_object_from_class_name(\n optimizer_args['optimizer_cls'],\n torch.optim.Optimizer,\n 
model.parameters(),\n **optimizer_args\n )\n else:\n raise RuntimeError(\"\")\n\n return optimizer\n\n def configure_optimizers(self):\n optG = self.build_optimizer(self.generator, optimizer_args=self.config['generater_optimizer_args'])\n optD = self.build_optimizer(self.discriminator, optimizer_args=self.config['discriminate_optimizer_args'])\n\n return [optG, optD]\n # scheduler = self.build_scheduler(optm)\n # if scheduler is None:\n # return optm\n # return {\n # \"optimizer\": optm,\n # \"lr_scheduler\": {\n # \"scheduler\": scheduler,\n # \"interval\": \"step\",\n # \"frequency\": 1\n # }\n # }\n\n def train_dataloader(self):\n # self.training_sampler = DsBatchSampler(\n # self.train_dataset,\n # max_batch_frames=self.max_batch_frames,\n # max_batch_size=self.max_batch_size,\n # num_replicas=(self.trainer.distributed_sampler_kwargs or {}).get('num_replicas', 1),\n # rank=(self.trainer.distributed_sampler_kwargs or {}).get('rank', 0),\n # sort_by_similar_size=self.config['sort_by_len'],\n # required_batch_count_multiple=self.config['accumulate_grad_batches'],\n # frame_count_grid=self.config['sampler_frame_count_grid'],\n # shuffle_sample=True,\n # shuffle_batch=False,\n # seed=self.config['seed']\n # )\n return torch.utils.data.DataLoader(self.train_dataset,\n collate_fn=self.train_dataset.collater,\n batch_size=self.config['batch_size'],\n # batch_sampler=self.training_sampler,\n num_workers=self.config['ds_workers'],\n prefetch_factor=self.config['dataloader_prefetch_factor'],\n pin_memory=True,\n persistent_workers=True)\n\n def val_dataloader(self):\n # sampler = DsEvalBatchSampler(\n # self.valid_dataset,\n # max_batch_frames=self.max_val_batch_frames,\n # max_batch_size=self.max_val_batch_size,\n # rank=(self.trainer.distributed_sampler_kwargs or {}).get('rank', 0),\n # batch_by_size=False\n # )\n return torch.utils.data.DataLoader(self.valid_dataset,\n collate_fn=self.valid_dataset.collater,\n batch_size=1,\n # batch_sampler=sampler,\n num_workers=self.config['ds_workers'],\n prefetch_factor=self.config['dataloader_prefetch_factor'],\n shuffle=False)\n\n def test_dataloader(self):\n return self.val_dataloader()\n\n def on_test_start(self):\n self.on_validation_start()\n\n def test_step(self, sample, batch_idx):\n return self.validation_step(sample, batch_idx)\n\n def on_test_end(self):\n return self.on_validation_end()\n\n def on_save_checkpoint(self, checkpoint):\n pass\n # checkpoint['trainer_stage'] = self.trainer.state.stage.value\n\n # def on_load_checkpoint(self, checkpoint):\n # # from lightning.pytorch.trainer.states import RunningStage\n # from utils import simulate_lr_scheduler\n # # if checkpoint.get('trainer_stage', '') == RunningStage.VALIDATING.value:\n # # self.skip_immediate_validation = True\n #\n # optimizer_args = self.config['optimizer_args']\n # scheduler_args = self.config['lr_scheduler_args']\n #\n # if 'beta1' in optimizer_args and 'beta2' in optimizer_args and 'betas' not in optimizer_args:\n # optimizer_args['betas'] = (optimizer_args['beta1'], optimizer_args['beta2'])\n #\n # if checkpoint.get('optimizer_states', None):\n # opt_states = checkpoint['optimizer_states']\n # assert len(opt_states) == 1 # only support one optimizer\n # opt_state = opt_states[0]\n # for param_group in opt_state['param_groups']:\n # for k, v in optimizer_args.items():\n # if k in param_group and param_group[k] != v:\n # if 'lr_schedulers' in checkpoint and checkpoint['lr_schedulers'] and k == 'lr':\n # continue\n # rank_zero_info(f'| Overriding optimizer parameter {k} from 
checkpoint: {param_group[k]} -> {v}')\n # param_group[k] = v\n # if 'initial_lr' in param_group and param_group['initial_lr'] != optimizer_args['lr']:\n # rank_zero_info(\n # f'| Overriding optimizer parameter initial_lr from checkpoint: {param_group[\"initial_lr\"]} -> {optimizer_args[\"lr\"]}'\n # )\n # param_group['initial_lr'] = optimizer_args['lr']\n #\n # if checkpoint.get('lr_schedulers', None):\n # assert checkpoint.get('optimizer_states', False)\n # assert len(checkpoint['lr_schedulers']) == 1 # only support one scheduler\n # checkpoint['lr_schedulers'][0] = simulate_lr_scheduler(\n # optimizer_args, scheduler_args,\n # step_count=checkpoint['global_step'],\n # num_param_groups=len(checkpoint['optimizer_states'][0]['param_groups'])\n # )\n # for param_group, new_lr in zip(\n # checkpoint['optimizer_states'][0]['param_groups'],\n # checkpoint['lr_schedulers'][0]['_last_lr'],\n # ):\n # if param_group['lr'] != new_lr:\n # rank_zero_info(\n # f'| Overriding optimizer parameter lr from checkpoint: {param_group[\"lr\"]} -> {new_lr}')\n # param_group['lr'] = new_lr" }, { "identifier": "PitchAdjustableMelSpectrogram", "path": "utils/wav2mel.py", "snippet": "class PitchAdjustableMelSpectrogram:\n def __init__(\n self,\n sample_rate=44100,\n n_fft=2048,\n win_length=2048,\n hop_length=512,\n f_min=40,\n f_max=16000,\n n_mels=128,\n center=False,\n ):\n self.sample_rate = sample_rate\n self.n_fft = n_fft\n self.win_size = win_length\n self.hop_length = hop_length\n self.f_min = f_min\n self.f_max = f_max\n self.n_mels = n_mels\n self.center = center\n\n self.mel_basis = {}\n self.hann_window = {}\n\n def __call__(self, y, key_shift=0, speed=1.0):\n factor = 2 ** (key_shift / 12)\n n_fft_new = int(np.round(self.n_fft * factor))\n win_size_new = int(np.round(self.win_size * factor))\n hop_length = int(np.round(self.hop_length * speed))\n\n # if torch.min(y) < -1.0:\n # logger.warning(f\"min value is {torch.min(y)}\")\n # if torch.max(y) > 1.0:\n # logger.warning(f\"max value is {torch.max(y)}\")\n\n mel_basis_key = f\"{self.f_max}_{y.device}\"\n if mel_basis_key not in self.mel_basis:\n mel = librosa_mel_fn(\n sr=self.sample_rate,\n n_fft=self.n_fft,\n n_mels=self.n_mels,\n fmin=self.f_min,\n fmax=self.f_max,\n )\n self.mel_basis[mel_basis_key] = torch.from_numpy(mel).float().to(y.device)\n\n hann_window_key = f\"{key_shift}_{y.device}\"\n if hann_window_key not in self.hann_window:\n self.hann_window[hann_window_key] = torch.hann_window(\n win_size_new, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (\n int((win_size_new - hop_length) // 2),\n int((win_size_new - hop_length+1) // 2),\n ),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft_new,\n hop_length=hop_length,\n win_length=win_size_new,\n window=self.hann_window[hann_window_key],\n center=self.center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=True,\n ).abs()\n # spec = torch.view_as_real(spec)\n # spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9))\n\n if key_shift != 0:\n size = self.n_fft // 2 + 1\n resize = spec.size(1)\n if resize < size:\n spec = F.pad(spec, (0, 0, 0, size - resize))\n\n spec = spec[:, :size, :] * self.win_size / win_size_new\n\n spec = torch.matmul(self.mel_basis[mel_basis_key], spec)\n\n return spec\n\n def dynamic_range_compression_torch(self,x, C=1, clip_val=1e-5):\n return torch.log(torch.clamp(x, min=clip_val) * C)" } ]
import logging
import os
import pathlib
import random
import sys
import lightning.pytorch as pl
import matplotlib
import numpy as np
import torch.utils.data
import utils
from typing import Dict
from lightning.pytorch.utilities.rank_zero import rank_zero_debug, rank_zero_info, rank_zero_only
from matplotlib import pyplot as plt
from torch import nn
from torch.utils.data import Dataset
from torchmetrics import Metric, MeanMetric
from models.univnet.univnet import UnivNet
from modules.loss.univloss import univloss
from modules.univ_D.discriminator import MultiPeriodDiscriminator, MultiResSpecDiscriminator
from training.base_task_gan import GanBaseTask
from utils.wav2mel import PitchAdjustableMelSpectrogram
10,294
def __len__(self): return len(self.data_index) def collater(self, minibatch): samples_per_frame = self.config['hop_size'] if self.infer: crop_mel_frames = 0 else: crop_mel_frames = self.config['crop_mel_frames'] for record in minibatch: # Filter out records that aren't long enough. if len(record['spectrogram']) < crop_mel_frames: del record['spectrogram'] del record['audio'] del record['f0'] continue start = random.randint(0, record['spectrogram'].shape[0] - 1 - crop_mel_frames) end = start + crop_mel_frames if self.infer: record['spectrogram'] = record['spectrogram'].T record['f0'] = record['f0'] else: record['spectrogram'] = record['spectrogram'][start:end].T record['f0'] = record['f0'][start:end] start *= samples_per_frame end *= samples_per_frame if self.infer: cty=(len(record['spectrogram'].T) * samples_per_frame) record['audio'] = record['audio'][:cty] record['audio'] = np.pad(record['audio'], ( 0, (len(record['spectrogram'].T) * samples_per_frame) - len(record['audio'])), mode='constant') pass else: # record['spectrogram'] = record['spectrogram'][start:end].T record['audio'] = record['audio'][start:end] record['audio'] = np.pad(record['audio'], (0, (end - start) - len(record['audio'])), mode='constant') if self.volume_aug: for record in minibatch: if random.random() < self.volume_aug_prob: audio = record['audio'] audio_mel = record['spectrogram'] max_amp = float(np.max(np.abs(audio))) + 1e-5 max_shift = min(3, np.log(1 / max_amp)) log_mel_shift = random.uniform(-3, max_shift) # audio *= (10 ** log_mel_shift) audio *= np.exp(log_mel_shift) audio_mel += log_mel_shift audio_mel = torch.clamp(torch.from_numpy(audio_mel), min=np.log(1e-5)).numpy() record['audio'] = audio record['spectrogram'] = audio_mel audio = np.stack([record['audio'] for record in minibatch if 'audio' in record]) spectrogram = np.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record]) f0 = np.stack([record['f0'] for record in minibatch if 'f0' in record]) return { 'audio': torch.from_numpy(audio).unsqueeze(1), 'mel': torch.from_numpy(spectrogram), 'f0': torch.from_numpy(f0), } class stftlog: def __init__(self, n_fft=2048, win_length=2048, hop_length=512, center=False,): self.hop_length=hop_length self.win_size=win_length self.n_fft = n_fft self.win_size = win_length self.center = center self.hann_window = {} def exc(self,y): hann_window_key = f"{y.device}" if hann_window_key not in self.hann_window: self.hann_window[hann_window_key] = torch.hann_window( self.win_size, device=y.device ) y = torch.nn.functional.pad( y.unsqueeze(1), ( int((self.win_size - self.hop_length) // 2), int((self.win_size - self.hop_length+1) // 2), ), mode="reflect", ) y = y.squeeze(1) spec = torch.stft( y, self.n_fft, hop_length=self.hop_length, win_length=self.win_size, window=self.hann_window[hann_window_key], center=self.center, pad_mode="reflect", normalized=False, onesided=True, return_complex=True, ).abs() return spec class univnet_task(GanBaseTask): def __init__(self, config): super().__init__(config)
# from models.lvc_ddspgan.lvc_ddspgan import DDSPgan # from models.nsf_HiFigan.models import Generator, AttrDict, MultiScaleDiscriminator, MultiPeriodDiscriminator def spec_to_figure(spec, vmin=None, vmax=None): if isinstance(spec, torch.Tensor): spec = spec.cpu().numpy() fig = plt.figure(figsize=(12, 9),dpi=100) plt.pcolor(spec.T, vmin=vmin, vmax=vmax) plt.tight_layout() return fig class nsf_HiFigan_dataset(Dataset): def __init__(self, config: dict, data_dir, infer=False): super().__init__() self.config = config self.data_dir = data_dir if isinstance(data_dir, pathlib.Path) else pathlib.Path(data_dir) with open(self.data_dir, 'r', encoding='utf8') as f: fills = f.read().strip().split('\n') self.data_index = fills self.infer = infer self.volume_aug = self.config['volume_aug'] self.volume_aug_prob = self.config['volume_aug_prob'] if not infer else 0 def __getitem__(self, index): data_path = self.data_index[index] data = np.load(data_path) return {'f0':data['f0'],'spectrogram':data['mel'],'audio':data['audio']} def __len__(self): return len(self.data_index) def collater(self, minibatch): samples_per_frame = self.config['hop_size'] if self.infer: crop_mel_frames = 0 else: crop_mel_frames = self.config['crop_mel_frames'] for record in minibatch: # Filter out records that aren't long enough. if len(record['spectrogram']) < crop_mel_frames: del record['spectrogram'] del record['audio'] del record['f0'] continue start = random.randint(0, record['spectrogram'].shape[0] - 1 - crop_mel_frames) end = start + crop_mel_frames if self.infer: record['spectrogram'] = record['spectrogram'].T record['f0'] = record['f0'] else: record['spectrogram'] = record['spectrogram'][start:end].T record['f0'] = record['f0'][start:end] start *= samples_per_frame end *= samples_per_frame if self.infer: cty=(len(record['spectrogram'].T) * samples_per_frame) record['audio'] = record['audio'][:cty] record['audio'] = np.pad(record['audio'], ( 0, (len(record['spectrogram'].T) * samples_per_frame) - len(record['audio'])), mode='constant') pass else: # record['spectrogram'] = record['spectrogram'][start:end].T record['audio'] = record['audio'][start:end] record['audio'] = np.pad(record['audio'], (0, (end - start) - len(record['audio'])), mode='constant') if self.volume_aug: for record in minibatch: if random.random() < self.volume_aug_prob: audio = record['audio'] audio_mel = record['spectrogram'] max_amp = float(np.max(np.abs(audio))) + 1e-5 max_shift = min(3, np.log(1 / max_amp)) log_mel_shift = random.uniform(-3, max_shift) # audio *= (10 ** log_mel_shift) audio *= np.exp(log_mel_shift) audio_mel += log_mel_shift audio_mel = torch.clamp(torch.from_numpy(audio_mel), min=np.log(1e-5)).numpy() record['audio'] = audio record['spectrogram'] = audio_mel audio = np.stack([record['audio'] for record in minibatch if 'audio' in record]) spectrogram = np.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record]) f0 = np.stack([record['f0'] for record in minibatch if 'f0' in record]) return { 'audio': torch.from_numpy(audio).unsqueeze(1), 'mel': torch.from_numpy(spectrogram), 'f0': torch.from_numpy(f0), } class stftlog: def __init__(self, n_fft=2048, win_length=2048, hop_length=512, center=False,): self.hop_length=hop_length self.win_size=win_length self.n_fft = n_fft self.win_size = win_length self.center = center self.hann_window = {} def exc(self,y): hann_window_key = f"{y.device}" if hann_window_key not in self.hann_window: self.hann_window[hann_window_key] = torch.hann_window( self.win_size, device=y.device ) 
y = torch.nn.functional.pad( y.unsqueeze(1), ( int((self.win_size - self.hop_length) // 2), int((self.win_size - self.hop_length+1) // 2), ), mode="reflect", ) y = y.squeeze(1) spec = torch.stft( y, self.n_fft, hop_length=self.hop_length, win_length=self.win_size, window=self.hann_window[hann_window_key], center=self.center, pad_mode="reflect", normalized=False, onesided=True, return_complex=True, ).abs() return spec class univnet_task(GanBaseTask): def __init__(self, config): super().__init__(config)
self.TF = PitchAdjustableMelSpectrogram( f_min=0,
5
2023-10-17 13:45:09+00:00
12k
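Note on the univloss context snippet in the record above: its discriminator, generator and feature-matching terms are least-squares GAN objectives. A minimal sketch of the core tensor operations follows, assuming per-discriminator score tensors and lists of feature maps; the function names and shapes are assumptions for illustration, not the repository's API.

import torch

def d_loss(real_scores, fake_scores):
    # Discriminator objective: real scores pushed towards 1, fake scores towards 0.
    return torch.mean((1 - real_scores) ** 2) + torch.mean(fake_scores ** 2)

def g_adv_loss(fake_scores):
    # Generator adversarial objective: fake scores pushed towards 1.
    return torch.mean((1 - fake_scores) ** 2)

def feature_matching_loss(fmaps_real, fmaps_fake):
    # L1 distance between discriminator feature maps of real and generated audio,
    # scaled by 2 as in the snippet's feature_loss.
    return 2 * sum(torch.mean(torch.abs(r - f)) for r, f in zip(fmaps_real, fmaps_fake))

real = torch.rand(4, 1)  # example scores for real audio
fake = torch.rand(4, 1)  # example scores for generated audio
print(d_loss(real, fake).item(), g_adv_loss(fake).item())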
OllieBoyne/FOUND
FOUND/utils/eval_utils.py
[ { "identifier": "modified_chamf", "path": "FOUND/utils/pytorch3d.py", "snippet": "def modified_chamf(x,y, x_lengths=None, y_lengths=None,\n x_normals=None, y_normals=None,\n norm: int = 2):\n \"\"\"\n \tA modified version of pytorch3d.loss.chamfer_distance\n \tto allow for no point or batch reduction and some other changes\n \"\"\"\n\n if not ((norm == 1) or (norm == 2)):\n raise ValueError(\"Support for 1 or 2 norm.\")\n\n x, x_lengths, x_normals = _handle_pointcloud_input(x, x_lengths, x_normals)\n y, y_lengths, y_normals = _handle_pointcloud_input(y, y_lengths, y_normals)\n\n return_normals = x_normals is not None and y_normals is not None\n\n N, P1, D = x.shape\n P2 = y.shape[1]\n\n # Check if inputs are heterogeneous and create a lengths mask.\n is_x_heterogeneous = (x_lengths != P1).any()\n is_y_heterogeneous = (y_lengths != P2).any()\n x_mask = (\n torch.arange(P1, device=x.device)[None] >= x_lengths[:, None]\n ) # shape [N, P1]\n y_mask = (\n torch.arange(P2, device=y.device)[None] >= y_lengths[:, None]\n ) # shape [N, P2]\n\n if y.shape[0] != N or y.shape[2] != D:\n raise ValueError(\"y does not have the correct shape.\")\n\n cham_norm_x = x.new_zeros(())\n cham_norm_y = x.new_zeros(())\n\n x_nn = knn_points(x, y, lengths1=x_lengths, lengths2=y_lengths, norm=norm, K=1)\n y_nn = knn_points(y, x, lengths1=y_lengths, lengths2=x_lengths, norm=norm, K=1)\n\n cham_x = x_nn.dists[..., 0] # (N, P1)\n cham_y = y_nn.dists[..., 0] # (N, P2)\n\n if is_x_heterogeneous:\n cham_x[x_mask] = 0.0\n if is_y_heterogeneous:\n cham_y[y_mask] = 0.0\n\n\n # Gather the normals using the indices and keep only value for k=0\n x_normals_near = knn_gather(y_normals, x_nn.idx, y_lengths)[..., 0, :]\n y_normals_near = knn_gather(x_normals, y_nn.idx, x_lengths)[..., 0, :]\n\n cham_norm_x = torch.abs(\n F.cosine_similarity(x_normals, x_normals_near, dim=2, eps=1e-6)\n )\n cham_norm_y = torch.abs(\n F.cosine_similarity(y_normals, y_normals_near, dim=2, eps=1e-6)\n )\n\n return dict(cham_x=cham_x, cham_y=cham_y, cham_norm_x = cham_norm_x, cham_norm_y=cham_norm_y)" }, { "identifier": "modified_sample", "path": "FOUND/utils/pytorch3d.py", "snippet": "def modified_sample(meshes: Meshes, \n num_samples: int = 10000,\n return_normals: bool = False,\n return_textures: bool = False,):\n\n \"\"\"Modified version of pytorch3d.ops.sample_points_from_meshes\n that returns references to the faces sampled from\"\"\"\n\n if meshes.isempty():\n raise ValueError(\"Meshes are empty.\")\n\n verts = meshes.verts_packed()\n if not torch.isfinite(verts).all():\n raise ValueError(\"Meshes contain nan or inf.\")\n\n if return_textures and meshes.textures is None:\n raise ValueError(\"Meshes do not contain textures.\")\n\n faces = meshes.faces_packed()\n mesh_to_face = meshes.mesh_to_faces_packed_first_idx()\n num_meshes = len(meshes)\n num_valid_meshes = torch.sum(meshes.valid) # Non empty meshes.\n\n # Initialize samples tensor with fill value 0 for empty meshes.\n samples = torch.zeros((num_meshes, num_samples, 3), device=meshes.device)\n\n # Only compute samples for non empty meshes\n with torch.no_grad():\n areas, _ = mesh_face_areas_normals(verts, faces) # Face areas can be zero.\n max_faces = meshes.num_faces_per_mesh().max().item()\n areas_padded = packed_to_padded(\n areas, mesh_to_face[meshes.valid], max_faces\n ) # (N, F)\n\n # TODO (gkioxari) Confirm multinomial bug is not present with real data.\n sample_face_idxs = areas_padded.multinomial(\n num_samples, replacement=True\n ) # (N, num_samples)\n sample_face_idxs += 
mesh_to_face[meshes.valid].view(num_valid_meshes, 1)\n\n # Get the vertex coordinates of the sampled faces.\n face_verts = verts[faces]\n v0, v1, v2 = face_verts[:, 0], face_verts[:, 1], face_verts[:, 2]\n\n # Randomly generate barycentric coords.\n w0, w1, w2 = _rand_barycentric_coords(\n num_valid_meshes, num_samples, verts.dtype, verts.device\n )\n\n # Use the barycentric coords to get a point on each sampled face.\n a = v0[sample_face_idxs] # (N, num_samples, 3)\n b = v1[sample_face_idxs]\n c = v2[sample_face_idxs]\n samples[meshes.valid] = w0[:, :, None] * a + w1[:, :, None] * b + w2[:, :, None] * c\n\n if return_normals:\n # Initialize normals tensor with fill value 0 for empty meshes.\n # Normals for the sampled points are face normals computed from\n # the vertices of the face in which the sampled point lies.\n normals = torch.zeros((num_meshes, num_samples, 3), device=meshes.device)\n vert_normals = (v1 - v0).cross(v2 - v1, dim=1)\n vert_normals = vert_normals / vert_normals.norm(dim=1, p=2, keepdim=True).clamp(\n min=sys.float_info.epsilon\n )\n vert_normals = vert_normals[sample_face_idxs]\n normals[meshes.valid] = vert_normals\n\n if return_textures:\n # fragment data are of shape NxHxWxK. Here H=S, W=1 & K=1.\n pix_to_face = sample_face_idxs.view(len(meshes), num_samples, 1, 1) # NxSx1x1\n bary = torch.stack((w0, w1, w2), dim=2).unsqueeze(2).unsqueeze(2) # NxSx1x1x3\n # zbuf and dists are not used in `sample_textures` so we initialize them with dummy\n dummy = torch.zeros(\n (len(meshes), num_samples, 1, 1), device=meshes.device, dtype=torch.float32\n ) # NxSx1x1\n fragments = MeshFragments(\n pix_to_face=pix_to_face, zbuf=dummy, bary_coords=bary, dists=dummy\n )\n textures = meshes.sample_textures(fragments) # NxSx1x1xC\n textures = textures[:, :, 0, 0, :] # NxSxC\n\n out = {}\n\n out['verts'] = samples\n if return_normals: out['normals'] = normals\n if return_textures: out['textures'] = textures\n\n # return original faces\n out['face_idxs'] = sample_face_idxs\n\n return out" }, { "identifier": "Renderer", "path": "FOUND/utils/renderer.py", "snippet": "class Renderer(nn.Module):\n\n\tdef __init__(self, device='cuda', image_size=(256, 256),\n\t\t\t\t bin_size=None, z_clip_value=None,\n\t\t\t\t max_faces_per_bin=None, cam_params: dict = None,\n\t\t\t\t MAX_BATCH_SIZE=10,\n\t\t\t\t **kwargs):\n\n\t\tsuper().__init__()\n\n\t\tself.MAX_BATCH_SIZE = MAX_BATCH_SIZE\n\n\t\tif isinstance(image_size, int):\n\t\t\timage_size = (image_size, image_size)\n\n\t\tself.image_size = image_size\n\n\t\tself.img_raster_settings = RasterizationSettings(\n\t\t\timage_size=image_size, blur_radius=0.,\n\t\t\tfaces_per_pixel=1, max_faces_per_bin=max_faces_per_bin,\n\t\t\tbin_size=bin_size, z_clip_value=z_clip_value)\n\n\t\t# Rasterization settings for silhouette rendering\n\t\tsigma = 1e-6\n\t\tself.raster_settings_silhouette = RasterizationSettings(\n\t\t\timage_size=image_size,\n\t\t\tblur_radius=np.log(1. / 1e-4 - 1.) 
* sigma,\n\t\t\tfaces_per_pixel=10, max_faces_per_bin=max_faces_per_bin,\n\t\t\tbin_size=bin_size\n\t\t)\n\n\t\tself.rasterizer = MeshRasterizer(raster_settings=self.img_raster_settings)\n\t\tself.sil_rasterizer = MeshRasterizer(raster_settings=self.raster_settings_silhouette)\n\n\t\t# Shaders\n\t\tself.img_shader = SoftPhongShader(device=device)\n\t\tself.norm_shader = NormalShader()\n\t\tself.sil_shader = SoftSilhouetteShader()\n\n\t\t# default lighting\n\t\tself.lights = AmbientLights(device=device)\n\n\t\tself.camera_params = {}\n\t\tif cam_params is not None:\n\t\t\t# Multiple camera intrinsics not currently supported\n\t\t\tf = torch.tensor([[cam_params['focal_length']]]).to(device) # [N x 1]\n\t\t\tpp = torch.tensor(cam_params['principal_point']).unsqueeze(0).to(device) # [N x 2]\n\t\t\tself.camera_params = dict(focal_length=f, principal_point=pp,\n\t\t\t\t\t\t\t\t\t in_ndc=False, image_size=torch.tensor(image_size).unsqueeze(0).to(device))\n\n\tdef forward(self, meshes, R: torch.Tensor, T: torch.Tensor, keypoints=None,\n\t\t\t\trender_normals=True, render_rgb=True, render_sil=True,\n\t\t\t\tmask_out_faces=None, return_cameras=False, camera_params=None,\n\t\t\t\tnormals_fmt='blender', one_view_per_mesh=False):\n\t\t\"\"\"\n\t\tCan receive various number of 'views' (size of R) and meshes (size of 'meshes')\n\t\tN input views, 1 mesh -> render N views of 1 mesh\n\t\tN input views, N mesh -> render one view per mesh (only if one_view_per_mesh is True)\n\t\tN input views, M mesh -> render N views of M meshes\n\n\t\tRender modes:\n\t\t\t- render_rgb: render RGB image\n\t\t\t- render_normals: render surface normals\n\t\t\t- render_sil: render silhouette\n\t\t\t- keypoints: project 3D keypoints onto image\n\n\t\t:param R: [N x 4 x 4]\n\t\t:param T: [N x 4 x 4]\n\t\t:param keypoints: optional [M x P x 3] keypoints to render\n\t\t:param mask_out_faces: [M x F] faces per mesh to optionally remove from seg & normal\n\t\t:param camera_params: Optional per-camera focal length & principal point\n\t\t:return:\n\n\t\tCurrently does not support M > 1 rendering to M images.\n\t\t\"\"\"\n\n\t\tif camera_params is None:\n\t\t\tcamera_params = self.camera_params\n\n\t\tN = R.shape[0] # number of views\n\t\tM = len(meshes) # number of meshes\n\n\t\tif M > 1 and (N == M):\n\t\t\tassert one_view_per_mesh, \"For N == M, M > 1, requires one_view_per_mesh=True parameter.\"\n\n\t\t\tout_shape_rgb = (N, *self.image_size, 3)\n\t\t\tout_shape_single = (N, *self.image_size)\n\t\t\tbatch_size = N\n\n\t\t# in the case M != N for M > 1, want to render all N views for each mesh\n\t\telif M != N and M > 1:\n\t\t\tmeshes = meshes.extend(N) # produce a mesh for each view\n\t\t\tR = torch.cat([R] * M, dim=0)\n\t\t\tT = torch.cat([T] * M, dim=0) # produce R, T for each mesh\n\n\t\t\tout_shape_rgb = (N, M, *self.image_size, 3)\n\t\t\tout_shape_single = (N, M, *self.image_size)\n\t\t\tbatch_size = N * M\n\n\t\t# in the case M = 1, N >= 1, render N views of 1 mesh\n\t\telse:\n\t\t\tmeshes = meshes.extend(N) # produce a mesh for each view\n\t\t\tout_shape_rgb = (N, *self.image_size, 3)\n\t\t\tout_shape_single = (N, *self.image_size)\n\n\t\tcameras = PerspectiveCameras(device=meshes.device, R=R, T=T, **camera_params)\n\n\t\tout = dict()\n\t\t_frags = None\n\t\tnormals = None\n\t\tif render_rgb or render_normals:\n\t\t\tfragments = self.rasterizer(meshes, cameras=cameras)\n\t\t\t_frags = fragments # Store fragments for mask out faces\n\n\t\t\tif render_rgb:\n\t\t\t\tout['rgb'] = self.img_shader(fragments, meshes, 
cameras=cameras, lights=self.lights)[..., :3].reshape(\n\t\t\t\t\tout_shape_rgb)\n\n\t\t\tif render_normals:\n\t\t\t\tnormals = self.norm_shader(fragments, meshes, cameras=cameras)\n\n\t\tif render_sil:\n\t\t\tfragments_sil = self.sil_rasterizer(meshes, cameras=cameras)\n\t\t\tif _frags is None: _frags = fragments_sil # Store fragments for mask out faces\n\n\t\t\tsil = self.sil_shader(fragments_sil, meshes, cameras=cameras)\n\t\t\tout['sil'] = sil[..., -1].reshape(out_shape_single) # return just alpha channel (silhouette)\n\n\t\t# Apply face masking of FIND model\n\t\tif (render_rgb or render_sil or render_normals) and mask_out_faces is not None:\n\t\t\t# get foremost face for each pixel in correct format\n\t\t\tpix_to_face = get_padded_pix_to_face(_frags.pix_to_face[..., 0], meshes).reshape(out_shape_single)\n\n\t\t\tfor n in range(N):\n\t\t\t\tmask_pix = torch.isin(pix_to_face[n], mask_out_faces)\n\n\t\t\t\tif render_rgb:\n\t\t\t\t\tout['rgb'][n][mask_pix] = 1. # set pixels to white\n\n\t\t\t\tif render_sil:\n\t\t\t\t\tout['sil'][n, mask_pix] = 0.\n\n\t\t\t\tif render_normals:\n\t\t\t\t\tnormals.mask[n] *= ~mask_pix # does not work for certain batch types\n\n\t\tif render_normals:\n\t\t\t# Also return rgb and xyz of normals\n\t\t\tout['norm_rgb'] = normals.to_rgb(format=normals_fmt, mask_value=.5).reshape(out_shape_rgb)\n\t\t\tout['norm_xyz'] = normals.to_xyz(format=normals_fmt).reshape(out_shape_rgb)\n\n\t\tif keypoints is not None:\n\t\t\tkps_2d = cameras.transform_points_screen(keypoints, image_size=self.image_size)[..., :2]\n\t\t\tout['kps'] = kps_2d\n\n\t\tif return_cameras:\n\t\t\tout['cameras'] = cameras\n\n\t\treturn out" }, { "identifier": "view_from", "path": "FOUND/utils/renderer.py", "snippet": "def view_from(view_kw='topdown', dist=.35):\n\tkws = ['topdown', 'side1', 'side2', 'toes', '45', '60']\n\n\tif isinstance(view_kw, str):\n\t\tview_kw = [view_kw]\n\n\tN = len(view_kw)\n\tR, T = torch.empty((N, 3, 3)), torch.empty((N, 3))\n\tfor n, v in enumerate(view_kw):\n\t\tassert v in kws or isinstance(v, int), f\"View description `{view_kw}` not understood\"\n\n\t\tdist, elev, azim, point = dist, 0, 0, ((0, 0, 0),)\n\t\tif v == 'topdown': elev = 0\n\t\tif v == 'side1': elev = 90\n\t\tif v == 'side2': elev, azim = -90, 180\n\t\tif v == 'toes': point = ((0.1, 0, 0),); dist = 0.1\n\t\tif isinstance(v, int):\n\t\t\telev = v\n\n\t\t_R, _T = look_at_view_transform(dist=dist, elev=elev, azim=azim, up=((1, 0, 0),), at=point)\n\n\t\tR[n] = _R\n\t\tT[n] = _T\n\n\treturn R, T" }, { "identifier": "produce_grid", "path": "FOUND/utils/vis.py", "snippet": "def produce_grid(entries):\n\t\"\"\"Receives list of lists, containing several possible data types. Converts them all to the correct RGB uint8 format, combines into a single image, and returns.\n\n\tAccepted formats:\n\tTensor, any device, >= 2 dims (will take first element in all above last 3), >= 3 channels (will take first 3) OR 1 channel (segmentation)\n\tnp.ndarray (same rules as tensor)\n\tNone - fill with blank\n\n\tPads all rows with black images if not enough elements\n\t\"\"\"\n\n\tif not isinstance(entries[0], list):\n\t\tentries = [entries] # convert to 2D list of lists\n\n\tM = max(map(len, entries))\n\n\tH, W = None, None\n\n\trows = []\n\tfor j, raw_row in enumerate(entries):\n\t\trow = []\n\t\tfor i, entry in enumerate(raw_row):\n\t\t\tif entry is None:\n\t\t\t\tentry = np.zeros((H, W, 3), dtype=np.uint8)\n\n\t\t\tentry = tens2rgb(entry)\n\n\t\t\tassert entry.ndim >= 2, f\"Arrays for grid must have >= 2 dimensions. 
Entry ({i}, {j}) has shape {entry.shape}.\"\n\t\t\tentry = reduce_ax(entry, 3) # reduce dimensions to just get a single image\n\n\t\t\t# handle segmentations\n\t\t\tif entry.shape[-1] > 4: # if last axis is clearly a width/height axis\n\t\t\t\tentry = seg_to_rgb(reduce_ax(entry, 2))\n\n\t\t\tentry = entry[..., :3] # only take first 3 channels\n\n\t\t\tif i == j == 0:\n\t\t\t\tH, W, _ = entry.shape\n\n\t\t\tentry = entry.astype(np.uint8)\n\t\t\trow.append(entry)\n\n\t\tfor i in range(M - len(raw_row)):\n\t\t\trow.append(np.zeros((H, W, 3), dtype=np.uint8)) # pad each row with black images if not enough items\n\n\t\t# stack the row images together\n\t\ttry:\n\t\t\trows.append(np.hstack(row))\n\t\texcept:\n\t\t\traise ValueError(\n\t\t\t\tf\"Could not combine row {j}, of raw shapes: {[x.shape for x in raw_row]}. Attempted conversion to shapes: {[x.shape for x in row]}\")\n\n\treturn np.vstack(rows)" }, { "identifier": "put_text", "path": "FOUND/utils/vis.py", "snippet": "def put_text(img, string, x, y, width, height, backg=(0,0,0), scale=1, vertical=False):\n\t\"\"\"Place text on an image, with top left corner (x,y), and a given width height.\n\tWhite text, black background fixed.\n\tVertical flag used to rotate 90 degrees anticlockwise\"\"\"\n\n\tout = img.copy()\n\tout[y:y+height, x:x+width] = get_text(string.split('\\n'), width, height, scale=scale, backg=backg, vertical=vertical)\n\treturn out" }, { "identifier": "colourbar", "path": "FOUND/utils/vis.py", "snippet": "def colourbar(width, height, colours, points=(0, 1), orientation='vertical'):\n\t\"\"\"Produce a colour bar of size width x height.\n\tAt each point in `points`, the colour at point along the horizontal/vertical (depending on `orientation`)\n\tmust be the corresponding colour in `colour`. Between points, linearly interpolate.\"\"\"\n\n\tassert len(colours) == len(points), \"Colours to points must be 1-1 correspondence for colourbar\"\n\tcolours = np.array(colours)\n\n\timg = np.zeros((height, width, 3))\n\tfor (c0, p0, c1, p1) in zip(colours, points, colours[1:], points[1:]):\n\t\tif orientation == 'vertical':\n\t\t\tv0, v1 = int(p0*height), int(p1*height)\n\t\t\timg[v0: v1] = c0[None, None, :] + np.linspace(0, 1, v1-v0)[:, None, None] * (c1 - c0)[None, None, :]\n\n\t\telse:\n\t\t\th0, h1 = int(p0 * width), int(p1 * width)\n\t\t\timg[:, h0:h1] = c0 + np.linspace(0, 1, h1 - h0) * (c1 - c0)\n\n\treturn img.astype(np.uint8)" } ]
from pytorch3d.renderer import TexturesVertex
from pytorch3d.structures import Meshes
from multiprocessing import Process
from prettytable import PrettyTable
from .pytorch3d import modified_chamf, modified_sample
from .renderer import Renderer, view_from
from .vis import produce_grid, put_text, colourbar
from matplotlib import pyplot as plt
import os
import trimesh
import cv2
import multiprocessing as mp
import torch
import torch.nn.functional as F
import numpy as np
import json
8,026
sole_vert_positions = pred_mesh_trimesh.vertices[FIND_sole_verts] # save sole vertex positions to refind them after mesh pre-processing pred_mesh_trimesh.update_faces(~np.isin(np.arange(pred_mesh_trimesh.faces.shape[0]), FIND_cutoff_surface)) pred_mesh_trimesh = cutoff_slice_FIND(pred_mesh_trimesh) # define a mask # want to be able to define a mask on the FIND model, so that errors of verts in this mask aren't considered real -> pred, but are considered in reverse # (for sole verts, unfair to count the error on them, but likewise incorrect to just remove them all, especially at the boundary) # recalculate sole vertices FIND_sole_vert_idxs = np.argwhere(np.all(pred_mesh_trimesh.vertices[:, None, :] == sole_vert_positions[None, ...], axis=-1))[:, 0] FIND_sole_vertex_mask = np.isin(np.arange(pred_mesh_trimesh.vertices.shape[0]), FIND_sole_vert_idxs) # mask of which vertices correspond to the sole FIND_sole_faces_mask = np.any(FIND_sole_vertex_mask[pred_mesh_trimesh.faces], axis=-1) # mask of which faces are in sole else: pred_mesh_trimesh = cutoff_slice_FIND(pred_mesh_trimesh) # Convert to PyTorch3D p3d_from_trimesh = lambda mesh: Meshes(verts=torch.from_numpy(np.asarray(mesh.vertices)[None, ...]).float(), faces=torch.from_numpy(np.asarray(mesh.faces)[None, ...])).to(device) gt_mesh = p3d_from_trimesh(gt_mesh_trimesh) pred_mesh = p3d_from_trimesh(pred_mesh_trimesh) # Sample vertices uniformly from mesh, returning vertex position, normal, and original face/vert idxs gt_sample_dict = modified_sample(gt_mesh, num_samples=10_000, return_normals=True) pred_sample_dict = modified_sample(pred_mesh, num_samples=10_000, return_normals=True) # Calculate errors for reporting - by considering samples over the surface errs = modified_chamf(pred_sample_dict['verts'], gt_sample_dict['verts'], x_normals=pred_sample_dict['normals'], y_normals=gt_sample_dict['normals']) # Calculate errors for visualisation - by considering every vertex vis_errs = modified_chamf(pred_mesh.verts_padded(), gt_mesh.verts_padded(), x_normals=pred_mesh.verts_normals_padded(), y_normals=gt_mesh.verts_normals_padded()) # convert from cosine similarity to error in degrees errs['cham_norm_x'] = torch.rad2deg(torch.acos(errs['cham_norm_x'])) errs['cham_norm_y'] = torch.rad2deg(torch.acos(errs['cham_norm_y'])) vis_errs['cham_norm_x'] = torch.rad2deg(torch.acos(vis_errs['cham_norm_x'])) vis_errs['cham_norm_y'] = torch.rad2deg(torch.acos(vis_errs['cham_norm_y'])) if settings.get('model', 'FIND') == 'FIND': # apply masking here to not include errors for sole in pred -> real # errs has a sample of the vertices in, need to do correct indexing sampled_vertex_mask = FIND_sole_faces_mask[pred_sample_dict['face_idxs'].cpu().detach().numpy()[0]] errs['cham_x'][:, sampled_vertex_mask] = np.nan errs['cham_norm_x'][:, sampled_vertex_mask] = np.nan # vis_errs has all vertices in mesh in vis_errs['cham_x'][:, FIND_sole_vertex_mask] = np.nan vis_errs['cham_norm_x'][:, FIND_sole_vertex_mask] = np.nan # visualisation info for each metric of error vis_params = { 'cham': dict(vmin=0, vmax=1e-4, mag=1_000_000, units='um', cutoffs=np.array([5, 10, 15, 20, 25])*1e-6, xscale='log'), 'cham_norm': dict(vmin=0, vmax=60, mag=1, units='deg', cutoffs=[5, 7.5, 11.25, 22.5, 30], xscale='lin') } # define axes fig, axs = plt.subplots(nrows=2, ncols=2, sharex='col') axs[0, 0].set_title('Chamfer Error') axs[0, 1].set_title('Normal Error') axs[0, 0].set_ylabel('pred2real') axs[1, 0].set_ylabel('real2pred') axs[1, 0].set_xlabel('um') axs[1, 1].set_xlabel('Degrees') 
axs[1,1].set_xlim(0, 90) axs[1, 1].set_yticks([0, 30, 60, 90]) with Reporter(os.path.join(eval_dir, 'report.txt')) as report: report(f"Experiment: {exp_dir}") i = 0 for L in ['cham', 'cham_norm']: report(L) table = PrettyTable() cutoffs = vis_params[L]['cutoffs'] mag = vis_params[L]['mag'] table.field_names = ['Desc', 'Mean', 'Median', 'RMSE'] + [f'% < {round(x*mag)}' for x in cutoffs] for desc, x in zip(['pred2real', 'real2pred'], ['x', 'y']): e = errs[f'{L}_{x}'].cpu().detach().numpy() e = e[~np.isnan(e)] # filter out nan values metrics = eval_metrics(e, cutoffs=cutoffs) table.add_row([desc] + [f'{metrics[k] * mag:.2f}' for k in ['mean', 'median', 'rmse']] + [f'{i * 100:.1f}' for i in metrics['cutoffs']] ) # plot distribution of errors ax = axs[i%2, i//2] if vis_params[L]['xscale'] == 'log': ax.hist(**get_loghist(np.ravel(e)*mag, 100), density=True) ax.set_xscale('log') else: ax.hist(np.ravel(e) * mag, bins=100, density=True) i+=1 results[f'{L}_{desc}'] = {**{k: metrics[k] * mag for k in ['mean', 'median', 'rmse']}, **{f'% < {round(c*mag)}': i * 100 for c, i in zip(cutoffs, metrics['cutoffs'])}} report(table.get_string()) report("") plt.savefig(os.path.join(eval_dir, 'err_dist.png')) plt.close() # Set up rendering if render: renderer: Renderer = Renderer(image_size=256, max_faces_per_bin=100_000, device=device)
"""Evaluate the performance of a fitted mesh""" device = 'cuda' def eval_metrics(arr, cutoffs=[5, 7.5, 11.25, 22.5, 30]): """Given a 1d array, return mean, median, rmse, and % of values less than each in `cutoffs`""" assert arr.ndim == 1, "eval_metrics requires 1D array" out = dict(mean = arr.mean(), median = np.median(arr), rmse = (arr ** 2).mean() **.5, cutoffs = [(arr < i).mean() for i in cutoffs]) return out def err_to_colour(err: torch.Tensor, vmin:float=None, vmax:float=None, colmin=(0, 1, 0), colmax=(1, 0, 0), nan_colour=(0.3, 0.3, 0.3)): """Convert a tensor of errors (...) to an RGB colour scale (..., 3). Linearly interpolate so that err of vmin -> colmin, err of vmax -> colmax if vmin and vmax not given, take min and max of err If any nan's given, set their colour to nan_colour """ ndim = err.ndim colmin = torch.tensor(colmin)[(None,)*ndim].to(err.device) # expand colmin to [..., 3] colmax = torch.tensor(colmax)[(None,)*ndim].to(err.device) colnan = torch.tensor(nan_colour)[(None,)*ndim].to(err.device) vmin = err.nanmin() if vmin is None else vmin vmax = err.nanmax() if vmax is None else vmax fracs = (err - vmin) / (vmax - vmin) rgba = (colmin + fracs.unsqueeze(-1) * (colmax - colmin)).to(err.device) rgba = torch.clip(rgba, min=0, max=1) rgba[torch.any(torch.isnan(rgba), dim=-1)] = colnan return rgba class Reporter: """Receive statements, on exit print all and save all to file""" def __init__(self, out_file_loc): self.lines = [] self.out_file_loc = out_file_loc def __call__(self, line): self.lines.append(line) def __enter__(self, *args): return self def __exit__(self, *args): [*map(print, self.lines)] with open(self.out_file_loc, 'w') as outfile: outfile.writelines([s + '\n' for s in self.lines]) def get_max_fit(exp_dir): """Search in an experiment directory for the fit_xx.obj with the highest value""" f = lambda s: -1 if 'fit_' not in s else int(s.split('fit_')[1].split('.obj')[0]) return max(os.listdir(exp_dir), key=f) def cutoff_slice_FIND(mesh, max_heel_height = 0.04, cutoff_height = 0.1): """Similar mesh slicing method to FIND: identify heel keypoint, slice off 1cm above""" X, Y, Z = mesh.vertices.T Xma = np.ma.array(X, mask= Z >= max_heel_height) heel_idx = np.ma.argmin(Xma) slice_height = min(Z[heel_idx] + cutoff_height, Z.max() - 5e-3) return mesh.slice_plane([0, 0, slice_height], [0, 0, -1], cap=False) def get_loghist(x, nbins): hist, bins = np.histogram(x, bins=nbins) logbins = np.logspace(np.log10(bins[0]),np.log10(bins[-1]),len(bins)) return dict(x=x, bins=logbins) def eval_exp(exp_dir, render=True): results = {} # return results as errors if not any('fit_' in f for f in os.listdir(exp_dir)): print(f"No fits for {exp_dir}, skipping...") return pred_obj_loc = os.path.join(exp_dir, get_max_fit(exp_dir)) # load settings to get folder opts_loc = os.path.join(exp_dir, 'opts.json') if not os.path.isfile(opts_loc): print(f"No opts for {exp_dir}, skipping...") return with open(opts_loc) as infile: settings = json.load(infile) # assume GT OBJ loc is # (1) saved in <data_folder>/mesh.obj if <data_folder> given if 'data_folder' in settings: gt_obj_loc = os.path.join(settings['data_folder'], 'mesh.obj') # (2) saved in <exp_dir>/gt_mesh.obj otherwise else: gt_obj_loc = os.path.join(exp_dir, 'gt_mesh.obj') eval_dir = os.path.join(exp_dir, 'eval') os.makedirs(eval_dir, exist_ok=True) with open(gt_obj_loc) as infile: d = trimesh.exchange.obj.load_obj(infile, process=False) gt_mesh_trimesh = trimesh.Trimesh(**d) with open(pred_obj_loc) as infile: d = 
trimesh.exchange.obj.load_obj(infile, process=False) pred_mesh_trimesh = trimesh.Trimesh(**d) # pre-process meshes, w/ cutoff # Same method as used for Foot3D here for slicing GT gt_mesh_trimesh = cutoff_slice_FIND(gt_mesh_trimesh) if settings.get('model', 'FIND') == 'FIND': # slice FIND faces FIND_cutoff_surface = np.load(os.path.join(settings['find_pth'], 'templ_masked_faces.npy')) FIND_sole_faces = np.load(os.path.join(settings['find_pth'], 'templ_sole_faces.npy')) FIND_sole_verts = np.unique(np.ravel(pred_mesh_trimesh.faces[FIND_sole_faces])) # all vertices considered part of the sole sole_vert_positions = pred_mesh_trimesh.vertices[FIND_sole_verts] # save sole vertex positions to refind them after mesh pre-processing pred_mesh_trimesh.update_faces(~np.isin(np.arange(pred_mesh_trimesh.faces.shape[0]), FIND_cutoff_surface)) pred_mesh_trimesh = cutoff_slice_FIND(pred_mesh_trimesh) # define a mask # want to be able to define a mask on the FIND model, so that errors of verts in this mask aren't considered real -> pred, but are considered in reverse # (for sole verts, unfair to count the error on them, but likewise incorrect to just remove them all, especially at the boundary) # recalculate sole vertices FIND_sole_vert_idxs = np.argwhere(np.all(pred_mesh_trimesh.vertices[:, None, :] == sole_vert_positions[None, ...], axis=-1))[:, 0] FIND_sole_vertex_mask = np.isin(np.arange(pred_mesh_trimesh.vertices.shape[0]), FIND_sole_vert_idxs) # mask of which vertices correspond to the sole FIND_sole_faces_mask = np.any(FIND_sole_vertex_mask[pred_mesh_trimesh.faces], axis=-1) # mask of which faces are in sole else: pred_mesh_trimesh = cutoff_slice_FIND(pred_mesh_trimesh) # Convert to PyTorch3D p3d_from_trimesh = lambda mesh: Meshes(verts=torch.from_numpy(np.asarray(mesh.vertices)[None, ...]).float(), faces=torch.from_numpy(np.asarray(mesh.faces)[None, ...])).to(device) gt_mesh = p3d_from_trimesh(gt_mesh_trimesh) pred_mesh = p3d_from_trimesh(pred_mesh_trimesh) # Sample vertices uniformly from mesh, returning vertex position, normal, and original face/vert idxs gt_sample_dict = modified_sample(gt_mesh, num_samples=10_000, return_normals=True) pred_sample_dict = modified_sample(pred_mesh, num_samples=10_000, return_normals=True) # Calculate errors for reporting - by considering samples over the surface errs = modified_chamf(pred_sample_dict['verts'], gt_sample_dict['verts'], x_normals=pred_sample_dict['normals'], y_normals=gt_sample_dict['normals']) # Calculate errors for visualisation - by considering every vertex vis_errs = modified_chamf(pred_mesh.verts_padded(), gt_mesh.verts_padded(), x_normals=pred_mesh.verts_normals_padded(), y_normals=gt_mesh.verts_normals_padded()) # convert from cosine similarity to error in degrees errs['cham_norm_x'] = torch.rad2deg(torch.acos(errs['cham_norm_x'])) errs['cham_norm_y'] = torch.rad2deg(torch.acos(errs['cham_norm_y'])) vis_errs['cham_norm_x'] = torch.rad2deg(torch.acos(vis_errs['cham_norm_x'])) vis_errs['cham_norm_y'] = torch.rad2deg(torch.acos(vis_errs['cham_norm_y'])) if settings.get('model', 'FIND') == 'FIND': # apply masking here to not include errors for sole in pred -> real # errs has a sample of the vertices in, need to do correct indexing sampled_vertex_mask = FIND_sole_faces_mask[pred_sample_dict['face_idxs'].cpu().detach().numpy()[0]] errs['cham_x'][:, sampled_vertex_mask] = np.nan errs['cham_norm_x'][:, sampled_vertex_mask] = np.nan # vis_errs has all vertices in mesh in vis_errs['cham_x'][:, FIND_sole_vertex_mask] = np.nan vis_errs['cham_norm_x'][:, 
FIND_sole_vertex_mask] = np.nan # visualisation info for each metric of error vis_params = { 'cham': dict(vmin=0, vmax=1e-4, mag=1_000_000, units='um', cutoffs=np.array([5, 10, 15, 20, 25])*1e-6, xscale='log'), 'cham_norm': dict(vmin=0, vmax=60, mag=1, units='deg', cutoffs=[5, 7.5, 11.25, 22.5, 30], xscale='lin') } # define axes fig, axs = plt.subplots(nrows=2, ncols=2, sharex='col') axs[0, 0].set_title('Chamfer Error') axs[0, 1].set_title('Normal Error') axs[0, 0].set_ylabel('pred2real') axs[1, 0].set_ylabel('real2pred') axs[1, 0].set_xlabel('um') axs[1, 1].set_xlabel('Degrees') axs[1,1].set_xlim(0, 90) axs[1, 1].set_yticks([0, 30, 60, 90]) with Reporter(os.path.join(eval_dir, 'report.txt')) as report: report(f"Experiment: {exp_dir}") i = 0 for L in ['cham', 'cham_norm']: report(L) table = PrettyTable() cutoffs = vis_params[L]['cutoffs'] mag = vis_params[L]['mag'] table.field_names = ['Desc', 'Mean', 'Median', 'RMSE'] + [f'% < {round(x*mag)}' for x in cutoffs] for desc, x in zip(['pred2real', 'real2pred'], ['x', 'y']): e = errs[f'{L}_{x}'].cpu().detach().numpy() e = e[~np.isnan(e)] # filter out nan values metrics = eval_metrics(e, cutoffs=cutoffs) table.add_row([desc] + [f'{metrics[k] * mag:.2f}' for k in ['mean', 'median', 'rmse']] + [f'{i * 100:.1f}' for i in metrics['cutoffs']] ) # plot distribution of errors ax = axs[i%2, i//2] if vis_params[L]['xscale'] == 'log': ax.hist(**get_loghist(np.ravel(e)*mag, 100), density=True) ax.set_xscale('log') else: ax.hist(np.ravel(e) * mag, bins=100, density=True) i+=1 results[f'{L}_{desc}'] = {**{k: metrics[k] * mag for k in ['mean', 'median', 'rmse']}, **{f'% < {round(c*mag)}': i * 100 for c, i in zip(cutoffs, metrics['cutoffs'])}} report(table.get_string()) report("") plt.savefig(os.path.join(eval_dir, 'err_dist.png')) plt.close() # Set up rendering if render: renderer: Renderer = Renderer(image_size=256, max_faces_per_bin=100_000, device=device)
R, T = view_from(['side1', 'topdown', 'side2'])
3
2023-10-24 11:46:42+00:00
12k
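The context entries in the rows above and below are lists of JSON objects, each carrying an "identifier", a "path" and a "snippet" of source code. Purely as an illustrative sketch (not part of the dataset), and assuming such a list has been parsed from a hypothetical file named context.json, the snippets can be grouped by source path for easier inspection:

import json
from collections import defaultdict

# context.json is a hypothetical placeholder for one parsed context list,
# i.e. a JSON array of dicts with "identifier", "path" and "snippet" keys.
with open("context.json") as f:
    context = json.load(f)

# Group the retrieved snippets by the file they were taken from.
by_path = defaultdict(list)
for entry in context:
    by_path[entry["path"]].append(entry)

# Print a short summary per source file: which identifiers were retrieved
# and how much snippet text they contribute in total.
for path, entries in by_path.items():
    names = ", ".join(e["identifier"] for e in entries)
    total = sum(len(e["snippet"]) for e in entries)
    print(f"{path}: {len(entries)} snippet(s) ({names}), {total} characters")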
RobertCsordas/moe
tasks/simple/language_model/enwik8_transformer.py
[ { "identifier": "TransformerLanguageModel", "path": "models/transformer_language_model.py", "snippet": "class TransformerLanguageModel(LoggingLayer, torch.nn.Module):\n def __init__(self, voc_size: int, embedding_size: Optional[int], state_size: int, dropout: float,\n tied_embedding: bool, layers: List[torch.nn.Module], n_prev_states: int,\n n_prev_states_test: Optional[int] = None, adaptive_cutoffs: List[int] = [],\n same_length_eval: bool = True, norm_before_output: bool = False,\n p_drop_layer: float = 0.0, use_last_state: bool = False, same_length: bool = False,\n output_mode: str = \"normal\"):\n\n super().__init__()\n\n self.embedding = torch.nn.Embedding(voc_size, embedding_size or state_size)\n # with torch.no_grad():\n # self.embedding.weight.uniform_(-0.1, 0.1)\n\n torch.nn.init.xavier_uniform_(self.embedding.weight)\n\n self.shared_layers = all([la is layers[0] for la in layers])\n\n if embedding_size is None:\n self.embedding_adapter = lambda x: x\n else:\n self.embedding_adapter = torch.nn.Linear(embedding_size, state_size)\n\n self.dropout = torch.nn.Dropout(dropout)\n self.layers = torch.nn.ModuleList(layers)\n self.output_adapter = lambda x: x\n self.n_prev_states = n_prev_states\n self.n_prev_states_test = n_prev_states_test or n_prev_states\n self.same_length_eval = same_length_eval\n self.embedding_scale = math.sqrt(state_size)\n self.p_drop_layer = p_drop_layer\n self.use_last_state = use_last_state\n self.same_length = same_length\n self.iter = 0\n self.output_mode = output_mode\n\n assert self.output_mode in {\"normal\", \"sum\", \"geometric\", \"sigmoid\"}\n\n if self.output_mode in {\"geometric\", \"sigmoid\"}:\n self.output_gate = torch.nn.Linear(state_size, 1)\n\n self.adaptive = bool(adaptive_cutoffs)\n\n out_proj_size = (embedding_size or state_size) if tied_embedding else state_size\n if self.adaptive:\n self.output = framework.layers.CustomAdaptiveLogSoftmaxWithLoss(\n out_proj_size, voc_size, adaptive_cutoffs, div_value=1,\n tied_to=self.embedding if tied_embedding else None)\n else:\n self.output = torch.nn.Linear(out_proj_size, voc_size)\n\n if norm_before_output or self.output_mode in {\"sum\", \"sigmoid\"}:\n self.out_norm = torch.nn.LayerNorm(state_size)\n else:\n self.out_norm = lambda x: x\n\n if tied_embedding:\n if not self.adaptive:\n self.output.weight = self.embedding.weight\n if embedding_size is not None:\n self.output_adapter = torch.nn.Linear(state_size, embedding_size)\n\n @staticmethod\n def generate_history_mask(sz: int, device: torch.device) -> torch.Tensor:\n return torch.tril(torch.ones(sz, sz, dtype=torch.bool, device=device), diagonal=-1)\n\n def gen_output(self, x: torch.Tensor, target: Optional[torch.Tensor]) -> torch.Tensor:\n net = self.out_norm(x)\n net = self.output_adapter(net)\n net = self.dropout(net)\n\n if self.adaptive:\n net = self.output(net.transpose(0, 1), target)\n else:\n net = self.output(net.transpose(0, 1))\n\n return net\n\n def accumulate_output(self, features: List[torch.Tensor]) -> torch.Tensor:\n if self.output_mode == \"sum\":\n return sum(features)\n elif self.output_mode in {\"geometric\", \"sigmoid\"}:\n # Must cast it to float16, otherwise pytorch will crash after a few hundred iterations with an\n # incomprehensible error in the gradient scaler\n gates = torch.sigmoid(torch.cat([self.output_gate(f).float() for f in features], -1))\n if self.output_mode == \"geometric\":\n ngates = torch.cumprod(1.0 - gates, -1)\n scores = torch.cat([gates[..., 0:1], gates[..., 1:] * ngates[..., :-1]], -1)\n else:\n 
scores = gates\n\n if self.iter % 100 == 0 and self.training:\n self.log(\"output_gate_mean\", framework.visualize.plot.Barplot(scores.flatten(end_dim=-2).mean(0)))\n # return sum(f * scores[..., i: i+1] for i, f in enumerate(features))\n f = scores.unsqueeze(-2) @ torch.stack(features, -2)\n return f.squeeze(-2)\n else:\n assert False, \"Invalid output mode\"\n\n def forward(self, x: torch.Tensor, target: Optional[torch.Tensor], state) -> Tuple[torch.Tensor, Any]:\n causality_mask = Transformer.generate_square_subsequent_mask(x.shape[0], x.device)\n\n net = self.dropout(self.embedding(x.T.long()))\n net = self.embedding_adapter(net)\n net = net * self.embedding_scale\n\n new_state = []\n features = [net]\n\n n_prev_states = self.n_prev_states if self.training else self.n_prev_states_test\n\n same_length = self.same_length or ((not self.training) and self.same_length_eval)\n if same_length and state is not None:\n causality_mask = [self.generate_history_mask(x.shape[0], x.device)] + \\\n [torch.zeros_like(causality_mask)] * (len(state[0]) - 1) + [causality_mask]\n causality_mask = torch.cat(causality_mask, -1)\n\n plot_cossim = (self.iter % 100 == 0 and self.training)\n for li, l in enumerate(self.layers):\n if n_prev_states > 0:\n if li == 0:\n # Pos offset should be constant for all layers\n pos_offset = sum(s.shape[1] for s in state[0]) if state is not None else 0\n\n # Concatenate the new state with the previous states\n li_r = 0 if self.use_last_state else li\n s = (state[li_r] + [net]) if state is not None else [net]\n attend_to = torch.cat(s, 1)\n\n if not self.use_last_state:\n s[-1] = s[-1].detach()\n new_state.append(s[-n_prev_states:])\n else:\n pos_offset = None\n attend_to = None\n\n net_o = l(net, mask=AttentionMask(None, causality_mask), attend_to=attend_to,\n pos_offset=pos_offset)\n\n if plot_cossim or self.output_mode != \"normal\":\n features.append(net_o)\n\n with torch.no_grad():\n ndiff = torch.norm(net_o - net, p=2, dim=-1)\n n_in = torch.norm(net, p=2, dim=-1)\n self.log(f\"activation_norm/abs_update_layer_{li}\", ndiff.mean())\n self.log(f\"activation_norm/in_layer_{li}\", n_in.mean())\n self.log(f\"activation_norm/rel_update_layer_{li}\", (ndiff/n_in.clamp(min=torch.finfo(n_in.dtype).eps)).mean())\n\n if self.training and self.p_drop_layer > 0.0:\n net = torch.where(torch.rand_like(net_o[..., 0:1]) < self.p_drop_layer, net, net_o)\n else:\n net = net_o\n\n if self.use_last_state and n_prev_states > 0:\n # If we carry over the last state, save it here\n new_state = [((state[0] if state is not None else []) + [net.detach()])[-n_prev_states:]]\n\n if self.output_mode != \"normal\":\n net = self.accumulate_output(features)\n\n if plot_cossim:\n with torch.no_grad():\n f_sample = [f.view(-1, f.shape[-1])[:1024] for f in features]\n f_sample_all = torch.stack(f_sample, -2)\n scores = framework.utils.cossim(f_sample_all, f_sample_all).mean(0)\n self.log(\"feature_cossim\", framework.visualize.plot.Heatmap(scores, range=(0, 1), textval=False))\n\n if self.output_mode != \"normal\":\n f_sample = [self.accumulate_output(f_sample[:i]) for i in range(1, len(f_sample)+1)]\n f_sample_all = torch.stack(f_sample, -2)\n\n outs = F.softmax(self.gen_output(f_sample_all, target).transpose(0, 1), -1)\n scores = framework.utils.cossim(outs, outs).mean(0)\n self.log(\"out_dist_cossim\", framework.visualize.plot.Heatmap(scores, range=(0, 1), textval=False))\n\n real_out = outs[:, -1]\n for i in range(outs.shape[-2] - 1):\n self.log(f\"out_diff_{i}\", (outs[:, i] - real_out).norm(dim=-1, 
p=1).mean())\n\n del outs\n del features\n\n net = self.gen_output(net, target)\n self.iter += 1\n\n return net, new_state" }, { "identifier": "task", "path": "tasks/task_db.py", "snippet": "def task(name: Optional[str] = None):\n def wrapper(cls):\n n = TASK_PREFIX + (name or camel_to_snake(cls.__name__))\n assert n not in TASKS, f\"Task {n} already exists\"\n TASKS[n] = cls\n return cls\n return wrapper" }, { "identifier": "args", "path": "tasks/task_db.py", "snippet": "def args(fn):\n global ARGS_REGISTERS\n ARGS_REGISTERS.append(fn)\n return fn" }, { "identifier": "TransformerLMMixin", "path": "tasks/simple/language_model/transformer_lm_mixin.py", "snippet": "class TransformerLMMixin:\n helper: framework.helpers.TrainingHelper\n\n def is_preln(self) -> bool:\n return \"preln\" in self.helper.args.transformer.variant\n\n def topk_activation(self, x: torch.Tensor) -> torch.Tensor:\n nx = -x\n return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0)\n\n def get_layers(self) -> List[torch.nn.Module]:\n # pyright: reportOptionalMemberAccess=false\n if self.helper.args.transformer.activation == \"relu\":\n activation = F.relu\n elif self.helper.args.transformer.activation == \"topk\":\n activation = self.topk_activation\n elif self.helper.args.transformer.activation == \"identity\":\n activation = lambda x: x\n elif self.helper.args.transformer.activation == \"sigmoid\":\n activation = torch.sigmoid\n elif self.helper.args.transformer.activation == \"gelu\":\n activation = F.gelu\n elif self.helper.args.transformer.activation == \"softmax\":\n activation = lambda x: F.softmax(x, dim=-1)\n else:\n raise ValueError(f\"Invalid activation: {self.helper.args.transformer.activation}\")\n\n base_args = dict(\n d_model=self.helper.args.state_size,\n nhead=self.helper.args.transformer.n_heads,\n dim_feedforward=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier),\n dropout=self.helper.args.dropout,\n activation=activation\n )\n\n\n extra_args = {} if not self.helper.args.transformer.variant.endswith(\"_gelu\") else {\n \"activation\": F.gelu,\n \"drop_expand\": False\n }\n\n\n if self.helper.args.transformer.variant in {\"preln_relative\"}:\n mklayer = lambda: PrelnRelativeTransformerEncoderLayer(\n **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp,\n n_layers=self.helper.args.transformer.encoder_n_layers,\n head_projection_size=self.helper.args.transformer.head_projection_size,)\n elif self.helper.args.transformer.variant in {\"preln_topk\"}:\n mklayer = lambda: TopkTransformer(\n **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp,\n n_layers=self.helper.args.transformer.encoder_n_layers, k=self.helper.args.transformer.topk_value,\n use_norm=self.helper.args.transformer.topk_use_norm,\n head_projection_size=self.helper.args.transformer.head_projection_size,)\n elif self.helper.args.transformer.variant in {\"preln_kvmem\"}:\n mklayer = lambda: PrelnRelativeKVMemTransformerEncoderLayer(\n **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp,\n n_layers=self.helper.args.transformer.encoder_n_layers, n_keys=self.helper.args.pkm.n_keys,\n pkm_stochastic=self.helper.args.pkm.stochastic, pkm_heads=self.helper.args.pkm.n_heads,\n pkm_custom_init=self.helper.args.pkm.custom_init, pkm_slice_values=self.helper.args.pkm.slice_values,\n pkm_knn=self.helper.args.pkm.knn, linproj=self.helper.args.kvmem.linproj,\n 
head_merge_topk=self.helper.args.kvmem.head_merge_topk,\n load_balance=self.helper.args.kvmem.load_balance, kvmem_dropout=self.helper.args.kvmem.dropout,\n kvmem_randomize_indices=self.helper.args.kvmem.randomize_indices,\n kvmem_query_bias=self.helper.args.kvmem.query_bias,\n standard_parallel=self.helper.args.kvmem.standard_parallel,\n approx_topk=self.helper.args.kvmem.approx_topk,\n factorize=self.helper.args.kvmem.factorize,\n full_key=self.helper.args.kvmem.full_key,\n key_redundancy_factor=self.helper.args.kvmem.key_redundancy_factor,\n two_stage=self.helper.args.kvmem.two_stage,\n head_exclusive=self.helper.args.kvmem.head_exclusive,\n head_projection_size=self.helper.args.transformer.head_projection_size,)\n elif self.helper.args.transformer.variant in {\"preln_moe\", \"preln_moe_universal\", \"moe\", \"moe_universal\"}:\n # def __init__(self, d_model, nhead, n_bins: int, bin_size: int, n_layers: int, dim_feedforward=2048,\n mklayer = lambda: RelativeMoeTransformerEncoderLayer(\n **base_args, **extra_args, preln=self.is_preln(),\n test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp,\n n_layers=self.helper.args.transformer.encoder_n_layers,\n standard_parallel=self.helper.args.kvmem.standard_parallel,\n custom_init=self.helper.args.pkm.custom_init,\n n_experts=self.helper.args.moe.n_experts,\n expert_size=self.helper.args.moe.expert_size,\n dropout_mode=self.helper.args.kvmem.dropout,\n knn=self.helper.args.pkm.knn,\n selection_mode=self.helper.args.moe.selection_mode,\n perplexity_reg=self.helper.args.moe.perplexity_reg,\n key_mode=self.helper.args.moe.key_mode,\n half_key=self.helper.args.moe.half_key,\n n_heads=self.helper.args.pkm.n_heads,\n norm_keys=self.helper.args.moe.norm_keys,\n perplexity_reg_mode=self.helper.args.moe.perplexity_reg_mode,\n n_random=self.helper.args.moe.n_random,\n reg_type=self.helper.args.moe.reg_type,\n std_correction=self.helper.args.moe.std_correction,\n topk_mode=self.helper.args.moe.topk_mode,\n head_projection_size=self.helper.args.transformer.head_projection_size,\n activation_after_topk=self.helper.args.moe.activation_after_topk,\n weight_grouping=self.helper.args.moe.weight_grouping,\n kmeans_distance=self.helper.args.moe.kmeans_distance,\n drop_parallel=self.helper.args.moe.drop_parallel,\n block_expert_sel_in_grad=self.helper.args.moe.block_expert_sel_in_grad,\n mlp_selection=self.helper.args.moe.mlp_selection,\n classification_target=self.helper.args.moe.classification_target,\n norm_key_init=self.helper.args.moe.norm_key_init,\n normalize_expert_sel_init=self.helper.args.moe.norm_expert_sel_init,\n norm_value_init=self.helper.args.moe.norm_value_init,\n norm_standard_parallel_values=self.helper.args.moe.norm_standard_parallel_values,\n identical_init=self.helper.args.moe.identical_init,\n topological_sel_reg=self.helper.args.moe.topological_sel_reg,\n topological_expert_reg=self.helper.args.moe.topological_expert_reg,\n gumbel_select_only=self.helper.args.moe.gumbel_select_only,\n topk_value_norm_compensation=self.helper.args.moe.topk_value_norm_compensation,\n norm_expert_scores=self.helper.args.moe.norm_expert_scores,\n sel_input_cluster_init=self.helper.args.moe.sel_input_cluster_init,\n init_norm_mode=self.helper.args.moe.init_norm_mode,\n sel_bias=self.helper.args.moe.sel_bias,\n bias=self.helper.args.moe.bias,\n rescale_normed=self.helper.args.moe.rescale_normed,\n sel_norm=self.helper.args.moe.sel_norm,\n rescale_grads=self.helper.args.moe.rescale_grads,\n gumbel_decay=self.helper.args.moe.gumbel_decay,\n 
ln_affine=self.helper.args.transformer.ln_affine,\n sinkhorn_local=self.helper.args.moe.sinkhorn_local,\n sinkhorn_n_iters=self.helper.args.moe.sinkhron_n_iters,\n moe_dropout_factor=self.helper.args.moe.dropout_factor,\n drop_expert=self.helper.args.moe.drop_expert,\n expert_size_init=self.helper.args.moe.expert_size_init,\n sync_distributed=self.helper.args.moe.sync_distributed,\n modulation_amplitude=self.helper.args.moe.modulation_amplitude,\n invisible_selection=self.helper.args.moe.invisible_selection,\n slope_multiplier=self.helper.args.moe.slope_multiplier,\n moe_init_scale=self.helper.args.moe.init_scale)\n else:\n assert False, \"Invalid variant\"\n\n layers = [mklayer() for _ in range(self.helper.args.transformer.encoder_n_layers)]\n return layers\n\n\n def fix_init(self, model):\n init_std = 0.02\n\n torch.nn.init.normal_(model.embedding.weight, 0.0, init_std)\n # torch.nn.init.normal_(model.embedding_adapter.weight, 0.0, init_std)\n\n initialized = 0\n for m in model.modules():\n if isinstance(m, (torch.nn.Linear, torch.nn.Embedding)) and hasattr(m, \"weight\"):\n torch.nn.init.normal_(m.weight, 0.0, init_std)\n initialized += m.weight.numel()\n if isinstance(m, (torch.nn.Linear, torch.nn.LayerNorm)) and m.bias is not None:\n torch.nn.init.constant_(m.bias, 0)\n initialized += m.bias.numel()\n if isinstance(m, (torch.nn.LayerNorm)) and m.weight is not None:\n torch.nn.init.normal_(m.weight, 1.0, init_std)\n initialized += m.weight.numel()\n if isinstance(m, MoE):\n torch.nn.init.normal_(m.keys, 0.0, init_std)\n torch.nn.init.normal_(m.values, 0.0, init_std)\n if m.expert_sel is not None:\n torch.nn.init.normal_(m.expert_sel, 0.0, init_std)\n initialized += m.expert_sel.numel()\n initialized += m.keys.numel() + m.values.numel()\n\n print(f\"Reinitialized {initialized/self.n_weights*100:.3f}% weights\")\n\n def create_model(self) -> torch.nn.Module:\n # pyright: reportOptionalMemberAccess=false\n tlayers = self.get_layers()\n\n if self.helper.args.transformer.output_mode != \"normal\" and self.is_preln():\n raise ValueError(\"accumulated_output not supported with pre-ln\")\n\n model = TransformerLanguageModel(\n len(self.train_set.vocabulary), self.helper.args.embedding_size,\n self.helper.args.state_size, self.helper.args.dropout,\n tied_embedding=self.helper.args.tied_embedding,\n layers=tlayers, n_prev_states=self.helper.args.lm.trafo.context_blocks,\n n_prev_states_test=self.helper.args.lm.trafo.test_context_blocks,\n same_length_eval=self.helper.args.lm.trafo.same_length_eval,\n p_drop_layer=self.helper.args.transformer.p_drop_layer,\n same_length=self.helper.args.lm.trafo.same_length,\n use_last_state=self.helper.args.lm.trafo.last_layer_context,\n norm_before_output=self.is_preln(), output_mode=self.helper.args.transformer.output_mode,)\n\n self.n_weights = sum(p.numel() for p in model.parameters())\n\n with torch.no_grad():\n if self.is_preln():\n model.embedding_scale = 1.0\n elif self.helper.args.lm.trafo.xl_init:\n self.fix_init(model)\n elif self.helper.args.lm.trafo.embedding_mode_init==\"scale_to_sqrt_dmodel\":\n norm = model.embedding.weight.norm(dim=-1).mean()\n model.embedding_scale = math.sqrt(self.helper.args.state_size) / norm\n elif self.helper.args.lm.trafo.embedding_mode_init==\"one_and_scale_to_sqrt_dmodel\":\n norm = model.embedding.weight.norm(dim=-1).mean()\n model.embedding_scale = math.sqrt(self.helper.args.state_size)\n model.embedding.weight.mul_(1.0 / norm)\n elif self.helper.args.lm.trafo.embedding_mode_init==\"init_to_sqrt_dmodel\":\n norm = 
model.embedding.weight.norm(dim=-1, keepdim=True)\n model.embedding_scale=1.0\n model.embedding.weight.mul_(math.sqrt(self.helper.args.state_size) / norm)\n\n return model\n\n def moe_recluster(self):\n for n, m in self.model.named_modules():\n if isinstance(m, MoE):\n perm = m.regroup_weights()\n m.patch_optimizer_state(self.optimizer, perm)\n\n def train_step(self) -> Tuple[Result, Dict[str, Any]]:\n if self.helper.args.kvmem.norm_values:\n with torch.no_grad():\n for m in self.model.modules():\n if isinstance(m, torch.nn.EmbeddingBag):\n m.weight.div_(m.weight.norm(dim=-1, keepdim=True))\n if self.helper.args.moe.recluster_steps:\n if self.helper.state.iter in self.helper.args.moe.recluster_steps:\n self.moe_recluster()\n\n return super().train_step()\n\n def get_optimizer_param_list(self):\n params = list(self.model.parameters())\n sel_params = []\n expert_params = []\n\n if self.helper.args.moe.sel_lr_multipler != 1.0:\n for m in self.model.modules():\n if isinstance(m, MoE):\n sel_params += list(m.sel.parameters()) if m.mlp_selection else [m.expert_sel]\n\n if self.helper.args.moe.expert_lr_multipler != 1.0:\n for m in self.model.modules():\n if isinstance(m, MoE):\n expert_params += [m.keys, m.values]\n\n excluded_params = [id(p) for p in sel_params + expert_params]\n params = [p for p in params if id(p) not in excluded_params]\n\n if not excluded_params:\n return params\n\n return [\n {\"params\": params},\n {\"params\": sel_params, \"lr\": self.helper.args.lr * self.helper.args.moe.sel_lr_multipler},\n {\"params\": expert_params, \"lr\": self.helper.args.lr * self.helper.args.moe.expert_lr_multipler},\n ]" }, { "identifier": "SimpleTask", "path": "tasks/simple/simple_task.py", "snippet": "class SimpleTask(Task):\n MAX_LENGHT_PER_BATCH = None\n train_set: torch.utils.data.Dataset\n train_loader: torch.utils.data.DataLoader\n model: torch.nn.Module\n\n def create_datasets(self):\n raise NotImplementedError()\n\n def create_model_interface(self):\n raise NotImplementedError()\n\n def create_model(self) -> torch.nn.Module:\n raise NotImplementedError()\n\n def create_state(self):\n pass\n\n @property\n def amp_enabled(self):\n return torch.cuda.is_available() and self.helper.args.amp\n\n @property\n def time_dim(self) -> int:\n return 1 - self.batch_dim\n\n def __init__(self, helper: framework.helpers.TrainingHelper):\n super().__init__(helper)\n\n self.avg_num_chunks = framework.utils.Average()\n self.reg_loss_average = framework.utils.DictAverage()\n self.max_grad = 0\n self.time_sum = 0\n\n self.create_datasets()\n self.create_loaders()\n self.model = self.create_model()\n self.model = self.model.to(self.helper.device)\n\n self.create_model_interface()\n self.create_optimizer()\n self.create_lr_scheduler()\n\n self.regularizer = LayerRegularizer(\n self.model, self.helper.args.stop_after, self.helper.args.reg_scales, self.helper.args.reg_lin_decay)\n\n self.scaler = torch.cuda.amp.GradScaler(enabled=self.amp_enabled)\n self.helper.saver[\"scaler\"] = self.scaler\n\n print(f\"Total number of model parameters: {sum(p.numel() for p in self.model.parameters())}\")\n\n self.helper.saver[\"model\"] = self.model\n self.create_state()\n self.helper.restore()\n\n self.fetcher = None\n\n def fetch_thread(self):\n data = self.prepare_data(self.get_train_batch())\n n_chunks = self.get_n_chunks(data)\n d_chunks = self.chunk_batch_dim(data, n_chunks)\n\n return data, d_chunks\n\n def create_train_loader(self, loader: torch.utils.data.Dataset, seed: Optional[int] = None,\n batch_size: 
Optional[int] = None) -> torch.utils.data.DataLoader:\n\n return super().create_train_loader_bs(loader, batch_size or self.helper.args.batch_size, seed)\n\n def set_train_set(self, ds: torch.utils.data.Dataset, seed: Optional[int] = None):\n self.train_set = ds\n self.train_loader = self.create_train_loader(self.train_set, seed)\n self.data_iter = iter(self.train_loader)\n\n def create_loaders(self):\n self.train_loader = self.create_train_loader(self.train_set)\n self.valid_loaders = framework.data_structures.DotDict()\n self.valid_loaders.update({k: self.create_valid_loader(v) for k, v in self.valid_sets.items()})\n\n def get_optimizer_param_list(self):\n return self.model.parameters()\n\n def create_optimizer(self):\n if self.helper.args.optimizer in [\"adam\", \"adamw\"]:\n opt = torch.optim.Adam if self.helper.args.optimizer == \"adam\" else torch.optim.AdamW\n self.set_optimizer(opt(self.get_optimizer_param_list(), self.helper.args.lr,\n weight_decay=self.helper.args.wd, betas=self.helper.args.adam.betas,\n eps=self.helper.args.adam.eps))\n elif self.helper.args.optimizer == \"adagrad\":\n self.set_optimizer(torch.optim.Adagrad(self.get_optimizer_param_list(), self.helper.args.lr,\n weight_decay=self.helper.args.wd))\n elif self.helper.args.optimizer == \"sgd\":\n self.set_optimizer(torch.optim.SGD(self.get_optimizer_param_list(), self.helper.args.lr,\n weight_decay=self.helper.args.wd, momentum=0.9))\n else:\n assert False, f\"Unsupported optimizer: {self.helper.args.optimizer}\"\n\n def set_optimizer(self, optimizer: torch.optim.Optimizer):\n self.optimizer = optimizer\n self.helper.saver.register(\"optimizer\", self.optimizer, replace=True)\n\n def get_train_batch(self) -> Dict[str, Any]:\n return next(self.data_iter)\n\n def chunk_batch_dim(self, data: Dict[str, Any], n: int) -> List[Dict[str, Any]]:\n if n == 1:\n return [data]\n\n res = [{} for _ in range(n)]\n for k, v in data.items():\n assert torch.is_tensor(v), \"Only tensors are supported by autosplitting\"\n\n bd = self.batch_dim if self.batch_dim < v.ndimension() else 0\n assert v.shape[bd] % n == 0, f\"Batch (dim {bd} of input {k} of shape {v.shape} is not divisible by {n})\"\n\n for i, c in enumerate(v.chunk(n, dim=bd)):\n res[i][k] = c\n\n # Avoid unnecessary computation.\n if \"in\" in data and \"in_len\" in data:\n for r in res:\n r[\"in\"] = r[\"in\"].narrow(1 - self.batch_dim, 0, int(r[\"in_len\"].max().item()))\n\n if \"out\" in data and \"out_len\" in data and data[\"out\"].ndim > 1:\n for r in res:\n r[\"out\"] = r[\"out\"].narrow(1 - self.batch_dim, 0, int(r[\"out_len\"].max().item()))\n\n return res\n\n def is_seq2seq_task(self, data: Dict[str, Any]) -> bool:\n return \"in_len\" in data and \"out_len\" in data\n\n def get_seq_length(self, data: Dict[str, Any]) -> int:\n # This assumes separate encoder and decoder\n return max(data[\"in\"].shape[self.time_dim], data[\"out\"].shape[self.time_dim] if data[\"out\"].ndim > 1 else 0)\n\n def get_n_chunks(self, data: Dict[str, Any]) -> int:\n if self.helper.args.n_microbatch:\n return self.helper.args.n_microbatch\n\n max_length_per_batch = self.helper.args.max_length_per_batch or self.MAX_LENGHT_PER_BATCH\n if self.is_seq2seq_task(data) and max_length_per_batch:\n # The formula below assumes quadratic memory consumption\n return int(2**int(self.get_seq_length(data) / max_length_per_batch))\n return 1\n\n def post_backward(self) -> Dict[str, Any]:\n return {}\n\n def train_step(self) -> Tuple[Result, Dict[str, Any]]:\n plots = {}\n\n if 
self.helper.args.speedtest==\"iter\":\n torch.cuda.synchronize()\n\n with self.forward_time_meter:\n self.set_lr()\n self.optimizer.zero_grad(set_to_none=True)\n\n data, d_chunks = self.fetcher.get()\n\n res_list = []\n weights = []\n\n self.avg_num_chunks.add(len(d_chunks))\n\n total_out_len = data[\"out_len\"].sum() if \"out_len\" in data else 1\n\n profiler = None\n # if self.helper.state.iter == 3:\n # profiler = torch.profiler.profile(activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA], record_shapes=True)\n # profiler.__enter__()\n\n\n call_pre_iter(self.model)\n for d in d_chunks:\n with torch.cuda.amp.autocast(enabled=self.amp_enabled):\n res, custom_plots = self.run_model(d)\n call_before_loss(self.model)\n res_list.append(res)\n plots.update(custom_plots)\n weights.append((d[\"out_len\"].sum()/total_out_len) if \"out_len\" in d else 1)\n reg_loss, reg_log = self.regularizer.get(self.helper.state.iter)\n self.reg_loss_average.add(reg_log)\n total_loss = (res_list[-1].loss + reg_loss * self.helper.args.reg) * self.helper.get_loss_scaling()\n\n if not torch.isfinite(total_loss):\n for n, p in self.model.named_parameters():\n if not torch.isfinite(p).all():\n print(f\"Found non-finite weight {n}\")\n\n for n, p in self.model.named_buffers():\n if not torch.isfinite(p).all():\n print(f\"Found non-finite buffer {n}\")\n assert False, \"Loss not finite\"\n\n self.scaler.scale(total_loss * weights[-1]).backward()\n plots.update(self.post_backward())\n\n if self.helper.dist_env.is_distributed:\n aops = []\n for p in self.model.parameters():\n if p.grad is None:\n continue\n aops.append(torch.distributed.all_reduce(p.grad.contiguous(), async_op=True))\n\n for a in aops:\n a.wait()\n\n\n call_post_iter(self.model)\n\n self.scaler.unscale_(self.optimizer)\n\n if self.helper.args.grad_clip:\n gn = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.helper.args.grad_clip)\n self.max_grad = max(self.max_grad, gn)\n\n\n if self.helper.args.log_grad_norms:\n for n, p in self.model.named_parameters():\n plots[f\"grad_norms/{n}\"] = p.detach().norm().item()\n\n\n self.scaler.step(self.optimizer)\n self.scaler.update()\n\n self.helper.state.iter += 1\n res = res_list[0].__class__.merge(res_list, weights)\n\n if self.helper.args.speedtest in {\"iter\"}:\n torch.cuda.synchronize()\n\n if profiler is not None:\n profiler.__exit__(None, None, None)\n profiler.export_chrome_trace(\"trace_all.json\")\n assert False\n\n\n # if self.helper.state.iter % 20 == 0:\n\n if \"in_len\" in data:\n n_total_tokens = (data[\"in_len\"] + data[\"out_len\"]).sum()\n if self.helper.dist_env.is_distributed:\n torch.distributed.all_reduce(n_total_tokens)\n\n self.total_n_token_in_period += n_total_tokens\n\n return res, plots\n\n def plot(self, res: Result) -> Dict[str, Any]:\n res = super().plot(res)\n\n if self.helper.args.dump_logs and self.helper.dist_env.is_master():\n dump_logs(self.model, self.helper.get_storage_path(\"log_dumps\") + f\"/{self.helper.state.iter}\")\n\n if self.helper.state.iter % 20 == 1:\n res.update(get_logs(self.model))\n\n res[\"average_num_chunks\"] = self.avg_num_chunks.get()\n for k, v in self.reg_loss_average.get().items():\n res[f\"train/reg_loss/{k}\"] = v\n\n if self.helper.args.grad_clip:\n res[\"max_grad\"] = self.max_grad\n self.max_grad = 0\n\n\n return res\n\n def train(self):\n self.loss_average.reset()\n\n self.data_iter = iter(self.train_loader)\n self.fetcher = framework.helpers.StoppingParallelProducer(self.fetch_thread)\n\n try:\n 
while (self.helper.args.stop_after or 10e10) > self.helper.state.iter:\n self.load_time_meter.stop()\n\n res, plots = self.train_step()\n plots.update(self.plot(res))\n\n with self.plot_time_meter:\n self.helper.log(plots)\n\n self.load_time_meter.start()\n\n self.helper.tick()\n except self.fetcher.Stopped:\n pass" }, { "identifier": "LanguageModelInterface", "path": "interfaces/language_model_interface.py", "snippet": "class LanguageModelInterface(ModelInterface):\n def __init__(self, model: torch.nn.Module, batch_dim: int = 1, drop_state_prob: float = 0,\n dist_env: Optional[DistributedEnv] = None, save_state: bool = False):\n super().__init__()\n self.model = model\n self.state = None\n self.batch_dim = batch_dim\n self.drop_state_prob = drop_state_prob\n self.time_dim = 1 - self.batch_dim\n self.dist_env = dist_env\n self.save_state = save_state\n\n def create_input(self, data: Dict[str, torch.Tensor]) -> torch.Tensor:\n return data[\"data\"].narrow(self.time_dim, 0, data[\"data\"].shape[self.time_dim] - 1)\n\n def decode_outputs(self, outputs: RecurrentResult) -> Any:\n return outputs.outputs\n\n def reset_state(self):\n self.state = None\n\n def loss(self, net_out: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n assert net_out.shape[:-1] == target.shape\n return F.cross_entropy(net_out.flatten(0, -2), target.flatten().long())\n\n def create_target(self, data: Dict[str, torch.Tensor]) -> torch.Tensor:\n return data[\"data\"].narrow(self.time_dim, 1, data[\"data\"].shape[self.time_dim] - 1).contiguous()\n\n def __call__(self, data: Dict[str, torch.Tensor]) -> LanguageModelResult:\n if self.model.training and self.drop_state_prob > 0 and random.random() < self.drop_state_prob:\n self.state = None\n\n input = self.create_input(data)\n target = self.create_target(data)\n\n res, state = self.model(input, target, self.state)\n if isinstance(res, torch.nn.modules.adaptive._ASMoutput):\n loss = res.loss\n # res = res.outputs\n else:\n loss = self.loss(res, target)\n\n self.state = U.apply_to_tensors(state, lambda x: x.detach())\n return LanguageModelResult(res, loss)\n\n def state_dict(self) -> Dict[str, Any]:\n if not self.save_state:\n return {}\n\n if self.dist_env is not None and self.dist_env.is_distributed:\n # Collect the state from all workers\n alist = [None] * self.dist_env.world_size\n state = torch.distributed.all_gather(alist, self.state)\n state = torch.cat(state, self.batch_dim)\n return {\"state\": state}\n else:\n return {\"state\": self.state}\n\n def load_state_dict(self, state: Dict[str, Any]):\n if not self.save_state:\n self.state = None\n return\n\n if self.dist_env is not None and self.dist_env.is_distributed:\n state_bs = state[\"state\"].shape[self.batch_dim]\n if state_bs % self.dist_env.world_size != 0:\n print(f\"WARNING: State batch size ({state_bs}) is not divisible by the number of workers ({self.dist_env.world_size}). Resetting state.\")\n self.state = None\n else:\n bs_per_worker = state_bs // self.dist_env.world_size\n self.state = state[\"state\"].narrow(self.batch_dim, self.dist_env.local_rank * bs_per_worker, bs_per_worker)\n else:\n self.state = state[\"state\"]" } ]
import framework
import torch
import torch.nn
import torch.utils.data
import dataset
import random
from models import TransformerLanguageModel
from ... import task, args
from .transformer_lm_mixin import TransformerLMMixin
from ..simple_task import SimpleTask
from typing import Tuple, Any, Dict, List, Union
from interfaces import LanguageModelInterface
9,238
@args
def a(parser: framework.helpers.ArgumentParser):
    parser.add_argument("-lm.state_drop_probability", default=0.0)
    parser.add_argument("-lm.lstm_weight_drop", default=0.0)
    parser.add_argument("-lm.unroll", default=100)
    parser.add_argument("-lm.unroll_eval", default="none", parser=parser.int_or_none_parser)
    parser.add_argument("-lm.example_context", default=100)
    parser.add_argument("-lm.example_window", default=40)
@args
def a(parser: framework.helpers.ArgumentParser):
    parser.add_argument("-lm.state_drop_probability", default=0.0)
    parser.add_argument("-lm.lstm_weight_drop", default=0.0)
    parser.add_argument("-lm.unroll", default=100)
    parser.add_argument("-lm.unroll_eval", default="none", parser=parser.int_or_none_parser)
    parser.add_argument("-lm.example_context", default=100)
    parser.add_argument("-lm.example_window", default=40)
@task()
1
2023-10-16 11:26:45+00:00
12k
blackgold3/SemanticBoost
mdm/model_util.py
[ { "identifier": "MDM", "path": "mdm/model/mdm.py", "snippet": "class MDM(nn.Module):\n def __init__(self, njoints, nfeats, latent_dim=256, ff_size=1024, num_layers=8, num_heads=4, dropout=0.1,\n activation=\"gelu\", dataset='amass', clip_dim=512,\n arch='trans_enc', clip_version=None, **kargs):\n super().__init__()\n\n self.local = kargs[\"local\"]\n self.encode_full = kargs.get(\"encode_full\", 0) #### encode_full = 1 add tokens & encode_full = 2 model compress tokens\n self.txt_tokens = kargs.get(\"txt_tokens\", 0) #### txt_tokens = 1 add tokens & txt_tokens = 2 model compress tokens\n self.dataset = dataset\n self.condition_length = 77\n self.num_frames = kargs.get(\"num_frames\", 196)\n self.json_dict = kargs.get(\"json_dict\")\n\n if arch.endswith(\"static\"):\n self.position_type = \"static\" #### [static or rope] only for llama arch\n self.arch = arch.replace(\"_static\", \"\")\n elif arch.endswith(\"rope\"):\n self.position_type = \"rope\"\n self.arch = arch.replace(\"_rope\", \"\")\n else:\n self.position_type = \"static\"\n self.arch = arch\n\n if isinstance(self.num_frames, list) or isinstance(self.num_frames, tuple):\n self.num_frames = self.num_frames[0]\n\n self.njoints = njoints\n self.nfeats = nfeats\n\n self.latent_dim = latent_dim\n\n self.ff_size = ff_size\n self.num_layers = num_layers\n self.num_heads = num_heads\n self.dropout = dropout\n\n self.activation = activation\n self.clip_dim = clip_dim\n self.action_emb = kargs.get('action_emb', None)\n\n self.input_feats = self.njoints * self.nfeats\n\n self.cond_mode = kargs.get('cond_mode', 'no_cond')\n self.cond_mask_prob = kargs.get('cond_mask_prob', 0.)\n\n\n self.input_process = InputProcess(self.input_feats, self.latent_dim) #### 输入 x 的 linear\n self.output_process = OutputProcess(self.input_feats, self.latent_dim, self.njoints,\n self.nfeats)\n\n self.sequence_pos_encoder = PositionalEncoding(self.latent_dim, self.dropout)\n\n if self.arch == 'trans_enc':\n print(\"TRANS_ENC init\")\n seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim,\n nhead=self.num_heads,\n dim_feedforward=self.ff_size,\n dropout=self.dropout,\n activation=self.activation)\n self.seqTransEncoder = nn.TransformerEncoder(seqTransEncoderLayer, num_layers=self.num_layers)\n\n elif self.arch == \"llama_encoder\":\n TransLayer = RefinedLayer(self.latent_dim, self.num_heads, self.ff_size, self.dropout, self.activation, max_seq_len=self.num_frames, position_type=self.position_type, norm_type=\"rmsnorm\")\n self.seqTransEncoder = Refined_Transformer(TransLayer, self.num_layers)\n\n elif self.arch == \"llama_decoder\":\n TransLayer = RefinedLayer(self.latent_dim, self.num_heads, self.ff_size, self.dropout, self.activation, max_seq_len=self.num_frames, position_type=self.position_type, word_tokens=True, norm_type=\"rmsnorm\")\n self.seqTransEncoder = Refined_Transformer(TransLayer, self.num_layers)\n\n else:\n raise ValueError('Please choose correct architecture')\n\n self.embed_timestep = TimestepEmbedder(self.latent_dim, self.sequence_pos_encoder)\n\n if self.cond_mode != 'no_cond':\n if 'text' in self.cond_mode:\n self.embed_text = nn.Linear(self.clip_dim, self.latent_dim)\n print('EMBED TEXT')\n print('Loading CLIP...')\n self.clip_version = clip_version\n self.clip_model = self.load_and_freeze_clip(clip_version)\n\n if self.txt_tokens == 2:\n if self.arch in [\"trans_enc\", \"llama_encoder\"]:\n scale = 3\n elif self.arch in [\"llama_decoder\"]:\n scale = 2\n\n encode_compress_layer = RefinedLayer(d_model=self.latent_dim * 
scale,\n nhead=self.num_heads,\n dim_feedforward=self.ff_size,\n dropout=self.dropout,\n activation=self.activation, norm_type=\"rmsnorm\")\n self.condition_compress = nn.Sequential(\n Refined_Transformer(encode_compress_layer, num_layers=1),\n nn.Linear(self.latent_dim * scale, self.latent_dim, )\n ) \n\n if self.encode_full != 0: #### [1, bs, 512] -> [seq, bs, 1024] -> [seq, bs, 512]\n self.code_full = Encoder_Block(begin_channel=self.input_feats, latent_dim=self.latent_dim, num_layers=6, TN=1, bias=kargs[\"conv_bias\"], norm_type=kargs[\"conv_norm\"], activate_type=kargs[\"conv_activate\"]) \n\n if self.encode_full == 2:\n encode_compress_layer = RefinedLayer(d_model=self.latent_dim * 2,\n nhead=self.num_heads,\n dim_feedforward=self.ff_size,\n dropout=self.dropout,\n activation=self.activation, norm_type=\"rmsnorm\")\n\n self.encode_compress = nn.Sequential(\n Refined_Transformer(encode_compress_layer, num_layers=1),\n nn.Linear(self.latent_dim * 2, self.latent_dim, )\n )\n\n print(\" =========================\", self.cond_mode, \"===================================\")\n\n def parameters_wo_clip(self):\n return [p for name, p in self.named_parameters() if not name.startswith('clip_model.')]\n\n def load_and_freeze_clip(self, clip_version):\n clip_model, clip_preprocess = clip.load(clip_version, device='cpu', jit=False, download_root=self.json_dict[\"clip\"]) # Must set jit=False for training\n clip_model.float()\n \n # Freeze CLIP weights\n clip_model.eval()\n for p in clip_model.parameters():\n p.requires_grad = False\n\n return clip_model\n\n def mask_cond(self, cond, force_mask=False):\n bs = cond.shape[0]\n if force_mask:\n return torch.zeros_like(cond)\n elif self.training and self.cond_mask_prob > 0.:\n mask = torch.bernoulli(torch.ones(bs, device=cond.device) * self.cond_mask_prob) # 1-> use null_cond, 0-> use real cond\n if len(cond.shape) == 3:\n mask = mask.view(bs, 1, 1)\n else:\n mask = mask.view(bs, 1)\n return cond * (1. 
- mask)\n else:\n return cond\n\n def clip_text_embedding(self, raw_text):\n device = self.clip_model.ln_final.weight.device\n default_context_length = self.condition_length\n texts = clip.tokenize(raw_text, context_length=default_context_length, truncate=True).to(device) # [bs, context_length] # if n_tokens > context_length -> will truncate\n if self.txt_tokens == 0: \n clip_feature = self.clip_model.encode_text(texts)\n else:\n with torch.no_grad():\n x = self.clip_model.token_embedding(texts) # [batch_size, n_ctx, d_model]\n x = x + self.clip_model.positional_embedding\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.clip_model.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n x = self.clip_model.ln_final(x)\n clip_feature = x[torch.arange(x.shape[0]), texts.argmax(dim=-1)] @ self.clip_model.text_projection\n clip_feature = clip_feature.unsqueeze(1)\n clip_feature = torch.cat([clip_feature, x], dim=1) #### [bs, T, 512]\n return clip_feature\n \n def get_mask(self, sz1, sz2):\n mask = (torch.triu(torch.ones(sz1, sz2)) == 1).transpose(0, 1)\n mask = mask.float()\n mask = mask.masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n mask.requires_grad = False\n return mask\n\n def forward(self, x, timesteps, y=None):\n \"\"\"\n x: [batch_size, njoints, nfeats, max_frames], denoted x_t in the paper\n timesteps: [batch_size] (int)\n \"\"\"\n \n results = {}\n emb = self.embed_timestep(timesteps) # [1, bs, d]\n x = x.to(emb.dtype)\n\n real_length = x.shape[-1]\n\n if self.encode_full != 0 and x.shape[-1] < self.num_frames:\n extension = torch.zeros([x.shape[0], x.shape[1], x.shape[2], self.num_frames - x.shape[-1]], device=x.device, dtype=x.dtype)\n x = torch.cat([x, extension], dim=-1)\n\n if self.encode_full == 1:\n latent = self.code_full(x) ### [seq, bs, 512]\n current = self.input_process(x) \n latent = latent.repeat(current.shape[0], 1, 1)\n current = current + latent\n elif self.encode_full == 2:\n latent = self.code_full(x) ### [seq, bs, 512]\n current = self.input_process(x) #### [seq, bs, 512]\n latent = latent.repeat(current.shape[0], 1, 1)\n current = torch.cat([current, latent], dim=2)\n current = self.encode_compress(current)\n else:\n current = self.input_process(x) #### [seq, bs, 512]\n\n force_mask = y.get('uncond', False)\n if 'text' in self.cond_mode:\n enc_text = self.clip_text_embedding(y['text']).to(emb.dtype) ### MASK_COND 会按照一定的比例把 batch_size 中的一部分文本句整句换成 [0, 0, ... 
0]\n txt_emb = self.embed_text(enc_text)\n txt_emb = self.mask_cond(txt_emb, force_mask=force_mask)\n \n if len(txt_emb.shape) == 3:\n txt_emb = txt_emb.permute(1, 0, 2)\n else:\n txt_emb = txt_emb.unsqueeze(0)\n else:\n txt_emb = None\n\n if txt_emb is not None:\n all_emb = txt_emb\n else:\n all_emb = torch.zeros_like(emb)\n\n if self.arch in [\"trans_enc\", \"llama_encoder\"] and txt_emb is not None:\n if self.txt_tokens == 1:\n word_embedding = all_emb[1::, :, :]\n global_embedding = all_emb[0:1, :, :].repeat(word_embedding.shape[0], 1, 1)\n all_emb = word_embedding + global_embedding\n emb = emb.repeat(all_emb.shape[0], 1, 1)\n emb += all_emb\n elif self.txt_tokens == 2:\n word_embedding = all_emb[1::, :, :]\n global_embedding = all_emb[0:1, :, :].repeat(word_embedding.shape[0], 1, 1)\n emb = emb.repeat(word_embedding.shape[0], 1, 1)\n concat_embedding = torch.cat([emb, global_embedding, word_embedding], dim=2)\n emb = self.condition_compress(concat_embedding)\n else:\n emb += all_emb\n elif txt_emb is not None:\n if self.txt_tokens == 1:\n emb = emb.repeat(all_emb.shape[0], 1, 1)\n emb += all_emb\n elif self.txt_tokens == 2:\n emb = emb.repeat(all_emb.shape[0], 1, 1)\n concat_embedding = torch.cat([emb, all_emb], dim=2)\n emb = self.condition_compress(concat_embedding) \n else:\n emb += all_emb \n else:\n emb = emb.repeat(all_emb.shape[0], 1, 1)\n emb += all_emb\n\n if self.arch in [\"trans_enc\", \"llama_encoder\"]:\n real_token_length = emb.shape[0] ######### 用来截断输出,只保留真正的output\n elif self.arch in [\"llama_decoder\"]:\n real_token_length = 1\n\n if self.arch in [\"trans_enc\", \"llama_encoder\"]:\n xseq = torch.cat([emb, current], dim=0)\n\n if self.arch in [\"trans_enc\"] or self.position_type == \"static\":\n xseq = self.sequence_pos_encoder(xseq)\n\n output = self.seqTransEncoder(xseq)\n\n elif self.arch in [\"llama_decoder\"]:\n if emb.shape[0] == 1:\n emb = emb.repeat(1+self.condition_length, 1, 1)\n\n xseq = torch.cat([emb[0:1], current], dim=0)\n word_tokens = emb[1::]\n\n if self.position_type == \"static\":\n xseq = self.sequence_pos_encoder(xseq)\n \n output = self.seqTransEncoder(xseq, word_tokens=word_tokens)\n\n output = output[real_token_length:]\n output = self.output_process(output) # [bs, njoints, nfeats, nframes]\n output = output[:, :, :, :real_length]\n results[\"output\"] = output\n return results\n \n def _apply(self, fn):\n super()._apply(fn)\n\n def train(self, *args, **kwargs):\n super().train(*args, **kwargs)" }, { "identifier": "gaussian_diffusion", "path": "mdm/diffusion/gaussian_diffusion.py", "snippet": "def get_named_beta_schedule(schedule_name, num_diffusion_timesteps, scale_betas=1.):\ndef betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):\n def is_vb(self):\n def __init__(\n self,\n *,\n betas,\n model_mean_type,\n model_var_type,\n loss_type,\n rescale_timesteps=False,\n rep=\"t2m\"\n ):\n def masked_l2(self, a, b, mask, addition_rotate_mask):\n def q_mean_variance(self, x_start, t):\n def q_sample(self, x_start, t, noise=None, model_kwargs=None):\n def q_posterior_mean_variance(self, x_start, x_t, t):\n def p_mean_variance(\n self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None\n ):\n def process_xstart(x):\n def _predict_xstart_from_eps(self, x_t, t, eps):\n def _predict_xstart_from_xprev(self, x_t, t, xprev):\n def _scale_timesteps(self, t):\n def p_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n const_noise=False,\n ):\n def 
p_sample_loop(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n skip_timesteps=0,\n init_image=None,\n randomize_class=False,\n cond_fn_with_grad=False,\n dump_steps=None,\n const_noise=False,\n unfolding_handshake=0, # 0 means no unfolding\n eval_mask=None\n\n ):\n def p_sample_loop_progressive(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n skip_timesteps=0,\n init_image=None,\n randomize_class=False,\n cond_fn_with_grad=False,\n const_noise=False,\n eval_mask=None\n ):\n def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):\ndef _extract_into_tensor(arr, timesteps, broadcast_shape):\nclass ModelMeanType(enum.Enum):\nclass ModelVarType(enum.Enum):\nclass LossType(enum.Enum):\nclass GaussianDiffusion:\n PREVIOUS_X = enum.auto() # the model predicts x_{t-1}\n START_X = enum.auto() # the model predicts x_0\n EPSILON = enum.auto() # the model predicts epsilon\n LEARNED = enum.auto()\n FIXED_SMALL = enum.auto()\n FIXED_LARGE = enum.auto()\n LEARNED_RANGE = enum.auto()\n MSE = enum.auto() # use raw MSE loss (and KL when learning variances)\n RESCALED_MSE = (\n enum.auto()\n ) # use raw MSE loss (with RESCALED_KL when learning variances)\n KL = enum.auto() # use the variational lower-bound\n RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB\n B, C = x.shape[:2]" }, { "identifier": "SpacedDiffusion", "path": "mdm/diffusion/respace.py", "snippet": "class SpacedDiffusion(GaussianDiffusion):\n \"\"\"\n A diffusion process which can skip steps in a base diffusion process.\n\n :param use_timesteps: a collection (sequence or set) of timesteps from the\n original diffusion process to retain.\n :param kwargs: the kwargs to create the base diffusion process.\n \"\"\"\n\n def __init__(self, use_timesteps, **kwargs):\n self.use_timesteps = set(use_timesteps)\n self.timestep_map = []\n self.original_num_steps = len(kwargs[\"betas\"])\n\n base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa\n last_alpha_cumprod = 1.0\n new_betas = []\n for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):\n if i in self.use_timesteps:\n new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)\n last_alpha_cumprod = alpha_cumprod\n self.timestep_map.append(i)\n kwargs[\"betas\"] = np.array(new_betas)\n super().__init__(**kwargs)\n\n def p_mean_variance(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)\n\n def training_losses(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().training_losses(self._wrap_model(model), *args, **kwargs)\n\n def _wrap_model(self, model):\n if isinstance(model, _WrappedModel):\n return model\n return _WrappedModel(\n model, self.timestep_map, self.rescale_timesteps, self.original_num_steps\n )\n\n def _scale_timesteps(self, t):\n # Scaling is done by the wrapped model.\n return t" }, { "identifier": "space_timesteps", "path": "mdm/diffusion/respace.py", "snippet": "def space_timesteps(num_timesteps, section_counts):\n \"\"\"\n Create a list of timesteps to use from an original diffusion process,\n given the number of timesteps we want to take from equally-sized portions\n of the original process.\n\n For example, if there's 300 timesteps and the section counts are [10,15,20]\n 
then the first 100 timesteps are strided to be 10 timesteps, the second 100\n are strided to be 15 timesteps, and the final 100 are strided to be 20.\n\n If the stride is a string starting with \"ddim\", then the fixed striding\n from the DDIM paper is used, and only one section is allowed.\n\n :param num_timesteps: the number of diffusion steps in the original\n process to divide up.\n :param section_counts: either a list of numbers, or a string containing\n comma-separated numbers, indicating the step count\n per section. As a special case, use \"ddimN\" where N\n is a number of steps to use the striding from the\n DDIM paper.\n :return: a set of diffusion steps from the original process to use.\n \"\"\"\n if isinstance(section_counts, str):\n if section_counts.startswith(\"ddim\"):\n desired_count = int(section_counts[len(\"ddim\") :])\n for i in range(1, num_timesteps):\n if len(range(0, num_timesteps, i)) == desired_count:\n return set(range(0, num_timesteps, i))\n raise ValueError(\n f\"cannot create exactly {num_timesteps} steps with an integer stride\"\n )\n section_counts = [int(x) for x in section_counts.split(\",\")]\n size_per = num_timesteps // len(section_counts)\n extra = num_timesteps % len(section_counts)\n start_idx = 0\n all_steps = []\n for i, section_count in enumerate(section_counts):\n size = size_per + (1 if i < extra else 0)\n if size < section_count:\n raise ValueError(\n f\"cannot divide section of {size} steps into {section_count}\"\n )\n if section_count <= 1:\n frac_stride = 1\n else:\n frac_stride = (size - 1) / (section_count - 1)\n cur_idx = 0.0\n taken_steps = []\n for _ in range(section_count):\n taken_steps.append(start_idx + round(cur_idx))\n cur_idx += frac_stride\n all_steps += taken_steps\n start_idx += size\n return set(all_steps)" }, { "identifier": "InpaintingGaussianDiffusion", "path": "mdm/diffusion/respace.py", "snippet": "class InpaintingGaussianDiffusion(SpacedDiffusion):\n def q_sample(self, x_start, t, noise=None, model_kwargs=None):\n \"\"\"\n overrides q_sample to use the inpainting mask\n \n same usage as in GaussianDiffusion\n \"\"\"\n if noise is None:\n noise = th.randn_like(x_start)\n assert noise.shape == x_start.shape\n\n bs, feat, _, frames = noise.shape\n noise *= 1. - model_kwargs['y']['inpainting_mask']\n\n return (\n _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start\n + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)\n * noise\n )\n \n def p_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n const_noise=False,\n ):\n \"\"\"\n overrides p_sample to use the inpainting mask\n \n same usage as in GaussianDiffusion\n \"\"\"\n out = self.p_mean_variance(\n model,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n model_kwargs=model_kwargs,\n )\n noise = th.randn_like(x)\n if const_noise:\n noise = noise[[0]].repeat(x.shape[0], 1, 1, 1)\n noise *= 1. 
- model_kwargs['y']['inpainting_mask']\n\n nonzero_mask = (\n (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))\n ) # no noise when t == 0\n if cond_fn is not None:\n out[\"mean\"] = self.condition_mean(\n cond_fn, out, x, t, model_kwargs=model_kwargs\n )\n sample = out[\"mean\"] + nonzero_mask * th.exp(0.5 * out[\"log_variance\"]) * noise\n return {\"sample\": sample, \"pred_xstart\": out[\"pred_xstart\"]}" }, { "identifier": "TRT_MDM", "path": "mdm/model/trt_model.py", "snippet": "class TRT_MDM(nn.Module):\n def __init__(self, mode, json_dict, device=\"cuda\"):\n super(TRT_MDM, self).__init__()\n self.device = device\n self.json_dict = json_dict\n self.clip_model = DynamicModel(self.json_dict[f\"{mode}2\"], self.device)\n self.decoder = DynamicModel(self.json_dict[f\"{mode}1\"], self.device)\n self.num_frames = 196\n self.njoints = 269\n self.nfeats = 1\n self.condition_length = 77\n\n def mask_cond(self, cond, force_mask=False):\n bs = cond.shape[0]\n if force_mask:\n return torch.zeros_like(cond)\n else:\n return cond\n\n def clip_text_embedding(self, raw_text):\n default_context_length = self.condition_length\n texts = clip.tokenize(raw_text, context_length=default_context_length, truncate=True) # [bs, context_length] # if n_tokens > context_length -> will truncate\n texts = texts.to(self.device)\n\n if len(self.clip_model.inshape) == 0 or self.clip_model.inshape[0] != texts.shape:\n self.clip_model.set_shape([[*texts.shape]], [[texts.shape[0], self.condition_length+1, 512]]) \n\n clip_feature = self.clip_model(texts)\n return clip_feature\n\n @torch.no_grad()\n def forward(self, x, timesteps, y=None): \n force_mask = y.get('uncond', False)\n txt_emb = self.clip_text_embedding(y['text']) ### MASK_COND 会按照一定的比例把 batch_size 中的一部分文本句整句换成 [0, 0, ... 0]\n txt_emb = self.mask_cond(txt_emb, force_mask=force_mask)\n \n if len(txt_emb.shape) == 3:\n txt_emb = txt_emb.permute(1, 0, 2)\n else:\n txt_emb = txt_emb.unsqueeze(0)\n\n real_frame = x.shape[-1]\n if real_frame < self.num_frames:\n extension = torch.zeros([x.shape[0], x.shape[1], x.shape[2], self.num_frames - x.shape[-1]], device=x.device, dtype=x.dtype)\n x = torch.cat([x, extension], dim=-1)\n\n if len(self.decoder.inshape) == 0 or self.decoder.inshape[0] != x.shape:\n self.decoder.set_shape([[*x.shape], [*timesteps.shape], [*txt_emb.shape]], [[*x.shape]])\n\n output = self.decoder([x, timesteps, txt_emb])\n output = output[:, :, :, :real_frame]\n\n return {\"output\":output}" } ]
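The `space_timesteps` docstring in the context above works through its striding rule (300 original steps split into sections that keep 10, 15 and 20 steps, plus a "ddimN" mode). As a purely illustrative, hypothetical usage sketch, not code taken from the repository, and assuming the `mdm` package from this record is importable:

from mdm.diffusion.respace import space_timesteps

# 300 original timesteps, three equal sections keeping 10, 15 and 20 steps each
kept = space_timesteps(300, [10, 15, 20])
print(len(kept))  # 45 retained timesteps in total

# DDIM-style fixed striding: keep 50 evenly strided steps out of 1000
ddim_kept = space_timesteps(1000, "ddim50")
print(len(ddim_kept))  # 50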
from mdm.model.mdm import MDM
from mdm.diffusion import gaussian_diffusion as gd
from mdm.diffusion.respace import SpacedDiffusion, space_timesteps, InpaintingGaussianDiffusion
from mdm.model.trt_model import TRT_MDM
7,589
def load_model_wo_clip(model, state_dict):
    print("load model checkpoints without clip")
    try:
        new_state_dict = {}
        for key, value in state_dict.items():
            if "in_proj" in key:
                keyq = key.replace("in_proj_weight", "wq.weight")
                keyk = key.replace("in_proj_weight", "wk.weight")
                keyv = key.replace("in_proj_weight", "wv.weight")
                inshape = value.shape[0] // 3
                valueq = value[:inshape]
                valuek = value[inshape:inshape * 2]
                valuev = value[inshape * 2:]
                new_state_dict[keyq] = valueq
                new_state_dict[keyk] = valuek
                new_state_dict[keyv] = valuev
            elif "out_proj" in key:
                newkey = key.replace("out_proj", "wo")
                new_state_dict[newkey] = value
            else:
                new_state_dict[key] = value
        missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    except:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    print(unexpected_keys)
    other_miss = []
    for key in missing_keys:
        if not key.startswith('clip_model.'):
            other_miss.append(key)
    print(other_miss)
    assert all([k.startswith('clip_model.') for k in missing_keys])

def load_ft_model_wo_clip(model, state_dict):
    print("load model checkpoints without clip")
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    print(unexpected_keys)
    # for name, value in model.named_parameters():
    #     if "seqTransEncoder" in name and "self_attn" in name:
    #         value.requires_grad = False
    #     if name.startswith("code_full") or name.startswith("encode_compress") or name.startswith("input_process"):
    #         value.requires_grad = False
    assert all([k.startswith('clip_pose_encoder.') for k in unexpected_keys])
    # assert all([k.startswith('clip_model.') or k.startswith('clip_pose_encoder.') or k.startswith('embed_text.') for k in missing_keys])

def create_model_and_diffusion(args, mode="text", json_dict=None):
    model = MDM(**get_model_args(args), json_dict=json_dict)
    diffusion = create_gaussian_diffusion(args, mode)
    return model, diffusion

def create_trt_model(args, model, mode="text", json_dict=None, device="cuda"):
    model = TRT_MDM(model, json_dict, device=device)
    diffusion = create_gaussian_diffusion(args, mode)
    return model, diffusion

def get_model_args(args):
    # default args
    clip_version = 'ViT-B/32'
    if args.unconstrained:
        cond_mode = 'no_cond'
    elif args.dataset in ['kit', 'humanml']:
        cond_mode = "text"
    activation = args.trans_activate if args.arch != "trans_enc" else "gelu"

    if args.dataset == 'humanml':
        njoints = 263
        nfeats = 1
    elif args.dataset == 'kit':
        njoints = 251
        nfeats = 1

    if args.rep == "smr":
        njoints += 6
        nfeats = 1

    return {'njoints': njoints, 'nfeats': nfeats, 'latent_dim': args.latent_dim, 'ff_size': args.ff_size,
            'num_layers': args.layers, 'num_heads': args.heads, 'dropout': 0.1, 'activation': activation,
            'cond_mode': cond_mode, 'cond_mask_prob': args.cond_mask_prob, 'arch': args.arch,
            'clip_version': clip_version, 'dataset': args.dataset, "local": args.local,
            "encode_full": args.encode_full, "txt_tokens": args.txt_tokens, "dataset_path": args.dataset_path,
            "num_frames": args.num_frames, "conv_bias": args.conv_bias, "conv_activate": args.conv_activate,
            "conv_norm": args.conv_norm}

def create_gaussian_diffusion(args, mode="text"):
    # default params
    predict_xstart = True  # we always predict x_start (a.k.a. x0), that's our deal!
    steps = 1000
    scale_beta = 1.  # no scaling
    timestep_respacing = ''  # can be used for ddim sampling, we don't use it.
    learn_sigma = False
    rescale_timesteps = False

    betas = gd.get_named_beta_schedule(args.noise_schedule, steps, scale_beta)
    loss_type = gd.LossType.MSE

    if not timestep_respacing:
        timestep_respacing = [steps]

    if mode is not None and (mode.startswith("finetune_control") or mode == "control_length"):
        print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> inpainting diffusion model")
        diffusion = InpaintingGaussianDiffusion
    else:
        print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> SpacedDiffusion")
def load_model_wo_clip(model, state_dict):
    print("load model checkpoints without clip")
    try:
        new_state_dict = {}
        for key, value in state_dict.items():
            if "in_proj" in key:
                keyq = key.replace("in_proj_weight", "wq.weight")
                keyk = key.replace("in_proj_weight", "wk.weight")
                keyv = key.replace("in_proj_weight", "wv.weight")
                inshape = value.shape[0] // 3
                valueq = value[:inshape]
                valuek = value[inshape:inshape * 2]
                valuev = value[inshape * 2:]
                new_state_dict[keyq] = valueq
                new_state_dict[keyk] = valuek
                new_state_dict[keyv] = valuev
            elif "out_proj" in key:
                newkey = key.replace("out_proj", "wo")
                new_state_dict[newkey] = value
            else:
                new_state_dict[key] = value
        missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    except:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    print(unexpected_keys)
    other_miss = []
    for key in missing_keys:
        if not key.startswith('clip_model.'):
            other_miss.append(key)
    print(other_miss)
    assert all([k.startswith('clip_model.') for k in missing_keys])

def load_ft_model_wo_clip(model, state_dict):
    print("load model checkpoints without clip")
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    print(unexpected_keys)
    # for name, value in model.named_parameters():
    #     if "seqTransEncoder" in name and "self_attn" in name:
    #         value.requires_grad = False
    #     if name.startswith("code_full") or name.startswith("encode_compress") or name.startswith("input_process"):
    #         value.requires_grad = False
    assert all([k.startswith('clip_pose_encoder.') for k in unexpected_keys])
    # assert all([k.startswith('clip_model.') or k.startswith('clip_pose_encoder.') or k.startswith('embed_text.') for k in missing_keys])

def create_model_and_diffusion(args, mode="text", json_dict=None):
    model = MDM(**get_model_args(args), json_dict=json_dict)
    diffusion = create_gaussian_diffusion(args, mode)
    return model, diffusion

def create_trt_model(args, model, mode="text", json_dict=None, device="cuda"):
    model = TRT_MDM(model, json_dict, device=device)
    diffusion = create_gaussian_diffusion(args, mode)
    return model, diffusion

def get_model_args(args):
    # default args
    clip_version = 'ViT-B/32'
    if args.unconstrained:
        cond_mode = 'no_cond'
    elif args.dataset in ['kit', 'humanml']:
        cond_mode = "text"
    activation = args.trans_activate if args.arch != "trans_enc" else "gelu"

    if args.dataset == 'humanml':
        njoints = 263
        nfeats = 1
    elif args.dataset == 'kit':
        njoints = 251
        nfeats = 1

    if args.rep == "smr":
        njoints += 6
        nfeats = 1

    return {'njoints': njoints, 'nfeats': nfeats, 'latent_dim': args.latent_dim, 'ff_size': args.ff_size,
            'num_layers': args.layers, 'num_heads': args.heads, 'dropout': 0.1, 'activation': activation,
            'cond_mode': cond_mode, 'cond_mask_prob': args.cond_mask_prob, 'arch': args.arch,
            'clip_version': clip_version, 'dataset': args.dataset, "local": args.local,
            "encode_full": args.encode_full, "txt_tokens": args.txt_tokens, "dataset_path": args.dataset_path,
            "num_frames": args.num_frames, "conv_bias": args.conv_bias, "conv_activate": args.conv_activate,
            "conv_norm": args.conv_norm}

def create_gaussian_diffusion(args, mode="text"):
    # default params
    predict_xstart = True  # we always predict x_start (a.k.a. x0), that's our deal!
    steps = 1000
    scale_beta = 1.  # no scaling
    timestep_respacing = ''  # can be used for ddim sampling, we don't use it.
    learn_sigma = False
    rescale_timesteps = False

    betas = gd.get_named_beta_schedule(args.noise_schedule, steps, scale_beta)
    loss_type = gd.LossType.MSE

    if not timestep_respacing:
        timestep_respacing = [steps]

    if mode is not None and (mode.startswith("finetune_control") or mode == "control_length"):
        print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> inpainting diffusion model")
        diffusion = InpaintingGaussianDiffusion
    else:
        print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> SpacedDiffusion")
diffusion = SpacedDiffusion
2
2023-10-20 14:53:26+00:00
12k
pythonlessons/FinRock
experiments/testing_ppo_sinusoid.py
[ { "identifier": "PdDataFeeder", "path": "finrock/data_feeder.py", "snippet": "class PdDataFeeder:\n def __init__(\n self, \n df: pd.DataFrame,\n indicators: list = [],\n min: float = None,\n max: float = None\n ) -> None:\n self._df = df\n self._min = min\n self._max = max\n self._indicators = indicators\n self._cache = {}\n\n assert isinstance(self._df, pd.DataFrame) == True, \"df must be a pandas.DataFrame\"\n assert 'timestamp' in self._df.columns, \"df must have 'timestamp' column\"\n assert 'open' in self._df.columns, \"df must have 'open' column\"\n assert 'high' in self._df.columns, \"df must have 'high' column\"\n assert 'low' in self._df.columns, \"df must have 'low' column\"\n assert 'close' in self._df.columns, \"df must have 'close' column\"\n\n assert isinstance(self._indicators, list) == True, \"indicators must be an iterable\"\n assert all(isinstance(indicator, Indicator) for indicator in self._indicators) == True, \"indicators must be a list of Indicator objects\"\n\n @property\n def min(self) -> float:\n return self._min or self._df['low'].min()\n \n @property\n def max(self) -> float:\n return self._max or self._df['high'].max()\n\n def __len__(self) -> int:\n return len(self._df)\n \n def __getitem__(self, idx: int, args=None) -> State:\n # Use cache to speed up training\n if idx in self._cache:\n return self._cache[idx]\n\n indicators = []\n for indicator in self._indicators:\n results = indicator(idx)\n if results is None:\n self._cache[idx] = None\n return None\n \n indicators.append(results)\n\n data = self._df.iloc[idx]\n state = State(\n timestamp=data['timestamp'],\n open=data['open'],\n high=data['high'],\n low=data['low'],\n close=data['close'],\n volume=data.get('volume', 0.0),\n indicators=indicators\n )\n self._cache[idx] = state\n\n return state\n \n def __iter__(self) -> State:\n \"\"\" Create a generator that iterate over the Sequence.\"\"\"\n for index in range(len(self)):\n yield self[index]" }, { "identifier": "TradingEnv", "path": "finrock/trading_env.py", "snippet": "class TradingEnv:\n def __init__(\n self,\n data_feeder: PdDataFeeder,\n output_transformer: typing.Callable = None,\n initial_balance: float = 1000.0,\n max_episode_steps: int = None,\n window_size: int = 50,\n reward_function: typing.Callable = simpleReward,\n metrics: typing.List[typing.Callable] = []\n ) -> None:\n self._data_feeder = data_feeder\n self._output_transformer = output_transformer\n self._initial_balance = initial_balance\n self._max_episode_steps = max_episode_steps if max_episode_steps is not None else len(data_feeder)\n self._window_size = window_size\n self._reward_function = reward_function\n self._metrics = metrics\n\n self._observations = Observations(window_size=window_size)\n self._observation_space = np.zeros(self.reset()[0].shape)\n self.action_space = 3\n\n @property\n def observation_space(self):\n return self._observation_space\n\n def _get_obs(self, index: int, balance: float=None) -> State:\n next_state = self._data_feeder[index]\n if next_state is None:\n return None\n\n if balance is not None:\n next_state.balance = balance\n\n return next_state\n \n def _get_terminated(self):\n return False\n \n def _take_action(self, action: int, order_size: float) -> typing.Tuple[int, float]:\n # validate action is in range\n assert (action in list(range(self.action_space))) == True, f'action must be in range {self.action_space}, received: {action}'\n\n # get last state and next state\n last_state, next_state = self._observations[-2:]\n\n # modify action to hold (0) 
if we are out of balance\n if action == 2 and last_state.allocation_percentage == 1.0:\n action = 0\n\n # modify action to hold (0) if we are out of assets\n elif action == 1 and last_state.allocation_percentage == 0.0:\n action = 0\n\n if action == 2: # buy\n next_state.allocation_percentage = order_size\n next_state.assets = last_state.balance * order_size / last_state.close\n next_state.balance = last_state.balance - (last_state.balance * order_size)\n\n elif action == 1: # sell\n next_state.allocation_percentage = 0.0\n next_state.balance = last_state.assets * order_size * last_state.close\n next_state.assets = 0.0\n\n else: # hold\n next_state.allocation_percentage = last_state.allocation_percentage\n next_state.assets = last_state.assets\n next_state.balance = last_state.balance\n\n return action, order_size\n \n @property\n def metrics(self):\n return self._metrics\n\n def _metricsHandler(self, observation: State):\n metrics = {}\n # Loop through metrics and update\n for metric in self._metrics:\n metric.update(observation)\n metrics[metric.name] = metric.result\n\n return metrics\n\n def step(self, action: int) -> typing.Tuple[State, float, bool, bool, dict]:\n\n index = self._env_step_indexes.pop(0)\n\n observation = self._get_obs(index)\n # update observations object with new observation\n self._observations.append(observation)\n\n order_size = 1.0\n action, order_size = self._take_action(action, order_size)\n reward = self._reward_function(self._observations)\n terminated = self._get_terminated()\n truncated = False if self._env_step_indexes else True\n info = {\n \"states\": [observation],\n \"metrics\": self._metricsHandler(observation)\n }\n\n transformed_obs = self._output_transformer.transform(self._observations)\n\n if np.isnan(transformed_obs).any():\n raise ValueError(\"transformed_obs contains nan values, check your data\")\n\n return transformed_obs, reward, terminated, truncated, info\n\n def reset(self) -> typing.Tuple[State, dict]:\n \"\"\" Reset the environment and return the initial state\n \"\"\"\n size = len(self._data_feeder) - self._max_episode_steps\n self._env_start_index = np.random.randint(0, size) if size > 0 else 0\n self._env_step_indexes = list(range(self._env_start_index, self._env_start_index + self._max_episode_steps))\n\n # Initial observations are the first states of the window size\n self._observations.reset()\n while not self._observations.full:\n obs = self._get_obs(self._env_step_indexes.pop(0), balance=self._initial_balance)\n if obs is None:\n continue\n # update observations object with new observation\n self._observations.append(obs)\n\n info = {\n \"states\": self._observations.observations,\n \"metrics\": {}\n }\n \n # reset metrics with last state\n for metric in self._metrics:\n metric.reset(self._observations.observations[-1])\n\n transformed_obs = self._output_transformer.transform(self._observations)\n if np.isnan(transformed_obs).any():\n raise ValueError(\"transformed_obs contains nan values, check your data\")\n \n # return state and info\n return transformed_obs, info\n\n def render(self):\n raise NotImplementedError\n\n def close(self):\n raise NotImplementedError" }, { "identifier": "PygameRender", "path": "finrock/render.py", "snippet": "class PygameRender:\n def __init__(\n self,\n window_size: int=100,\n screen_width: int=1440,\n screen_height: int=1080,\n top_offset: int=25,\n bottom_offset: int=25,\n candle_spacing: int=1,\n color_theme = ColorTheme(),\n frame_rate: int=30,\n render_balance: bool=True,\n ):\n # pygame 
window settings\n self.screen_width = screen_width\n self.screen_height = screen_height\n self.top_offset = top_offset\n self.bottom_offset = bottom_offset\n self.candle_spacing = candle_spacing\n self.window_size = window_size\n self.color_theme = color_theme\n self.frame_rate = frame_rate\n self.render_balance = render_balance\n\n self.mainWindow = MainWindow(\n width=self.screen_width,\n height=self.screen_height,\n top_offset=self.top_offset,\n bottom_offset=self.bottom_offset,\n window_size=self.window_size,\n candle_spacing=self.candle_spacing,\n font_ratio=self.color_theme.font_ratio\n )\n\n self._states = []\n\n try:\n import pygame\n self.pygame = pygame\n except ImportError:\n raise ImportError('Please install pygame (pip install pygame)')\n \n self.pygame.init()\n self.pygame.display.init()\n self.window = self.pygame.display.set_mode(self.mainWindow.screen_shape, self.pygame.RESIZABLE)\n self.clock = self.pygame.time.Clock()\n\n def reset(self):\n self._states = []\n \n def _prerender(func):\n \"\"\" Decorator for input data validation and pygame window rendering\"\"\"\n def wrapper(self, info: dict, rgb_array: bool=False):\n self._states += info.get('states', [])\n\n if not self._states or not bool(self.window._pixels_address):\n return\n\n for event in self.pygame.event.get():\n if event.type == self.pygame.QUIT:\n self.pygame.quit()\n return\n\n if event.type == self.pygame.VIDEORESIZE:\n self.mainWindow.screen_shape = (event.w, event.h)\n\n # pause if spacebar is pressed\n if event.type == self.pygame.KEYDOWN:\n if event.key == self.pygame.K_SPACE:\n print('Paused')\n while True:\n event = self.pygame.event.wait()\n if event.type == self.pygame.KEYDOWN:\n if event.key == self.pygame.K_SPACE:\n print('Unpaused')\n break\n if event.type == self.pygame.QUIT:\n self.pygame.quit()\n return\n \n self.mainWindow.screen_shape = self.pygame.display.get_surface().get_size()\n\n\n canvas = func(self, info)\n canvas = self.pygame.transform.scale(canvas, self.mainWindow.screen_shape)\n # The following line copies our drawings from `canvas` to the visible window\n self.window.blit(canvas, canvas.get_rect())\n self.pygame.display.update()\n self.clock.tick(self.frame_rate)\n\n if rgb_array:\n return self.pygame.surfarray.array3d(canvas)\n\n return wrapper\n \n def render_indicators(self, state: State, canvas: object, candle_offset: int, max_low: float, max_high: float):\n # connect last 2 points with a line\n for i, indicator in enumerate(state.indicators):\n for name, render_option in indicator[\"render_options\"].items():\n\n index = self._states.index(state)\n if not index:\n return\n last_state = self._states[index - 1]\n\n if render_option.render_type == RenderType.LINE:\n prev_render_option = last_state.indicators[i][\"render_options\"][name]\n if render_option.window_type == WindowType.MAIN:\n\n cur_value_map = self.mainWindow.map_price_to_window(render_option.value, max_low, max_high)\n prev_value_map = self.mainWindow.map_price_to_window(prev_render_option.value, max_low, max_high)\n\n elif render_option.window_type == WindowType.SEPERATE:\n\n cur_value_map = self.mainWindow.map_to_seperate_window(render_option.value, render_option.min, render_option.max)\n prev_value_map = self.mainWindow.map_to_seperate_window(prev_render_option.value, prev_render_option.min, prev_render_option.max)\n\n self.pygame.draw.line(canvas, render_option.color, \n (candle_offset - self.mainWindow.candle_width / 2, prev_value_map), \n (candle_offset + self.mainWindow.candle_width / 2, cur_value_map))\n 
\n elif render_option.render_type == RenderType.DOT:\n if render_option.window_type == WindowType.MAIN:\n self.pygame.draw.circle(canvas, render_option.color,\n (candle_offset, self.mainWindow.map_price_to_window(render_option.value, max_low, max_high)), 2)\n elif render_option.window == WindowType.SEPERATE:\n raise NotImplementedError('Seperate window for indicators is not implemented yet')\n \n def render_candle(self, state: State, canvas: object, candle_offset: int, max_low: float, max_high: float, font: object):\n assert isinstance(state, State) == True # check if state is a State object\n\n # Calculate candle coordinates\n candle_y_open = self.mainWindow.map_price_to_window(state.open, max_low, max_high)\n candle_y_close = self.mainWindow.map_price_to_window(state.close, max_low, max_high)\n candle_y_high = self.mainWindow.map_price_to_window(state.high, max_low, max_high)\n candle_y_low = self.mainWindow.map_price_to_window(state.low, max_low, max_high)\n\n # Determine candle color\n if state.open < state.close:\n # up candle\n candle_color = self.color_theme.up_candle\n candle_body_y = candle_y_close\n candle_body_height = candle_y_open - candle_y_close\n else:\n # down candle\n candle_color = self.color_theme.down_candle\n candle_body_y = candle_y_open\n candle_body_height = candle_y_close - candle_y_open\n\n # Draw candlestick wicks\n self.pygame.draw.line(canvas, self.color_theme.wick, \n (candle_offset + self.mainWindow.candle_width // 2, candle_y_high), \n (candle_offset + self.mainWindow.candle_width // 2, candle_y_low))\n\n # Draw candlestick body\n self.pygame.draw.rect(canvas, candle_color, (candle_offset, candle_body_y, self.mainWindow.candle_width, candle_body_height))\n\n # Compare with previous state to determine whether buy or sell action was taken and draw arrow\n index = self._states.index(state)\n if index > 0:\n last_state = self._states[index - 1]\n\n if last_state.allocation_percentage < state.allocation_percentage:\n # buy\n candle_y_low = self.mainWindow.map_price_to_window(last_state.low, max_low, max_high)\n self.pygame.draw.polygon(canvas, self.color_theme.buy, [\n (candle_offset - self.mainWindow.candle_width / 2, candle_y_low + self.mainWindow.spacing / 2), \n (candle_offset - self.mainWindow.candle_width * 0.1, candle_y_low + self.mainWindow.spacing), \n (candle_offset - self.mainWindow.candle_width * 0.9, candle_y_low + self.mainWindow.spacing)\n ])\n \n # add account_value label bellow candle\n if self.render_balance:\n text = str(int(last_state.account_value))\n buy_label = font.render(text, True, self.color_theme.text)\n label_width, label_height = font.size(text)\n canvas.blit(buy_label, (candle_offset - (self.mainWindow.candle_width + label_width) / 2, candle_y_low + self.mainWindow.spacing))\n\n elif last_state.allocation_percentage > state.allocation_percentage:\n # sell\n candle_y_high = self.mainWindow.map_price_to_window(last_state.high, max_low, max_high)\n self.pygame.draw.polygon(canvas, self.color_theme.sell, [\n (candle_offset - self.mainWindow.candle_width / 2, candle_y_high - self.mainWindow.spacing / 2), \n (candle_offset - self.mainWindow.candle_width * 0.1, candle_y_high - self.mainWindow.spacing), \n (candle_offset - self.mainWindow.candle_width * 0.9, candle_y_high - self.mainWindow.spacing)\n ])\n\n # add account_value label above candle\n if self.render_balance:\n text = str(int(last_state.account_value))\n sell_label = font.render(text, True, self.color_theme.text)\n label_width, label_height = font.size(text)\n 
canvas.blit(sell_label, (candle_offset - (self.mainWindow.candle_width + label_width) / 2, candle_y_high - self.mainWindow.spacing - label_height))\n\n @_prerender\n def render(self, info: dict):\n canvas = self.pygame.Surface(self.mainWindow.screen_shape)\n canvas.fill(self.color_theme.background)\n \n max_high = max([state.high for state in self._states[-self.window_size:]])\n max_low = min([state.low for state in self._states[-self.window_size:]])\n\n candle_offset = self.candle_spacing\n\n # Set font for labels\n font = self.pygame.font.SysFont(self.color_theme.font, self.mainWindow.font_size)\n\n for state in self._states[-self.window_size:]:\n\n # draw indicators\n self.render_indicators(state, canvas, candle_offset, max_low, max_high)\n\n # draw candle\n self.render_candle(state, canvas, candle_offset, max_low, max_high, font)\n\n # Move to the next candle\n candle_offset += self.mainWindow.candle_width + self.candle_spacing\n\n # Draw max and min ohlc values on the chart\n label_width, label_height = font.size(str(max_low))\n label_y_low = font.render(str(max_low), True, self.color_theme.text)\n canvas.blit(label_y_low, (self.candle_spacing + 5, self.mainWindow.height - label_height * 2))\n\n label_width, label_height = font.size(str(max_low))\n label_y_high = font.render(str(max_high), True, self.color_theme.text)\n canvas.blit(label_y_high, (self.candle_spacing + 5, label_height))\n\n return canvas" }, { "identifier": "MinMaxScaler", "path": "finrock/scalers.py", "snippet": "class MinMaxScaler:\n def __init__(self, min: float, max: float):\n self._min = min\n self._max = max\n \n def transform(self, observations: Observations) -> np.ndarray:\n\n assert isinstance(observations, Observations) == True, \"observations must be an instance of Observations\"\n\n transformed_data = []\n for state in observations:\n data = []\n for name in ['open', 'high', 'low', 'close']:\n value = getattr(state, name)\n transformed_value = (value - self._min) / (self._max - self._min)\n data.append(transformed_value)\n \n data.append(state.allocation_percentage)\n\n # append scaled indicators\n for indicator in state.indicators:\n for value in indicator[\"values\"].values():\n transformed_value = (value - indicator[\"min\"]) / (indicator[\"max\"] - indicator[\"min\"])\n data.append(transformed_value)\n\n transformed_data.append(data)\n\n return np.array(transformed_data)\n \n def __call__(self, observations) -> np.ndarray:\n return self.transform(observations)" }, { "identifier": "simpleReward", "path": "finrock/reward.py", "snippet": "def simpleReward(observations: Observations) -> float:\n \n assert isinstance(observations, Observations) == True, \"observations must be an instance of Observations\"\n\n last_state, next_state = observations[-2:]\n\n # buy\n if next_state.allocation_percentage > last_state.allocation_percentage:\n # check whether it was good or bad to buy\n order_size = next_state.allocation_percentage - last_state.allocation_percentage\n reward = (next_state.close - last_state.close) / last_state.close * order_size\n\n # sell\n elif next_state.allocation_percentage < last_state.allocation_percentage:\n # check whether it was good or bad to sell\n order_size = last_state.allocation_percentage - next_state.allocation_percentage\n reward = -1 * (next_state.close - last_state.close) / last_state.close * order_size\n\n # hold\n else:\n # check whether it was good or bad to hold\n ratio = -1 if not last_state.allocation_percentage else last_state.allocation_percentage\n reward = 
(next_state.close - last_state.close) / last_state.close * ratio\n \n return reward" }, { "identifier": "DifferentActions", "path": "finrock/metrics.py", "snippet": "class DifferentActions(Metric):\n def __init__(self, name: str=\"different_actions\") -> None:\n super().__init__(name=name)\n\n def update(self, state: State):\n super().update(state)\n\n if not self.prev_state:\n self.prev_state = state\n else:\n if state.allocation_percentage != self.prev_state.allocation_percentage:\n self.different_actions += 1\n\n self.prev_state = state\n\n @property\n def result(self):\n return self.different_actions\n \n def reset(self, prev_state: State=None):\n super().reset(prev_state)\n\n self.prev_state = prev_state\n self.different_actions = 0" }, { "identifier": "AccountValue", "path": "finrock/metrics.py", "snippet": "class AccountValue(Metric):\n def __init__(self, name: str=\"account_value\") -> None:\n super().__init__(name=name)\n\n def update(self, state: State):\n super().update(state)\n\n self.account_value = state.account_value\n\n @property\n def result(self):\n return self.account_value\n \n def reset(self, prev_state: State=None):\n super().reset(prev_state)\n \n self.account_value = prev_state.account_value if prev_state else 0.0" }, { "identifier": "MaxDrawdown", "path": "finrock/metrics.py", "snippet": "class MaxDrawdown(Metric):\n \"\"\" The Maximum Drawdown (MDD) is a measure of the largest peak-to-trough decline in the \n value of a portfolio or investment during a specific period\n\n The Maximum Drawdown Ratio represents the proportion of the peak value that was lost during \n the largest decline. It is a measure of the risk associated with a particular investment or \n portfolio. Investors and fund managers use the Maximum Drawdown and its ratio to assess the \n historical downside risk and potential losses that could be incurred.\n \"\"\"\n def __init__(self, name: str=\"max_drawdown\") -> None:\n super().__init__(name=name)\n\n def update(self, state: State):\n super().update(state)\n\n # Use min to find the trough value\n self.max_account_value = max(self.max_account_value, state.account_value)\n\n # Calculate drawdown\n drawdown = (state.account_value - self.max_account_value) / self.max_account_value\n\n # Update max drawdown if the current drawdown is greater\n self.max_drawdown = min(self.max_drawdown, drawdown)\n\n @property\n def result(self):\n return self.max_drawdown\n \n def reset(self, prev_state: State=None):\n super().reset(prev_state)\n\n self.max_account_value = prev_state.account_value if prev_state else 0.0\n self.max_drawdown = 0.0" }, { "identifier": "SharpeRatio", "path": "finrock/metrics.py", "snippet": "class SharpeRatio(Metric):\n \"\"\" The Sharpe Ratio, is a measure of the risk-adjusted performance of an investment or a portfolio. \n It helps investors evaluate the return of an investment relative to its risk.\n\n A higher Sharpe Ratio indicates a better risk-adjusted performance. Investors and portfolio managers \n often use the Sharpe Ratio to compare the risk-adjusted returns of different investments or portfolios. 
\n It allows them to assess whether the additional return earned by taking on additional risk is justified.\n \"\"\"\n def __init__(self, ratio_days=365.25, name: str='sharpe_ratio'):\n self.ratio_days = ratio_days\n super().__init__(name=name)\n\n def update(self, state: State):\n super().update(state)\n time_difference_days = (state.date - self.prev_state.date).days\n if time_difference_days >= 1:\n self.daily_returns.append((state.account_value - self.prev_state.account_value) / self.prev_state.account_value)\n self.account_values.append(state.account_value)\n self.prev_state = state\n \n @property\n def result(self):\n if len(self.daily_returns) == 0:\n return 0.0\n\n mean = np.mean(self.daily_returns)\n std = np.std(self.daily_returns)\n if std == 0:\n return 0.0\n \n sharpe_ratio = mean / std * np.sqrt(self.ratio_days)\n \n return sharpe_ratio\n \n def reset(self, prev_state: State=None):\n super().reset(prev_state)\n self.prev_state = prev_state\n self.account_values = []\n self.daily_returns = []" }, { "identifier": "BolingerBands", "path": "finrock/indicators.py", "snippet": "class BolingerBands(Indicator):\n \"\"\" Volatility indicator\n\n Bollinger Bands are a type of price envelope developed by John BollingerOpens in a new window. (Price envelopes define \n upper and lower price range levels.) Bollinger Bands are envelopes plotted at a standard deviation level above and \n below a simple moving average of the price. Because the distance of the bands is based on standard deviation, they \n adjust to volatility swings in the underlying price.\n\n Bollinger Bands use 2 parameters, Period and Standard Deviations, StdDev. The default values are 20 for period, and 2 \n for standard deviations, although you may customize the combinations.\n\n Bollinger bands help determine whether prices are high or low on a relative basis. They are used in pairs, both upper\n and lower bands and in conjunction with a moving average. Further, the pair of bands is not intended to be used on its own. \n Use the pair to confirm signals given with other indicators.\n \"\"\"\n def __init__(\n self, \n data: pd.DataFrame, \n period: int=20, \n std: int=2,\n target_column: str='close',\n render_options: dict={}\n ):\n self._period = period\n self._std = std\n self._names = ['SMA', 'BB_up', 'BB_dn']\n super().__init__(data, target_column, render_options)\n\n @property\n def min(self):\n return self._data['BB_dn'].min()\n \n @property\n def max(self):\n return self._data['BB_up'].max()\n\n def compute(self):\n self._data['SMA'] = self._data[self.target_column].rolling(self._period).mean()\n self._data['BB_up'] = self._data['SMA'] + self._data[self.target_column].rolling(self._period).std() * self._std\n self._data['BB_dn'] = self._data['SMA'] - self._data[self.target_column].rolling(self._period).std() * self._std\n\n def default_render_options(self):\n return {name: RenderOptions(\n name=name,\n color=(100, 100, 255),\n window_type=WindowType.MAIN,\n render_type=RenderType.LINE,\n min=self.min,\n max=self.max\n ) for name in self._names}" }, { "identifier": "RSI", "path": "finrock/indicators.py", "snippet": "class RSI(Indicator):\n \"\"\" Momentum indicator\n\n The Relative Strength Index (RSI), developed by J. Welles Wilder, is a momentum oscillator that measures the speed and \n change of price movements. The RSI oscillates between zero and 100. Traditionally the RSI is considered overbought when \n above 70 and oversold when below 30. Signals can be generated by looking for divergences and failure swings. 
\n RSI can also be used to identify the general trend.\n \"\"\"\n def __init__(\n self, \n data: pd.DataFrame, \n period: int=14, \n target_column: str='close',\n render_options: dict={}\n ):\n self._period = period\n self._names = ['RSI']\n super().__init__(data, target_column, render_options)\n\n @property\n def min(self):\n return 0.0\n \n @property\n def max(self):\n return 100.0\n\n def compute(self):\n delta = self._data[self.target_column].diff()\n up = delta.clip(lower=0)\n down = -1 * delta.clip(upper=0)\n ema_up = up.ewm(com=self._period-1, adjust=True, min_periods=self._period).mean()\n ema_down = down.ewm(com=self._period-1, adjust=True, min_periods=self._period).mean()\n rs = ema_up / ema_down\n self._data['RSI'] = 100 - (100 / (1 + rs))\n\n def default_render_options(self):\n custom_options = {\n \"RSI0\": 0,\n \"RSI30\": 30,\n \"RSI70\": 70,\n \"RSI100\": 100\n }\n options = {name: RenderOptions(\n name=name,\n color=(100, 100, 255),\n window_type=WindowType.SEPERATE,\n render_type=RenderType.LINE,\n min=self.min,\n max=self.max\n ) for name in self._names}\n\n for name, value in custom_options.items():\n options[name] = RenderOptions(\n name=name,\n color=(192, 192, 192),\n window_type=WindowType.SEPERATE,\n render_type=RenderType.LINE,\n min=self.min,\n max=self.max,\n value=value\n )\n return options" }, { "identifier": "PSAR", "path": "finrock/indicators.py", "snippet": "class PSAR(Indicator):\n \"\"\" Parabolic Stop and Reverse (Parabolic SAR)\n\n The Parabolic Stop and Reverse, more commonly known as the\n Parabolic SAR,is a trend-following indicator developed by\n J. Welles Wilder. The Parabolic SAR is displayed as a single\n parabolic line (or dots) underneath the price bars in an uptrend,\n and above the price bars in a downtrend.\n\n https://school.stockcharts.com/doku.php?id=technical_indicators:parabolic_sar\n \"\"\"\n def __init__(\n self, \n data: pd.DataFrame, \n step: float=0.02, \n max_step: float=0.2,\n target_column: str='close',\n render_options: dict={}\n ):\n self._names = ['PSAR']\n self._step = step\n self._max_step = max_step\n super().__init__(data, target_column, render_options)\n\n @property\n def min(self):\n return self._data['PSAR'].min()\n \n @property\n def max(self):\n return self._data['PSAR'].max()\n\n def default_render_options(self):\n return {name: RenderOptions(\n name=name,\n color=(100, 100, 255),\n window_type=WindowType.MAIN,\n render_type=RenderType.DOT,\n min=self.min,\n max=self.max\n ) for name in self._names}\n\n def compute(self):\n high = self._data['high']\n low = self._data['low']\n close = self._data[self.target_column]\n\n up_trend = True\n acceleration_factor = self._step\n up_trend_high = high.iloc[0]\n down_trend_low = low.iloc[0]\n\n self._psar = close.copy()\n self._psar_up = pd.Series(index=self._psar.index, dtype=\"float64\")\n self._psar_down = pd.Series(index=self._psar.index, dtype=\"float64\")\n\n for i in range(2, len(close)):\n reversal = False\n\n max_high = high.iloc[i]\n min_low = low.iloc[i]\n\n if up_trend:\n self._psar.iloc[i] = self._psar.iloc[i - 1] + (\n acceleration_factor * (up_trend_high - self._psar.iloc[i - 1])\n )\n\n if min_low < self._psar.iloc[i]:\n reversal = True\n self._psar.iloc[i] = up_trend_high\n down_trend_low = min_low\n acceleration_factor = self._step\n else:\n if max_high > up_trend_high:\n up_trend_high = max_high\n acceleration_factor = min(\n acceleration_factor + self._step, self._max_step\n )\n\n low1 = low.iloc[i - 1]\n low2 = low.iloc[i - 2]\n if low2 < 
self._psar.iloc[i]:\n self._psar.iloc[i] = low2\n elif low1 < self._psar.iloc[i]:\n self._psar.iloc[i] = low1\n else:\n self._psar.iloc[i] = self._psar.iloc[i - 1] - (\n acceleration_factor * (self._psar.iloc[i - 1] - down_trend_low)\n )\n\n if max_high > self._psar.iloc[i]:\n reversal = True\n self._psar.iloc[i] = down_trend_low\n up_trend_high = max_high\n acceleration_factor = self._step\n else:\n if min_low < down_trend_low:\n down_trend_low = min_low\n acceleration_factor = min(\n acceleration_factor + self._step, self._max_step\n )\n\n high1 = high.iloc[i - 1]\n high2 = high.iloc[i - 2]\n if high2 > self._psar.iloc[i]:\n self._psar[i] = high2\n elif high1 > self._psar.iloc[i]:\n self._psar.iloc[i] = high1\n\n up_trend = up_trend != reversal # XOR\n\n if up_trend:\n self._psar_up.iloc[i] = self._psar.iloc[i]\n else:\n self._psar_down.iloc[i] = self._psar.iloc[i]\n\n # calculate psar indicator\n self._data['PSAR'] = self._psar" }, { "identifier": "SMA", "path": "finrock/indicators.py", "snippet": "class SMA(Indicator):\n \"\"\" Trend indicator\n\n A simple moving average (SMA) calculates the average of a selected range of prices, usually closing prices, by the number \n of periods in that range.\n\n The SMA is a technical indicator for determining if an asset price will continue or reverse a bull or bear trend. It is \n calculated by summing up the closing prices of a stock over time and then dividing that total by the number of time periods \n being examined. Short-term averages respond quickly to changes in the price of the underlying, while long-term averages are \n slow to react.\n\n https://www.investopedia.com/terms/s/sma.asp\n \"\"\"\n def __init__(\n self, \n data: pd.DataFrame, \n period: int=20, \n target_column: str='close',\n render_options: dict={}\n ):\n self._period = period\n self._names = [f'SMA{period}']\n super().__init__(data, target_column, render_options)\n\n @property\n def min(self):\n return self._data[self.names[0]].min()\n \n @property\n def max(self):\n return self._data[self.names[0]].max()\n \n def default_render_options(self):\n return {name: RenderOptions(\n name=name,\n color=(100, 100, 255),\n window_type=WindowType.MAIN,\n render_type=RenderType.LINE,\n min=self.min,\n max=self.max\n ) for name in self._names}\n\n def compute(self):\n self._data[self.names[0]] = self._data[self.target_column].rolling(self._period).mean()" } ]
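Several context snippets above explain their own math (for example, simpleReward's relative price change, SharpeRatio's mean/std scaled by sqrt(365.25), and RSI's Wilder-style smoothing). As a small self-contained sketch of the RSI calculation shown in the `RSI.compute` snippet, run here on synthetic data (the variable names are illustrative, not from the repository):

import numpy as np
import pandas as pd

period = 14
close = pd.Series(100 + np.cumsum(np.random.randn(200)))  # synthetic close prices

delta = close.diff()
up = delta.clip(lower=0)
down = -1 * delta.clip(upper=0)
ema_up = up.ewm(com=period - 1, adjust=True, min_periods=period).mean()
ema_down = down.ewm(com=period - 1, adjust=True, min_periods=period).mean()
rs = ema_up / ema_down
rsi = 100 - (100 / (1 + rs))  # oscillates between 0 and 100, as the docstring notes
print(rsi.tail())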
import numpy as np
import pandas as pd
import tensorflow as tf
from finrock.data_feeder import PdDataFeeder
from finrock.trading_env import TradingEnv
from finrock.render import PygameRender
from finrock.scalers import MinMaxScaler
from finrock.reward import simpleReward
from finrock.metrics import DifferentActions, AccountValue, MaxDrawdown, SharpeRatio
from finrock.indicators import BolingerBands, RSI, PSAR, SMA
9,058
tf.get_logger().setLevel('ERROR')
for gpu in tf.config.experimental.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)

df = pd.read_csv('Datasets/random_sinusoid.csv')
df = df[-1000:]

pd_data_feeder = PdDataFeeder(
    df,
    indicators = [
        BolingerBands(data=df, period=20, std=2),
        RSI(data=df, period=14),
        PSAR(data=df),
        SMA(data=df, period=7),
        SMA(data=df, period=25),
        SMA(data=df, period=99),
    ]
)

env = TradingEnv(
    data_feeder = pd_data_feeder,
    output_transformer = MinMaxScaler(min=pd_data_feeder.min, max=pd_data_feeder.max),
    initial_balance = 1000.0,
    max_episode_steps = 1000,
    window_size = 50,
tf.get_logger().setLevel('ERROR')
for gpu in tf.config.experimental.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)

df = pd.read_csv('Datasets/random_sinusoid.csv')
df = df[-1000:]

pd_data_feeder = PdDataFeeder(
    df,
    indicators = [
        BolingerBands(data=df, period=20, std=2),
        RSI(data=df, period=14),
        PSAR(data=df),
        SMA(data=df, period=7),
        SMA(data=df, period=25),
        SMA(data=df, period=99),
    ]
)

env = TradingEnv(
    data_feeder = pd_data_feeder,
    output_transformer = MinMaxScaler(min=pd_data_feeder.min, max=pd_data_feeder.max),
    initial_balance = 1000.0,
    max_episode_steps = 1000,
    window_size = 50,
reward_function = simpleReward,
4
2023-10-23 07:44:54+00:00
12k
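The SMA docstring in the context snippet above reduces the indicator to a rolling mean of closing prices over a fixed period. A minimal standalone sketch of that calculation follows; the price values and the period are made up purely for illustration and are not part of the dataset entry:

import pandas as pd

# Hypothetical closing prices, used only to illustrate the rolling-mean SMA
# described in the SMA snippet above.
prices = pd.DataFrame({"close": [10.0, 11.0, 12.0, 13.0, 14.0, 15.0]})

period = 3
# SMA{period}: average of the last `period` closes at each row; the first
# `period - 1` rows stay NaN because the window is not yet full.
prices[f"SMA{period}"] = prices["close"].rolling(period).mean()
print(prices)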
hitlic/deepepochs
deepepochs/trainer.py
[ { "identifier": "StopLoopException", "path": "deepepochs/loops.py", "snippet": "class StopLoopException(Exception):\r\n pass\r" }, { "identifier": "LoopException", "path": "deepepochs/loops.py", "snippet": "class LoopException(Exception):\r\n pass\r" }, { "identifier": "TensorTuple", "path": "deepepochs/loops.py", "snippet": "class TensorTuple(tuple):\r\n \"\"\"\r\n tuple of tensors\r\n \"\"\"\r\n def __new__(cls, tensors):\r\n if isinstance(tensors, torch.Tensor):\r\n tensors=(tensors,)\r\n return tuple.__new__(cls, tensors)\r\n\r\n @property\r\n def device(self):\r\n if len(self) > 0:\r\n return self[0].device\r\n else:\r\n return torch.device(type='cpu')\r\n\r\n def to(self, device, **kwargs):\r\n return TensorTuple(t.to(device, **kwargs) if isinstance(t, torch.Tensor) else t for t in self)\r\n\r\n def cpu(self):\r\n return TensorTuple(t.cpu() if isinstance(t, torch.Tensor) else t for t in self)\r\n\r\n def clone(self):\r\n return TensorTuple(t.clone() if isinstance(t, torch.Tensor) else t for t in self)\r\n\r\n def detach(self):\r\n return TensorTuple(t.detach() if isinstance(t, torch.Tensor) else t for t in self)\r\n\r\n @property\r\n def data(self):\r\n return TensorTuple(t.data if isinstance(t, torch.Tensor) else t for t in self)\r\n\r\n def float(self):\r\n return TensorTuple(t.float() if isinstance(t, torch.Tensor) else t for t in self)\r\n\r\n def long(self):\r\n return TensorTuple(t.long() if isinstance(t, torch.Tensor) else t for t in self)\r\n\r\n def int(self):\r\n return TensorTuple(t.int() if isinstance(t, torch.Tensor) else t for t in self)\r" }, { "identifier": "flatten_dict", "path": "deepepochs/loops.py", "snippet": "def flatten_dict(d, parent_key='', sep='.'):\r\n \"\"\"flatten a dict with dict as values\"\"\"\r\n items = []\r\n for k, v in d.items():\r\n new_key = f'{parent_key}{sep}{k}' if parent_key else k\r\n if isinstance(v, dict):\r\n items.extend(flatten_dict(v, new_key, sep).items())\r\n else:\r\n items.append((new_key, v))\r\n return dict(items)\r" }, { "identifier": "default_loss", "path": "deepepochs/loops.py", "snippet": "def default_loss(preds, targets):\r\n \"\"\"默认损失函数,直接返回模型预测结果,适用于模型直接返回损失值的情况。\"\"\"\r\n return preds\r" }, { "identifier": "concat_dicts", "path": "deepepochs/loops.py", "snippet": "def concat_dicts(dicts, to_np=True):\r\n if to_np:\r\n return {k: [to_numpy(d.get(k, 0)) for d in dicts] for k in keyset(dicts)}\r\n else:\r\n return {k: [d.get(k, 0) for d in dicts] for k in keyset(dicts)}\r" }, { "identifier": "to_numpy", "path": "deepepochs/loops.py", "snippet": "def to_numpy(data):\r\n \"\"\"将torch.Tensor或Tensor列表、Tensor字典转为numpy数组\"\"\"\r\n def to(d):\r\n if isinstance(d, torch.Tensor):\r\n return d.detach().cpu().numpy()\r\n else:\r\n return np.array(d, dtype=float)\r\n if isinstance(data, (list, tuple)):\r\n return [to(d) for d in data]\r\n elif isinstance(data, dict):\r\n return {k: to(v) for k, v in data.items()}\r\n else:\r\n return to(data)\r" }, { "identifier": "listify", "path": "deepepochs/loops.py", "snippet": "def listify(obj):\r\n if obj is None:\r\n return []\r\n if isinstance(obj, list):\r\n return obj\r\n if isinstance(obj, tuple):\r\n return list(obj)\r\n if isinstance(obj, (dict, str)):\r\n return [obj]\r\n if isinstance(obj, Iterable):\r\n return list(obj)\r\n return [obj]\r" }, { "identifier": "batch_size", "path": "deepepochs/loops.py", "snippet": "def batch_size(data):\r\n if isinstance(data, (list, tuple)):\r\n return data[0].shape[0]\r\n elif isinstance(data, torch.Tensor):\r\n return data.shape[0]\r\n elif 
hasattr(data, '__len__'):\r\n return len(data)\r\n else:\r\n return 1\r" }, { "identifier": "concat", "path": "deepepochs/loops.py", "snippet": "def concat(datas):\r\n if isinstance(datas[0], (list, tuple)):\r\n return TensorTuple([torch.concat(ds, dim=0) if ds[0].dim()> 1 else torch.concat(ds) for ds in zip(*datas)])\r\n else:\r\n return torch.concat(datas, dim=0) if datas[0].dim() > 1 else torch.concat(datas)\r" }, { "identifier": "detach_clone", "path": "deepepochs/loops.py", "snippet": "def detach_clone(data):\r\n \"\"\"对torch.Tensor或者Tensor列表、Tensor字典进行detach().clone()操作\"\"\"\r\n def to(d):\r\n if isinstance(d, torch.Tensor):\r\n return d.detach().clone()\r\n else:\r\n return d\r\n if isinstance(data, (list, tuple)):\r\n return [to(d) for d in data]\r\n elif isinstance(data, dict):\r\n return {k: to(v) for k, v in data.items()}\r\n else:\r\n return to(data)\r" }, { "identifier": "batches", "path": "deepepochs/tools.py", "snippet": "def batches(inputs, batch_size):\n \"\"\"\n 把inputs按batch_size进行划分\n \"\"\"\n is_list_input = isinstance(inputs, (list, tuple)) # inputs是否是多个输入组成的列表或元素\n start_idx = 0\n is_over = False\n while True:\n if is_list_input:\n batch = TensorTuple([data[start_idx: start_idx + batch_size] for data in inputs])\n is_over = len(batch[0]) > 0\n start_idx += len(batch[0])\n else:\n batch = inputs[start_idx: start_idx + batch_size]\n is_over = len(batch) > 0\n start_idx += len(batch)\n if is_over > 0:\n yield batch\n else:\n break" }, { "identifier": "Optimizer", "path": "deepepochs/optimizer.py", "snippet": "class Optimizer:\n def __init__(self, opt, scheduler=None, sched_on='epoch', sched_with_loss=False):\n \"\"\"\n 优化器组合,对优化器和学习率调度器进行统一管理。\n Args:\n opt: torch.optim.*\n scheduler: torch.optim.lr_scheduler.*\n sched_on: 学习率调整是每个epoch还是每个step\n sched_with_loss: scheduler.step方法是否需要损失作为参数(例如ReduceLROnPlateau)\n \"\"\"\n self.opt = opt\n self.scheduler = scheduler\n assert sched_on in ['step', 'epoch'], '`sched_on`取值为\"step\"或\"epoch\"!'\n self.sched_on = sched_on\n self.sched_with_loss = sched_with_loss\n\n def zero_grad(self):\n self.opt.zero_grad()\n\n def get_last_lr(self):\n return self.scheduler.get_last_lr() if self.scheduler is not None else None\n\n def step(self, at='step', loss=None):\n if at == 'step':\n self.opt.step()\n if self.sched_on == 'step':\n self.sched_step(loss)\n elif at == 'epoch':\n if self.sched_on == 'epoch':\n self.sched_step(loss)\n else:\n raise ValueError('Optimizer.step方法的`at`参数取值为\"step\"或\"epoch\"')\n\n def sched_step(self, loss):\n if self.scheduler is not None:\n if self.sched_with_loss:\n assert loss is not None, \"学习率调度要求损失作为参数,但`train_step`和`evaluate_step`都没有返回`loss`!\"\n self.scheduler.step(loss)\n else:\n self.scheduler.step()\n\n def state_dict(self):\n sched_state = None if self.scheduler is None else self.scheduler.state_dict()\n return {'opt_state': self.opt.state_dict(), 'sched_state': sched_state}\n\n def load_state_dict(self, state):\n opt_state, sched_state = state['opt_state'], state['sched_state']\n self.opt.load_state_dict(opt_state)\n if sched_state is not None and self.scheduler is not None:\n self.scheduler.load_state_dict(opt_state)\n\n @property\n def param_groups(self):\n return self.opt.param_groups\n\n def get_current_lr(self):\n for param_group in self.param_groups:\n return param_group['lr']" }, { "identifier": "Optimizers", "path": "deepepochs/optimizer.py", "snippet": "class Optimizers(list):\n \"\"\"\n 用于管理多个优化器组合(Optimizer),对多个优化器提供支持。\n \"\"\"\n def zero_grad(self):\n for opt in self:\n 
opt.zero_grad()\n\n def get_last_lr(self):\n return [opt.get_last_lr() for opt in self]\n\n def step(self, at='step', loss=None):\n for opt in self:\n opt.step(at, loss)\n\n def state_dict(self):\n return [opt.state_dict() for opt in self]\n\n def load_state_dict(self, states):\n for opt, state in zip(self, states):\n opt.load_state_dict(state)\n\n def get_current_lr(self):\n return [opt.get_current_lr() for opt in self]" }, { "identifier": "PatchBase", "path": "deepepochs/patches.py", "snippet": "class PatchBase(abc.ABC):\n \"\"\"\n 所有Patch对象的基类\n \"\"\"\n def __init__(self, name=None):\n \"\"\"\n Args:\n name: 显示在输出日志中的名称,当为空时使用指标函数的__name__属性\n \"\"\"\n super().__init__()\n self.name = name\n\n def __add__(self, obj):\n return self.__add(obj)\n\n def __radd__(self, obj):\n return self.__add(obj)\n\n def __call__(self):\n return self.forward()\n\n @abc.abstractmethod\n def forward(self):\n \"\"\"\n 基于当前Patch中保存的数据,计算一个结果(如指标值)并返回,被__call__方法自动调用。\n \"\"\"\n\n def __add(self, obj):\n if obj == 0:\n return self\n assert isinstance(obj, self.__class__), '相加的两个Patch的类型不一致!'\n return self.add(obj)\n\n @abc.abstractmethod\n def add(self, obj):\n \"\"\"\n 用于重载“+”运算符,将self和obj两个对象相加,得到一个新的对象。\n 注意:在相加之前检查self和obj是否能够相加\n \"\"\"" }, { "identifier": "MeanPatch", "path": "deepepochs/patches.py", "snippet": "class MeanPatch(PatchBase):\n def __init__(self, metric, batch_preds, batch_targets=None, name=None):\n \"\"\"\n 用于累积多个mini-batch的指标值,计算Epoch的指标。\n Args:\n metric: 计算指标的函数(或其他适当的可调用对象),必须返回经过平均指标值。\n batch_pres: 一个mini_batch的模型预测\n batch_targets: 一个mini_batch的标签(当指标计算不需要标签时为空值)\n name: 显示在输出日志中的名称\n \"\"\"\n super().__init__(name)\n assert callable(metric), '指标`metric`应当是一个可调用对象!'\n self.metric = metric\n self.batch_size = len(batch_preds)\n m_value = metric(batch_preds, batch_targets)\n if isinstance(m_value, dict):\n self.batch_value = {k: v * self.batch_size for k, v in m_value.items()}\n else:\n self.batch_value = m_value * self.batch_size\n\n def forward(self):\n if isinstance(self.batch_value, dict):\n return {k: v / self.batch_size for k, v in self.batch_value.items()}\n else:\n return self.batch_value / self.batch_size\n\n def add(self, obj):\n assert self.metric is obj.metric, '相加的两个Patch的`metric`不一致'\n return add_patch_value(self, obj)" }, { "identifier": "TensorPatch", "path": "deepepochs/patches.py", "snippet": "class TensorPatch(PatchBase):\n def __init__(self, metric, batch_preds, batch_targets=None, name=None, single_batch=True):\n \"\"\"\n 用于累积多个mini-batch的preds和targets,计算Epoch的指标。\n 例如:\n batch 1的模型预测为preds1, 标签为targets1;\n batch 1的模型预测为preds2, 标签为targets2;\n m_fun 为指标计算函数;\n 计算两个batch的指标:\n p1 = Patch(m_fun, preds1, targets1)\n p2 = Patch(m_fun, preds2, targets2)\n p = 0 + p1 + p2 # 两个Patch可直接相加,而且可与0相加\n p = sum([p1, p2]) # 可利用sum进行运算\n p1() # batch 1上的指标值\n p2() # batch 2上的指标值\n p() # 两个batch上的指标值\n Args:\n metric: 计算指标的函数(或其他适当的可调用对象)\n batch_pres: 一个mini_batch的模型预测\n batch_targets: 一个mini_batch的标签(当指标计算不需要标签时为空值)\n name: 显示在输出日志中的名称\n single_batch: batch_preds, batch_targets中包含的是单个还是多个batch的Patch\n \"\"\"\n super().__init__(name)\n assert callable(metric), '指标`metric`应当是一个可调用对象!'\n self.metric = metric\n if single_batch: # 单个mini-batch的模型预测输出\n # 应对模型有多个输出的情况\n self.batch_preds = [batch_preds] if isinstance(batch_preds, (list, tuple)) else [[batch_preds]]\n else: # 多个mini-batch模型预测输出\n self.batch_preds = batch_preds\n if batch_targets is None:\n self.batch_targets = None\n else:\n if single_batch: # 单个mini-batch的标签数据\n # 应对模型有多个标签的情况\n self.batch_targets = [batch_targets] 
if isinstance(batch_targets, (list, tuple)) else [[batch_targets]]\n else: # 多个mini-batch的标签数据\n self.batch_targets = batch_targets\n\n self.concat = torch.concat if isinstance(self.batch_preds[0][0], torch.Tensor) else np.concatenate\n\n def forward(self):\n preds = [self.concat(bpreds, 0) for bpreds in zip(*self.batch_preds)]\n targets = None if self.batch_targets is None else [self.concat(btargets, 0) for btargets in zip(*self.batch_targets)]\n preds = preds[0] if len(preds) == 1 else preds\n targets = targets[0] if len(targets) == 1 else targets\n return self.metric(preds, targets)\n\n def add(self, obj):\n assert self.metric is obj.metric, '相加的两个Patch的`metric`不一致'\n new_preds = self.batch_preds + obj.batch_preds\n if self.batch_targets != None:\n assert obj.batch_targets is not None, '相加的两个Patch的`batch_targets`其中一个为None!'\n new_targets = self.batch_targets + obj.batch_targets\n else:\n new_targets = None\n return self.__class__(self.metric, new_preds, new_targets, self.name, single_batch=False)" }, { "identifier": "run_patch_dict", "path": "deepepochs/patches.py", "snippet": "def run_patch_dict(patch_dict):\n \"\"\"\n 计算一个Patch字典的指标值(计算Batch指标)\n \"\"\"\n return {patch_name(k, v): v() for k, v in patch_dict.items()}" }, { "identifier": "run_patch_dicts", "path": "deepepochs/patches.py", "snippet": "def run_patch_dicts(patch_dicts):\n \"\"\"\n 计算Patch字典的列表的指标值(计算Epoch指标)\n \"\"\"\n if len(patch_dicts) == 0:\n return None\n return {patch_name(k, patch_dicts[0][k]): sum(dic[k] for dic in patch_dicts if dic)() for k in keyset(patch_dicts)}" }, { "identifier": "CallbackPool", "path": "deepepochs/callbacks/callback.py", "snippet": "class CallbackPool(list):\n \"\"\"\n 用于管理、执行Callback方法的类\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def prepare(self):\n self.sort(key=lambda cbk: cbk.priority)\n\n def append(self, callback: Callback):\n assert isinstance(callback, Callback), '`callback`必须是Callback的子类对象!'\n return super().append(callback)\n\n def extend(self, callbacks: Iterable):\n assert all(isinstance(cbk, Callback) for cbk in callbacks), '`callbacks`中必须都是Callback的子类对象!'\n return super().extend(callbacks)\n\n def trigger(self, event, *args, **kwargs):\n if 'before' in event:\n cbk_ids = range(len(self))\n else:\n cbk_ids = range(len(self)-1, -1, -1)\n for i in cbk_ids:\n getattr(self[i], f'on_{event}')(*args, **kwargs)" }, { "identifier": "CallbackException", "path": "deepepochs/callbacks/callback.py", "snippet": "class CallbackException(Exception):\n pass" }, { "identifier": "DefaultCallback", "path": "deepepochs/callbacks/default.py", "snippet": "class DefaultCallback(Callback):\n def __init__(self, log_long, log_batch, log_tqdm):\n \"\"\"\n 默认启用的Callback,实现功能:\n 指标输出\n 学习率调度\n 为mini-batch构建每个指标的Patch\n Args:\n log_long: 指标输出为长格式(7位小说)还是短格式(4位小数)\n log_batch: 是否输出batch的指标值\n tqdm_iter: tqdm迭代对象\n \"\"\"\n super().__init__(priority=0)\n self.round_to = 7 if log_long else 4\n self.log_batch = log_batch\n self.epoch_width = 4\n self.batch_width = 5\n self.log_tqdm = log_tqdm\n self.tqdm_iter = None\n\n def on_before_fit(self, trainer, epochs):\n self.total_epochs = epochs\n\n def on_before_epoch(self, trainer, train_tasks, val_tasks, epoch_idx):\n self.epoch_idx = epoch_idx\n self.total_train_batchs = sum(task.batchs for task in train_tasks) # 所有训练任务总batch数量\n self.total_val_batchs = sum(task.batchs for task in val_tasks) # 所有验证任务总batch数量\n self.global_train_batch_idx = 0 # 当前训练batch\n self.global_val_batch_idx = 0 # 当前验证batch\n\n self.epoch_width = 
len(str(self.total_epochs))\n self.batch_width = len(str(max(self.total_val_batchs, self.total_train_batchs)))\n\n def on_after_train_batch(self, trainer, metrics, batch_idx):\n self.global_train_batch_idx += 1\n if self.log_batch and trainer.main_process:\n log_batch(metrics, self.epoch_idx+1, self.total_epochs, self.global_train_batch_idx, self.total_train_batchs, 'TRAIN', self.epoch_width, self.batch_width, self.round_to)\n\n def on_after_val_batch(self, trainer, metrics, batch_idx):\n self.global_val_batch_idx += 1\n if self.log_batch and trainer.main_process:\n log_batch(metrics, self.epoch_idx+1, self.total_epochs, self.global_val_batch_idx, self.total_val_batchs, 'VAL', self.epoch_width, self.batch_width, self.round_to)\n\n def on_after_epoch(self, trainer, train_tasks, val_tasks, train_metrics, val_metrics, epoch_idx):\n if trainer.main_process:\n if val_metrics:\n log_epoch({'train': train_metrics, 'val': val_metrics}, epoch_idx+1, self.total_epochs, self.epoch_width, self.round_to, self.tqdm_iter)\n else:\n log_epoch({'train': train_metrics}, epoch_idx+1, self.total_epochs, self.epoch_width, self.round_to, self.tqdm_iter)\n\n # 根据调度器的配置改变优化器学习率\n if val_metrics: # 优先使用验证损失\n sched_loss = val_metrics.get('loss')\n else:\n sched_loss = train_metrics.get('loss')\n trainer.opt.step(at='epoch', loss=sched_loss)\n\n def on_before_test_epochs(self, trainer, tasks):\n self.total_test_epochs = len(tasks)\n self.global_test_epoch_idx = 0\n self.total_test_batchs = sum(task.batchs for task in tasks)\n self.global_test_batch_idx = 0\n\n def on_after_test_epoch(self, trainer, task, metrics):\n if trainer.main_process:\n log_epoch({'test': metrics}, self.global_test_epoch_idx+1, self.total_test_epochs, self.epoch_width, self.round_to)\n self.global_test_epoch_idx += 1\n\n def on_after_test_batch(self, trainer, metrics, batch_idx):\n self.global_test_batch_idx += 1\n if self.log_batch and trainer.main_process:\n log_batch(metrics, self.global_test_epoch_idx+1, self.total_test_epochs, self.global_test_batch_idx, self.total_test_batchs, 'TEST', self.epoch_width, self.batch_width, self.round_to)\n\n def on_train_metrics(self, trainer, loss, model_out, batch_y, task):\n \"\"\"当前task的每个指标构建Patch,并注入task.batch_patch_dict\"\"\"\n task.batch_patch_dict = self.make_patch_dict(trainer, loss, model_out, batch_y, task.metrics, 'train')\n\n def on_val_metrics(self, trainer, loss, model_out, batch_y, task):\n \"\"\"当前task的每个指标构建Patch,并注入task.batch_patch_dict\"\"\"\n task.batch_patch_dict = self.make_patch_dict(trainer, loss, model_out, batch_y, task.metrics, 'val')\n\n def on_test_metrics(self, trainer, loss, model_out, batch_y, task):\n \"\"\"当前task的每个指标构建Patch,并注入task.batch_patch_dict\"\"\"\n task.batch_patch_dict = self.make_patch_dict(trainer, loss, model_out, batch_y, task.metrics, 'test')\n\n def make_patch_dict(self, trainer, loss, model_out, batch_y, metrics, stage):\n b_size = torch.tensor(batch_size(model_out)).to(trainer.device)\n # Accelerate 分布式训练时,获取各Process的数据\n if trainer.accelerator is not None and stage!='train': # 训练时仅在主线程上计算指标\n if loss is not None:\n loss = trainer.accelerator.gather_for_metrics(loss)\n b_size = trainer.accelerator.gather_for_metrics(b_size)\n loss = (loss * b_size).sum()\n b_size = b_size.sum()\n loss = loss/b_size\n model_out = trainer.accelerator.gather_for_metrics(model_out)\n batch_y = trainer.accelerator.gather_for_metrics(batch_y)\n\n patch_dict = {} if loss is None else {'loss': ValuePatch(loss, b_size)}\n for m in metrics:\n patch_dict[m.__name__] = 
trainer.metric_patch(m, model_out, batch_y)\n return patch_dict" } ]
import math import time import torch from datetime import datetime from collections import defaultdict from typing import List, Dict, Callable from torch.optim import Adam from torch.utils.data import DataLoader from accelerate import Accelerator from .loops import (StopLoopException, LoopException, TensorTuple, flatten_dict, default_loss, concat_dicts, to_numpy, listify, batch_size, concat, detach_clone) from .tools import batches from .optimizer import Optimizer, Optimizers from .patches import PatchBase, MeanPatch, TensorPatch, run_patch_dict, run_patch_dicts from .callbacks import CallbackPool, DefaultCallback, CallbackException from tqdm import tqdm
7,749
patch_dict = step_out else: patch_dict = {} self.batch_patch_dict.update(patch_dict) epoch_patch_dicts.append(self.batch_patch_dict) # 计算当前batch的指标 batch_metric_values = flatten_dict(run_patch_dict(self.batch_patch_dict), sep='') self.callbacks.trigger(f'after_{self.stage}_batch', trainer=self.trainer, metrics=batch_metric_values, batch_idx=batch_idx) # 清空 self.batch_patch_dict self.batch_patch_dict = {} # 计算当前epoch的指标 epoch_metrics_values = flatten_dict(run_patch_dicts(epoch_patch_dicts), sep='') self.callbacks.trigger(f'after_{self.stage}_epoch', trainer=self.trainer, task=self, metrics=epoch_metrics_values) return epoch_metrics_values class ModelWrapper: """ 用于实现回调: on_before_train_forward on_after_train_forward on_before_val_forward on_after_val_forward on_before_test_forward on_after_test_forward """ def __init__(self, model, trainer): # self.model = torch.compile(model) self.model = model self.trainer = trainer self.stage = None def __getattr__(self, name): return getattr(self.model, name) def __call__(self, *args, **kwds): self.trainer.callbacks.trigger(f'before_{self.stage}_forward', trainer=self) model_out = self.model(*args, **kwds) self.trainer.callbacks.trigger(f'after_{self.stage}_forward', trainer=self, model_out=model_out) return model_out def train(self): self.model.train() def eval(self): self.model.eval() def to(self, device): self.model = self.model.to(device) return self def cpu(self): self.model = self.model.cpu() return self def cuda(self): self.model = self.model.cuda() return self def parameters(self): return self.model.parameters() def modules(self): return self.model.modules() def state_dict(self): return self.model.state_dict() def load_state_dict(self, state_dict): self.model.load_state_dict(state_dict) class LossWrapper: """ 1. 自动完成zero_grad、backward、opt.step等操作 2. 配合实现梯度累积 3. 实现回调 on_before_backward on_after_backward on_before_optimize on_after_optimize on_train_metrics on_val_metrics on_test_metrics """ def __init__(self, loss_fn, trainer): self.loss_fn = loss_fn self.trainer = trainer self.stage = None self.do_loss = None self.task = None self.total_loss = 0 # 用于实现累积梯度 self.model_outs = [] # 用于实现累积梯度 self.batch_ys = [] # 用于实现累积梯度 def optimize(self): self.trainer.callbacks.trigger('before_optimize', trainer=self) self.trainer.opt.step() self.trainer.opt.zero_grad() self.trainer.callbacks.trigger('after_optimize', trainer=self) def __call__(self, model_out, batch_y, grad_accumulate=False): """ Args: model_out: 模型预测输出 batch_y: 标签 grad_accumulate: 是否累积梯度 """ if self.stage == 'train': # 计算损失 loss = self.loss_fn(model_out, batch_y) # backward self.trainer.callbacks.trigger('before_backward', trainer=self, loss=loss) if self.trainer.accelerator is None: (loss/self.trainer.grad_accumulate_steps).backward() else: # accelerate的backward self.trainer.accelerator.backward(loss/self.trainer.grad_accumulate_steps) self.trainer.callbacks.trigger('after_backward', trainer=self, loss=loss) # 记录各sub-batch的总损失、模型输出、标签 _loss = loss.detach().clone() self.total_loss += _loss * batch_size(model_out)
""" @author: liuchen """ class EpochTask: """一个Epoch的训练、验证或测试任务""" def __init__(self, dataloader, metrics=None, do_loss=True, **step_args): """ Args: dataloader: pytorch Dataloader metrics: 指标函数列表 do_loss: 验证和测试中是否计算据损失 step_args: 其他需要传递给`step`、`train_step`、`val_step`、`test_step`和`evaluate`方法的参数 """ self.dataloader = dataloader self.batchs = len(dataloader) self.metrics = listify(metrics) self.do_loss = do_loss self.trainer = None self.stage = None self.val_freq = None self.step_args = step_args self.batch_patch_dict = {} # 由DefaultCallback中的on_train/val/test_prediction回调注入 def __len__(self): return self.batchs def __getattr__(self, name): """如果要找的属性和方法不存在,则到trainer中找""" return getattr(self.trainer, name, None) def __call__(self): phase = 'train' if self.stage=='train' else 'evaluate' if self.stage == 'train': self.model.train() else: self.model.eval() self.model.stage = self.stage self.loss.stage = self.stage self.loss.do_loss = self.do_loss self.loss.task = self # 配置指标,在DefaultCallback中的on_train/val/test_prediction中用于构造Patch if self.stage == 'train': self.metrics = [m for m in self.metrics if m not in self.train_metrics] + self.train_metrics elif self.stage == 'val': self.metrics = [m for m in self.metrics if m not in self.val_metrics] + self.val_metrics else: self.metrics = [m for m in self.metrics if m not in self.test_metrics] + self.test_metrics with torch.no_grad(): self.callbacks.trigger(f'before_{self.stage}_epoch', trainer=self, task=self) epoch_patch_dicts = [] for batch_idx, batch_data in enumerate(self.dataloader): batch_x, batch_y = self.prepare_data(batch_data) self.callbacks.trigger(f'before_{self.stage}_batch', trainer=self.trainer, batch_x=batch_x, batch_y=batch_y, batch_idx=batch_idx) # 获取mini-batch的`*step`方法 # 1. 最优先使用`EpochTask.step`、`Trainer.step` step_method = getattr(self, 'step', None) # 2. 次优先使用`EpochTask.train_step`、`Epoch.val_step`、`EpochTask.test_step` # 3. 其次使用`Trainer.train_step`、`Trainer.val_step`、`Trainer.test_step` step_method = getattr(self, f'{self.stage}_step') if step_method is None else step_method # 4. 再次使用`EpochTask.evaluate_step`方法 # 5. 
最次使用`Trainer.evaluate_step` step_method = getattr(self, f'{phase}_step') if step_method is None else step_method # 运行mini-batch的`*step`方法 if self.stage == 'train': with torch.enable_grad(): step_out = step_method(batch_x, batch_y, **self.step_args) else: step_out = step_method(batch_x, batch_y, **self.step_args) if step_out is not None: if not isinstance(step_out, dict): raise LoopException(f'{step_method} 方法的返回值必须为字典!') if not all(isinstance(v, PatchBase) for k, v in step_out.items()): raise LoopException(f'{step_method} 方法返回字典的value必须为Patch(deepepochs.PatchBase子类对象)!') patch_dict = step_out else: patch_dict = {} self.batch_patch_dict.update(patch_dict) epoch_patch_dicts.append(self.batch_patch_dict) # 计算当前batch的指标 batch_metric_values = flatten_dict(run_patch_dict(self.batch_patch_dict), sep='') self.callbacks.trigger(f'after_{self.stage}_batch', trainer=self.trainer, metrics=batch_metric_values, batch_idx=batch_idx) # 清空 self.batch_patch_dict self.batch_patch_dict = {} # 计算当前epoch的指标 epoch_metrics_values = flatten_dict(run_patch_dicts(epoch_patch_dicts), sep='') self.callbacks.trigger(f'after_{self.stage}_epoch', trainer=self.trainer, task=self, metrics=epoch_metrics_values) return epoch_metrics_values class ModelWrapper: """ 用于实现回调: on_before_train_forward on_after_train_forward on_before_val_forward on_after_val_forward on_before_test_forward on_after_test_forward """ def __init__(self, model, trainer): # self.model = torch.compile(model) self.model = model self.trainer = trainer self.stage = None def __getattr__(self, name): return getattr(self.model, name) def __call__(self, *args, **kwds): self.trainer.callbacks.trigger(f'before_{self.stage}_forward', trainer=self) model_out = self.model(*args, **kwds) self.trainer.callbacks.trigger(f'after_{self.stage}_forward', trainer=self, model_out=model_out) return model_out def train(self): self.model.train() def eval(self): self.model.eval() def to(self, device): self.model = self.model.to(device) return self def cpu(self): self.model = self.model.cpu() return self def cuda(self): self.model = self.model.cuda() return self def parameters(self): return self.model.parameters() def modules(self): return self.model.modules() def state_dict(self): return self.model.state_dict() def load_state_dict(self, state_dict): self.model.load_state_dict(state_dict) class LossWrapper: """ 1. 自动完成zero_grad、backward、opt.step等操作 2. 配合实现梯度累积 3. 
实现回调 on_before_backward on_after_backward on_before_optimize on_after_optimize on_train_metrics on_val_metrics on_test_metrics """ def __init__(self, loss_fn, trainer): self.loss_fn = loss_fn self.trainer = trainer self.stage = None self.do_loss = None self.task = None self.total_loss = 0 # 用于实现累积梯度 self.model_outs = [] # 用于实现累积梯度 self.batch_ys = [] # 用于实现累积梯度 def optimize(self): self.trainer.callbacks.trigger('before_optimize', trainer=self) self.trainer.opt.step() self.trainer.opt.zero_grad() self.trainer.callbacks.trigger('after_optimize', trainer=self) def __call__(self, model_out, batch_y, grad_accumulate=False): """ Args: model_out: 模型预测输出 batch_y: 标签 grad_accumulate: 是否累积梯度 """ if self.stage == 'train': # 计算损失 loss = self.loss_fn(model_out, batch_y) # backward self.trainer.callbacks.trigger('before_backward', trainer=self, loss=loss) if self.trainer.accelerator is None: (loss/self.trainer.grad_accumulate_steps).backward() else: # accelerate的backward self.trainer.accelerator.backward(loss/self.trainer.grad_accumulate_steps) self.trainer.callbacks.trigger('after_backward', trainer=self, loss=loss) # 记录各sub-batch的总损失、模型输出、标签 _loss = loss.detach().clone() self.total_loss += _loss * batch_size(model_out)
self.model_outs.append(detach_clone(model_out))
10
2023-10-19 05:41:48+00:00
12k
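The MeanPatch and TensorPatch docstrings in the context above describe accumulating per-batch metric values into an epoch metric: each batch value is weighted by its batch size, patches support `+` and `sum`, and calling the accumulated patch yields the epoch-level value. A self-contained sketch of that accumulation idea is below; the class name SimpleMeanPatch and the example numbers are illustrative only and are not the deepepochs API:

class SimpleMeanPatch:
    """Weighted running mean over mini-batches (illustrative, not deepepochs)."""

    def __init__(self, value, batch_size):
        # Store the batch-size-weighted total so epochs average correctly
        # even when batch sizes differ.
        self.total = value * batch_size
        self.count = batch_size

    def __add__(self, other):
        if other == 0:  # lets sum([...]) start from 0, as in the docstring example
            return self
        merged = SimpleMeanPatch(0.0, 0)
        merged.total = self.total + other.total
        merged.count = self.count + other.count
        return merged

    __radd__ = __add__

    def __call__(self):
        return self.total / self.count


# Two batches with different sizes and per-batch accuracies.
p1 = SimpleMeanPatch(0.75, batch_size=32)
p2 = SimpleMeanPatch(0.50, batch_size=16)
print(sum([p1, p2])())  # weighted epoch value: (0.75*32 + 0.5*16) / 48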
vorausrobotik/voraus-ad-dataset
train.py
[ { "identifier": "Configuration", "path": "configuration.py", "snippet": "class Configuration(BaseModel):\n \"\"\"Describes the configuration parameters.\"\"\"\n\n seed: int\n epochs: int\n batchsize: int\n n_hidden_layers: int = Field(alias=\"nHiddenLayers\")\n n_coupling_blocks: int = Field(alias=\"nCouplingBlocks\")\n scale: int\n columns: Literal[\"machine\", \"mechanical\", \"electrical\", \"computed\", \"measured\"]\n clamp: float\n pad: bool\n frequency_divider: int = Field(alias=\"frequencyDivider\")\n train_gain: float = Field(alias=\"trainGain\")\n normalize: bool\n kernel_size_1: int = Field(alias=\"kernelSize1\")\n dilation_1: int = Field(alias=\"dilation1\")\n kernel_size_2: int = Field(alias=\"kernelSize2\")\n dilation_2: int = Field(alias=\"dilation2\")\n kernel_size_3: int = Field(alias=\"kernelSize3\")\n dilation_3: int = Field(alias=\"dilation3\")\n milestones: list[int]\n gamma: float\n learning_rate: float = Field(alias=\"learningRate\")" }, { "identifier": "NormalizingFlow", "path": "normalizing_flow.py", "snippet": "class NormalizingFlow(GraphINN):\r\n \"\"\"Describes the normalizing flow model.\"\"\"\r\n\r\n def __init__(self, input_dimension: Tuple[int, ...], config: Configuration) -> None:\r\n \"\"\"Initializes the normalizing flow model.\r\n\r\n Args:\r\n input_dimension: The input dimensions.\r\n config: The configuration of the model.\r\n \"\"\"\r\n nodes = [InputNode(*input_dimension, name=\"input\")]\r\n\r\n int_network = InternalNetwork.setup(\r\n input_dimension[1],\r\n input_dimension[0],\r\n n_hidden_layers=config.n_hidden_layers,\r\n scale=config.scale,\r\n kernel_size_1=config.kernel_size_1,\r\n dilation_1=config.dilation_1,\r\n kernel_size_2=config.kernel_size_2,\r\n dilation_2=config.dilation_2,\r\n kernel_size_3=config.kernel_size_3,\r\n dilation_3=config.dilation_3,\r\n )\r\n\r\n for cbi in range(config.n_coupling_blocks):\r\n kwargs: Dict[Any, Any] = {}\r\n\r\n nodes.append(\r\n Node(nodes[-1], PermuteRandom, kwargs, name=f\"permute{cbi}\"),\r\n )\r\n nodes.append(\r\n Node(\r\n nodes[-1],\r\n CouplingBlock,\r\n {\r\n \"subnet_constructor\": int_network.constructor,\r\n \"clamp\": config.clamp,\r\n },\r\n name=f\"cb{cbi}\",\r\n )\r\n )\r\n\r\n output_node = OutputNode(nodes[-1], name=\"output\")\r\n nodes.append(output_node)\r\n\r\n super().__init__(nodes)\r" }, { "identifier": "get_loss", "path": "normalizing_flow.py", "snippet": "def get_loss(z_space: Tensor, jac: Tensor) -> Tensor:\r\n \"\"\"Calculate the loss of a batch.\r\n\r\n Computes the negative log likelihood loss (per dimension) assuming z should be Gaussian.\r\n\r\n Args:\r\n z_space: The batch result.\r\n jac: The jacobian matrix.\r\n\r\n Returns:\r\n The loss of the batch.\r\n \"\"\"\r\n sum_dimension = tuple(range(1, z_space.dim()))\r\n number = numpy.prod(z_space.shape[1:])\r\n return torch.mean(torch.sum(z_space**2, dim=sum_dimension) - jac) / number\r" }, { "identifier": "get_loss_per_sample", "path": "normalizing_flow.py", "snippet": "def get_loss_per_sample(z_space: Tensor, jac: Tensor) -> Tensor:\r\n \"\"\"Calculates the loss per sample.\r\n\r\n Args:\r\n z_space: The batch result.\r\n jac: The jacobian matrix.\r\n\r\n Returns:\r\n The loss per sample.\r\n \"\"\"\r\n sum_dimension = tuple(range(1, z_space.dim()))\r\n loss = 0.5 * torch.sum(z_space**2, dim=sum_dimension) - jac\r\n return loss\r" }, { "identifier": "ANOMALY_CATEGORIES", "path": "voraus_ad.py", "snippet": "ANOMALY_CATEGORIES = [\n Category.AXIS_FRICTION,\n Category.AXIS_WEIGHT,\n Category.COLLISION_FOAM,\n 
Category.COLLISION_CABLE,\n Category.COLLISION_CARTON,\n Category.MISS_CAN,\n Category.LOSE_CAN,\n Category.CAN_WEIGHT,\n Category.ENTANGLED,\n Category.INVALID_POSITION,\n Category.MOTOR_COMMUTATION,\n Category.WOBBLING_STATION,\n]" }, { "identifier": "Signals", "path": "voraus_ad.py", "snippet": "class Signals:\n \"\"\"Contains the signals of the robot used in the dataset.\"\"\"\n\n TIME = \"time\"\n SAMPLE = \"sample\"\n ANOMALY = \"anomaly\"\n CATEGORY = \"category\"\n SETTING = \"setting\"\n ACTION = \"action\"\n ACTIVE = \"active\"\n ROBOT_VOLTAGE = \"robot_voltage\"\n ROBOT_CURRENT = \"robot_current\"\n IO_CURRENT = \"io_current\"\n SYSTEM_CURRENT = \"system_current\"\n TARGET_POSITION_1 = \"target_position_1\"\n TARGET_VELOCITY_1 = \"target_velocity_1\"\n TARGET_ACCELERATION_1 = \"target_acceleration_1\"\n TARGET_TORQUE_1 = \"target_torque_1\"\n COMPUTED_INERTIA_1 = \"computed_inertia_1\"\n COMPUTED_TORQUE_1 = \"computed_torque_1\"\n MOTOR_POSITION_1 = \"motor_position_1\"\n MOTOR_VELOCITY_1 = \"motor_velocity_1\"\n JOINT_POSITION_1 = \"joint_position_1\"\n JOINT_VELOCITY_1 = \"joint_velocity_1\"\n MOTOR_TORQUE_1 = \"motor_torque_1\"\n TORQUE_SENSOR_A_1 = \"torque_sensor_a_1\"\n TORQUE_SENSOR_B_1 = \"torque_sensor_b_1\"\n MOTOR_IQ_1 = \"motor_iq_1\"\n MOTOR_ID_1 = \"motor_id_1\"\n POWER_MOTOR_EL_1 = \"power_motor_el_1\"\n POWER_MOTOR_MECH_1 = \"power_motor_mech_1\"\n POWER_LOAD_MECH_1 = \"power_load_mech_1\"\n MOTOR_VOLTAGE_1 = \"motor_voltage_1\"\n SUPPLY_VOLTAGE_1 = \"supply_voltage_1\"\n BRAKE_VOLTAGE_1 = \"brake_voltage_1\"\n TARGET_POSITION_2 = \"target_position_2\"\n TARGET_VELOCITY_2 = \"target_velocity_2\"\n TARGET_ACCELERATION_2 = \"target_acceleration_2\"\n TARGET_TORQUE_2 = \"target_torque_2\"\n COMPUTED_INERTIA_2 = \"computed_inertia_2\"\n COMPUTED_TORQUE_2 = \"computed_torque_2\"\n MOTOR_POSITION_2 = \"motor_position_2\"\n MOTOR_VELOCITY_2 = \"motor_velocity_2\"\n JOINT_POSITION_2 = \"joint_position_2\"\n JOINT_VELOCITY_2 = \"joint_velocity_2\"\n MOTOR_TORQUE_2 = \"motor_torque_2\"\n TORQUE_SENSOR_A_2 = \"torque_sensor_a_2\"\n TORQUE_SENSOR_B_2 = \"torque_sensor_b_2\"\n MOTOR_IQ_2 = \"motor_iq_2\"\n MOTOR_ID_2 = \"motor_id_2\"\n POWER_MOTOR_EL_2 = \"power_motor_el_2\"\n POWER_MOTOR_MECH_2 = \"power_motor_mech_2\"\n POWER_LOAD_MECH_2 = \"power_load_mech_2\"\n MOTOR_VOLTAGE_2 = \"motor_voltage_2\"\n SUPPLY_VOLTAGE_2 = \"supply_voltage_2\"\n BRAKE_VOLTAGE_2 = \"brake_voltage_2\"\n TARGET_POSITION_3 = \"target_position_3\"\n TARGET_VELOCITY_3 = \"target_velocity_3\"\n TARGET_ACCELERATION_3 = \"target_acceleration_3\"\n TARGET_TORQUE_3 = \"target_torque_3\"\n COMPUTED_INERTIA_3 = \"computed_inertia_3\"\n COMPUTED_TORQUE_3 = \"computed_torque_3\"\n MOTOR_POSITION_3 = \"motor_position_3\"\n MOTOR_VELOCITY_3 = \"motor_velocity_3\"\n JOINT_POSITION_3 = \"joint_position_3\"\n JOINT_VELOCITY_3 = \"joint_velocity_3\"\n MOTOR_TORQUE_3 = \"motor_torque_3\"\n TORQUE_SENSOR_A_3 = \"torque_sensor_a_3\"\n TORQUE_SENSOR_B_3 = \"torque_sensor_b_3\"\n MOTOR_IQ_3 = \"motor_iq_3\"\n MOTOR_ID_3 = \"motor_id_3\"\n POWER_MOTOR_EL_3 = \"power_motor_el_3\"\n POWER_MOTOR_MECH_3 = \"power_motor_mech_3\"\n POWER_LOAD_MECH_3 = \"power_load_mech_3\"\n MOTOR_VOLTAGE_3 = \"motor_voltage_3\"\n SUPPLY_VOLTAGE_3 = \"supply_voltage_3\"\n BRAKE_VOLTAGE_3 = \"brake_voltage_3\"\n TARGET_POSITION_4 = \"target_position_4\"\n TARGET_VELOCITY_4 = \"target_velocity_4\"\n TARGET_ACCELERATION_4 = \"target_acceleration_4\"\n TARGET_TORQUE_4 = \"target_torque_4\"\n COMPUTED_INERTIA_4 = \"computed_inertia_4\"\n 
COMPUTED_TORQUE_4 = \"computed_torque_4\"\n MOTOR_POSITION_4 = \"motor_position_4\"\n MOTOR_VELOCITY_4 = \"motor_velocity_4\"\n JOINT_POSITION_4 = \"joint_position_4\"\n JOINT_VELOCITY_4 = \"joint_velocity_4\"\n MOTOR_TORQUE_4 = \"motor_torque_4\"\n TORQUE_SENSOR_A_4 = \"torque_sensor_a_4\"\n TORQUE_SENSOR_B_4 = \"torque_sensor_b_4\"\n MOTOR_IQ_4 = \"motor_iq_4\"\n MOTOR_ID_4 = \"motor_id_4\"\n POWER_MOTOR_EL_4 = \"power_motor_el_4\"\n POWER_MOTOR_MECH_4 = \"power_motor_mech_4\"\n POWER_LOAD_MECH_4 = \"power_load_mech_4\"\n MOTOR_VOLTAGE_4 = \"motor_voltage_4\"\n SUPPLY_VOLTAGE_4 = \"supply_voltage_4\"\n BRAKE_VOLTAGE_4 = \"brake_voltage_4\"\n TARGET_POSITION_5 = \"target_position_5\"\n TARGET_VELOCITY_5 = \"target_velocity_5\"\n TARGET_ACCELERATION_5 = \"target_acceleration_5\"\n TARGET_TORQUE_5 = \"target_torque_5\"\n COMPUTED_INERTIA_5 = \"computed_inertia_5\"\n COMPUTED_TORQUE_5 = \"computed_torque_5\"\n MOTOR_POSITION_5 = \"motor_position_5\"\n MOTOR_VELOCITY_5 = \"motor_velocity_5\"\n JOINT_POSITION_5 = \"joint_position_5\"\n JOINT_VELOCITY_5 = \"joint_velocity_5\"\n MOTOR_TORQUE_5 = \"motor_torque_5\"\n TORQUE_SENSOR_A_5 = \"torque_sensor_a_5\"\n TORQUE_SENSOR_B_5 = \"torque_sensor_b_5\"\n MOTOR_IQ_5 = \"motor_iq_5\"\n MOTOR_ID_5 = \"motor_id_5\"\n POWER_MOTOR_EL_5 = \"power_motor_el_5\"\n POWER_MOTOR_MECH_5 = \"power_motor_mech_5\"\n POWER_LOAD_MECH_5 = \"power_load_mech_5\"\n MOTOR_VOLTAGE_5 = \"motor_voltage_5\"\n SUPPLY_VOLTAGE_5 = \"supply_voltage_5\"\n BRAKE_VOLTAGE_5 = \"brake_voltage_5\"\n TARGET_POSITION_6 = \"target_position_6\"\n TARGET_VELOCITY_6 = \"target_velocity_6\"\n TARGET_ACCELERATION_6 = \"target_acceleration_6\"\n TARGET_TORQUE_6 = \"target_torque_6\"\n COMPUTED_INERTIA_6 = \"computed_inertia_6\"\n COMPUTED_TORQUE_6 = \"computed_torque_6\"\n MOTOR_POSITION_6 = \"motor_position_6\"\n MOTOR_VELOCITY_6 = \"motor_velocity_6\"\n JOINT_POSITION_6 = \"joint_position_6\"\n JOINT_VELOCITY_6 = \"joint_velocity_6\"\n MOTOR_TORQUE_6 = \"motor_torque_6\"\n TORQUE_SENSOR_A_6 = \"torque_sensor_a_6\"\n TORQUE_SENSOR_B_6 = \"torque_sensor_b_6\"\n MOTOR_IQ_6 = \"motor_iq_6\"\n MOTOR_ID_6 = \"motor_id_6\"\n POWER_MOTOR_EL_6 = \"power_motor_el_6\"\n POWER_MOTOR_MECH_6 = \"power_motor_mech_6\"\n POWER_LOAD_MECH_6 = \"power_load_mech_6\"\n MOTOR_VOLTAGE_6 = \"motor_voltage_6\"\n SUPPLY_VOLTAGE_6 = \"supply_voltage_6\"\n BRAKE_VOLTAGE_6 = \"brake_voltage_6\"\n\n @classmethod\n def all(cls) -> tuple[str, ...]:\n \"\"\"Returns all signals (machine data and meta) included in the voraus-AD dataset.\n\n Returns:\n All signals of the voraus-AD dataset.\n \"\"\"\n return (\n cls.TIME,\n cls.SAMPLE,\n cls.ANOMALY,\n cls.CATEGORY,\n cls.SETTING,\n cls.ACTION,\n cls.ACTIVE,\n cls.ROBOT_VOLTAGE,\n cls.ROBOT_CURRENT,\n cls.IO_CURRENT,\n cls.SYSTEM_CURRENT,\n cls.TARGET_POSITION_1,\n cls.TARGET_VELOCITY_1,\n cls.TARGET_ACCELERATION_1,\n cls.TARGET_TORQUE_1,\n cls.COMPUTED_INERTIA_1,\n cls.COMPUTED_TORQUE_1,\n cls.MOTOR_POSITION_1,\n cls.MOTOR_VELOCITY_1,\n cls.JOINT_POSITION_1,\n cls.JOINT_VELOCITY_1,\n cls.MOTOR_TORQUE_1,\n cls.TORQUE_SENSOR_A_1,\n cls.TORQUE_SENSOR_B_1,\n cls.MOTOR_IQ_1,\n cls.MOTOR_ID_1,\n cls.POWER_MOTOR_EL_1,\n cls.POWER_MOTOR_MECH_1,\n cls.POWER_LOAD_MECH_1,\n cls.MOTOR_VOLTAGE_1,\n cls.SUPPLY_VOLTAGE_1,\n cls.BRAKE_VOLTAGE_1,\n cls.TARGET_POSITION_2,\n cls.TARGET_VELOCITY_2,\n cls.TARGET_ACCELERATION_2,\n cls.TARGET_TORQUE_2,\n cls.COMPUTED_INERTIA_2,\n cls.COMPUTED_TORQUE_2,\n cls.MOTOR_POSITION_2,\n cls.MOTOR_VELOCITY_2,\n cls.JOINT_POSITION_2,\n cls.JOINT_VELOCITY_2,\n 
cls.MOTOR_TORQUE_2,\n cls.TORQUE_SENSOR_A_2,\n cls.TORQUE_SENSOR_B_2,\n cls.MOTOR_IQ_2,\n cls.MOTOR_ID_2,\n cls.POWER_MOTOR_EL_2,\n cls.POWER_MOTOR_MECH_2,\n cls.POWER_LOAD_MECH_2,\n cls.MOTOR_VOLTAGE_2,\n cls.SUPPLY_VOLTAGE_2,\n cls.BRAKE_VOLTAGE_2,\n cls.TARGET_POSITION_3,\n cls.TARGET_VELOCITY_3,\n cls.TARGET_ACCELERATION_3,\n cls.TARGET_TORQUE_3,\n cls.COMPUTED_INERTIA_3,\n cls.COMPUTED_TORQUE_3,\n cls.MOTOR_POSITION_3,\n cls.MOTOR_VELOCITY_3,\n cls.JOINT_POSITION_3,\n cls.JOINT_VELOCITY_3,\n cls.MOTOR_TORQUE_3,\n cls.TORQUE_SENSOR_A_3,\n cls.TORQUE_SENSOR_B_3,\n cls.MOTOR_IQ_3,\n cls.MOTOR_ID_3,\n cls.POWER_MOTOR_EL_3,\n cls.POWER_MOTOR_MECH_3,\n cls.POWER_LOAD_MECH_3,\n cls.MOTOR_VOLTAGE_3,\n cls.SUPPLY_VOLTAGE_3,\n cls.BRAKE_VOLTAGE_3,\n cls.TARGET_POSITION_4,\n cls.TARGET_VELOCITY_4,\n cls.TARGET_ACCELERATION_4,\n cls.TARGET_TORQUE_4,\n cls.COMPUTED_INERTIA_4,\n cls.COMPUTED_TORQUE_4,\n cls.MOTOR_POSITION_4,\n cls.MOTOR_VELOCITY_4,\n cls.JOINT_POSITION_4,\n cls.JOINT_VELOCITY_4,\n cls.MOTOR_TORQUE_4,\n cls.TORQUE_SENSOR_A_4,\n cls.TORQUE_SENSOR_B_4,\n cls.MOTOR_IQ_4,\n cls.MOTOR_ID_4,\n cls.POWER_MOTOR_EL_4,\n cls.POWER_MOTOR_MECH_4,\n cls.POWER_LOAD_MECH_4,\n cls.MOTOR_VOLTAGE_4,\n cls.SUPPLY_VOLTAGE_4,\n cls.BRAKE_VOLTAGE_4,\n cls.TARGET_POSITION_5,\n cls.TARGET_VELOCITY_5,\n cls.TARGET_ACCELERATION_5,\n cls.TARGET_TORQUE_5,\n cls.COMPUTED_INERTIA_5,\n cls.COMPUTED_TORQUE_5,\n cls.MOTOR_POSITION_5,\n cls.MOTOR_VELOCITY_5,\n cls.JOINT_POSITION_5,\n cls.JOINT_VELOCITY_5,\n cls.MOTOR_TORQUE_5,\n cls.TORQUE_SENSOR_A_5,\n cls.TORQUE_SENSOR_B_5,\n cls.MOTOR_IQ_5,\n cls.MOTOR_ID_5,\n cls.POWER_MOTOR_EL_5,\n cls.POWER_MOTOR_MECH_5,\n cls.POWER_LOAD_MECH_5,\n cls.MOTOR_VOLTAGE_5,\n cls.SUPPLY_VOLTAGE_5,\n cls.BRAKE_VOLTAGE_5,\n cls.TARGET_POSITION_6,\n cls.TARGET_VELOCITY_6,\n cls.TARGET_ACCELERATION_6,\n cls.TARGET_TORQUE_6,\n cls.COMPUTED_INERTIA_6,\n cls.COMPUTED_TORQUE_6,\n cls.MOTOR_POSITION_6,\n cls.MOTOR_VELOCITY_6,\n cls.JOINT_POSITION_6,\n cls.JOINT_VELOCITY_6,\n cls.MOTOR_TORQUE_6,\n cls.TORQUE_SENSOR_A_6,\n cls.TORQUE_SENSOR_B_6,\n cls.MOTOR_IQ_6,\n cls.MOTOR_ID_6,\n cls.POWER_MOTOR_EL_6,\n cls.POWER_MOTOR_MECH_6,\n cls.POWER_LOAD_MECH_6,\n cls.MOTOR_VOLTAGE_6,\n cls.SUPPLY_VOLTAGE_6,\n cls.BRAKE_VOLTAGE_6,\n )\n\n @classmethod\n def meta(cls) -> tuple[str, ...]:\n \"\"\"Returns the meta colums of the voraus-AD dataset.\n\n Returns:\n The meta columns of the dataset.\n \"\"\"\n return (\n cls.TIME,\n cls.SAMPLE,\n cls.ANOMALY,\n cls.CATEGORY,\n cls.SETTING,\n cls.ACTION,\n cls.ACTIVE,\n )\n\n @classmethod\n def meta_constant(cls) -> tuple[str, ...]:\n \"\"\"Returns time invariant meta colums of the voraus-AD dataset.\n\n Returns:\n The time invariant meta columns.\n \"\"\"\n return (\n cls.SAMPLE,\n cls.ANOMALY,\n cls.CATEGORY,\n cls.SETTING,\n )\n\n @classmethod\n def electrical(cls) -> tuple[str, ...]:\n \"\"\"Returns the part of the machine data columns, which describes electrical values.\n\n Returns:\n The electrical signals.\n \"\"\"\n return (\n cls.ROBOT_VOLTAGE,\n cls.ROBOT_CURRENT,\n cls.IO_CURRENT,\n cls.SYSTEM_CURRENT,\n cls.MOTOR_IQ_1,\n cls.MOTOR_ID_1,\n cls.POWER_MOTOR_EL_1,\n cls.MOTOR_VOLTAGE_1,\n cls.SUPPLY_VOLTAGE_1,\n cls.BRAKE_VOLTAGE_1,\n cls.MOTOR_IQ_2,\n cls.MOTOR_ID_2,\n cls.POWER_MOTOR_EL_2,\n cls.MOTOR_VOLTAGE_2,\n cls.SUPPLY_VOLTAGE_2,\n cls.BRAKE_VOLTAGE_2,\n cls.MOTOR_IQ_3,\n cls.MOTOR_ID_3,\n cls.POWER_MOTOR_EL_3,\n cls.MOTOR_VOLTAGE_3,\n cls.SUPPLY_VOLTAGE_3,\n cls.BRAKE_VOLTAGE_3,\n cls.MOTOR_IQ_4,\n cls.MOTOR_ID_4,\n cls.POWER_MOTOR_EL_4,\n 
cls.MOTOR_VOLTAGE_4,\n cls.SUPPLY_VOLTAGE_4,\n cls.BRAKE_VOLTAGE_4,\n cls.MOTOR_IQ_5,\n cls.MOTOR_ID_5,\n cls.POWER_MOTOR_EL_5,\n cls.MOTOR_VOLTAGE_5,\n cls.SUPPLY_VOLTAGE_5,\n cls.BRAKE_VOLTAGE_5,\n cls.MOTOR_IQ_6,\n cls.MOTOR_ID_6,\n cls.POWER_MOTOR_EL_6,\n cls.MOTOR_VOLTAGE_6,\n cls.SUPPLY_VOLTAGE_6,\n cls.BRAKE_VOLTAGE_6,\n )\n\n @classmethod\n def measured(cls) -> tuple[str, ...]:\n \"\"\"Returns the part of the machine data, which describes measured values.\n\n Returns:\n The measured signals.\n \"\"\"\n return (\n cls.ROBOT_VOLTAGE,\n cls.ROBOT_CURRENT,\n cls.IO_CURRENT,\n cls.SYSTEM_CURRENT,\n cls.MOTOR_POSITION_1,\n cls.MOTOR_VELOCITY_1,\n cls.JOINT_POSITION_1,\n cls.JOINT_VELOCITY_1,\n cls.TORQUE_SENSOR_A_1,\n cls.TORQUE_SENSOR_B_1,\n cls.MOTOR_VOLTAGE_1,\n cls.SUPPLY_VOLTAGE_1,\n cls.BRAKE_VOLTAGE_1,\n cls.MOTOR_POSITION_2,\n cls.MOTOR_VELOCITY_2,\n cls.JOINT_POSITION_2,\n cls.JOINT_VELOCITY_2,\n cls.TORQUE_SENSOR_A_2,\n cls.TORQUE_SENSOR_B_2,\n cls.MOTOR_VOLTAGE_2,\n cls.SUPPLY_VOLTAGE_2,\n cls.BRAKE_VOLTAGE_2,\n cls.MOTOR_POSITION_3,\n cls.MOTOR_VELOCITY_3,\n cls.JOINT_POSITION_3,\n cls.JOINT_VELOCITY_3,\n cls.TORQUE_SENSOR_A_3,\n cls.TORQUE_SENSOR_B_3,\n cls.MOTOR_VOLTAGE_3,\n cls.SUPPLY_VOLTAGE_3,\n cls.BRAKE_VOLTAGE_3,\n cls.MOTOR_POSITION_4,\n cls.MOTOR_VELOCITY_4,\n cls.JOINT_POSITION_4,\n cls.JOINT_VELOCITY_4,\n cls.TORQUE_SENSOR_A_4,\n cls.TORQUE_SENSOR_B_4,\n cls.MOTOR_VOLTAGE_4,\n cls.SUPPLY_VOLTAGE_4,\n cls.BRAKE_VOLTAGE_4,\n cls.MOTOR_POSITION_5,\n cls.MOTOR_VELOCITY_5,\n cls.JOINT_POSITION_5,\n cls.JOINT_VELOCITY_5,\n cls.TORQUE_SENSOR_A_5,\n cls.TORQUE_SENSOR_B_5,\n cls.MOTOR_VOLTAGE_5,\n cls.SUPPLY_VOLTAGE_5,\n cls.BRAKE_VOLTAGE_5,\n cls.MOTOR_POSITION_6,\n cls.MOTOR_VELOCITY_6,\n cls.JOINT_POSITION_6,\n cls.JOINT_VELOCITY_6,\n cls.TORQUE_SENSOR_A_6,\n cls.TORQUE_SENSOR_B_6,\n cls.MOTOR_VOLTAGE_6,\n cls.SUPPLY_VOLTAGE_6,\n cls.BRAKE_VOLTAGE_6,\n )\n\n @classmethod\n def robot(cls) -> tuple[str, ...]:\n \"\"\"Returns all columns, which are not related to the robot axes, but to the robot itself.\n\n Returns:\n The robot system signals.\n \"\"\"\n return (\n cls.ROBOT_VOLTAGE,\n cls.ROBOT_CURRENT,\n cls.IO_CURRENT,\n cls.SYSTEM_CURRENT,\n )\n\n @classmethod\n def machine(cls) -> tuple[str, ...]:\n \"\"\"Returns all columns, which are machine data.\n\n This excludes the meta columns of the dataset.\n The machine data should be used for training, it contains all available measurements and target values.\n\n Returns:\n The machine data signals.\n \"\"\"\n return tuple(s for s in cls.all() if s not in cls.meta())\n\n @classmethod\n def mechanical(cls) -> tuple[str, ...]:\n \"\"\"Returns the columns, which describe mechanical values.\n\n Returns:\n The machanical signals.\n \"\"\"\n return tuple(s for s in cls.machine() if s not in cls.electrical())\n\n @classmethod\n def computed(cls) -> tuple[str, ...]:\n \"\"\"Returns the columns, which describe computed values like targets.\n\n Returns:\n The computed signals.\n \"\"\"\n return tuple(s for s in cls.machine() if s not in cls.measured())\n\n @classmethod\n def axis(cls) -> tuple[str, ...]:\n \"\"\"Returns the columns, which describe robot axis specific values.\n\n Returns:\n The robot axis specific signals.\n \"\"\"\n signals_axis = tuple(s for s in cls.machine() if s not in cls.robot())\n number_of_axis = 6\n assert len(signals_axis) % number_of_axis == 0\n signals_per_axis = round(len(signals_axis) / number_of_axis)\n print(signals_per_axis)\n return signals_axis\n\n @classmethod\n def groups(cls) -> dict[str, 
tuple[str, ...]]:\n \"\"\"Access the signal groups by name.\n\n Returns:\n The signal group dictionary.\n \"\"\"\n return {\n \"mechanical\": cls.mechanical(),\n \"electrical\": cls.electrical(),\n \"computed\": cls.computed(),\n \"measured\": cls.measured(),\n \"machine\": cls.machine(), # all machine data\n }" }, { "identifier": "load_torch_dataloaders", "path": "voraus_ad.py", "snippet": "def load_torch_dataloaders( # pylint: disable=too-many-locals\n dataset: Union[Path, str],\n batch_size: int,\n seed: int,\n columns: Union[List[str], Tuple],\n normalize: bool,\n frequency_divider: int,\n train_gain: float,\n pad: bool = True,\n) -> tuple[VorausADDataset, VorausADDataset, DataLoader, DataLoader]:\n \"\"\"Loads the voraus-AD dataset (train and test) as torch data loaders and datasets.\n\n Args:\n dataset: The path to the dataset.\n batch_size: The batch size to use.\n seed: The seed o use for the dataloader random generator.\n columns: The colums to load.\n normalize: Whether to normalize the data with standard scaler or not.\n frequency_divider: Scale the dataset down by dropping every nth sample.\n train_gain: The factor of train samples to use.\n pad: Whether to use zero padding or not.\n\n Returns:\n The data loaders and datasets.\n \"\"\"\n x_train, y_train, x_test, y_test = load_torch_tensors(\n path=dataset,\n columns=columns,\n normalize=normalize,\n frequency_divider=frequency_divider,\n train_gain=train_gain,\n pad=pad,\n )\n\n train_dataset = VorausADDataset(x_train, y_train, list(columns))\n test_dataset = VorausADDataset(x_test, y_test, list(columns))\n\n generator = torch.Generator()\n generator.manual_seed(seed)\n\n train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, generator=generator)\n test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)\n\n return train_dataset, test_dataset, train_dataloader, test_dataloader" } ]
import random import numpy import pandas import torch import torch.backends.cudnn from pathlib import Path from typing import Dict, List, Optional from sklearn import metrics from torch import optim from configuration import Configuration from normalizing_flow import NormalizingFlow, get_loss, get_loss_per_sample from voraus_ad import ANOMALY_CATEGORIES, Signals, load_torch_dataloaders
7,372
"""Contains the training of the normalizing flow model.""" # If deterministic CUDA is activated, some calculations cannot be calculated in parallel on the GPU. # The training will take much longer but is reproducible. DETERMINISTIC_CUDA = False DATASET_PATH = Path.home() / "Downloads" / "voraus-ad-dataset-100hz.parquet" MODEL_PATH: Optional[Path] = Path.cwd() / "model.pth" DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Define the training configuration and hyperparameters of the model. configuration = Configuration( columns="machine", epochs=70, frequencyDivider=1, trainGain=1.0, seed=177, batchsize=32, nCouplingBlocks=4, clamp=1.2, learningRate=8e-4, normalize=True, pad=True, nHiddenLayers=0, scale=2, kernelSize1=13, dilation1=2, kernelSize2=1, dilation2=1, kernelSize3=1, dilation3=1, milestones=[11, 61], gamma=0.1, ) # Make the training reproducible. torch.manual_seed(configuration.seed) torch.cuda.manual_seed_all(configuration.seed) numpy.random.seed(configuration.seed) random.seed(configuration.seed) if DETERMINISTIC_CUDA: torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False # Disable pylint too-many-variables here for readability. # The whole training should run in a single function call. def train() -> List[Dict]: # pylint: disable=too-many-locals """Trains the model with the paper-given parameters. Returns: The auroc (mean over categories) and loss per epoch. """ # Load the dataset as torch data loaders.
"""Contains the training of the normalizing flow model.""" # If deterministic CUDA is activated, some calculations cannot be calculated in parallel on the GPU. # The training will take much longer but is reproducible. DETERMINISTIC_CUDA = False DATASET_PATH = Path.home() / "Downloads" / "voraus-ad-dataset-100hz.parquet" MODEL_PATH: Optional[Path] = Path.cwd() / "model.pth" DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Define the training configuration and hyperparameters of the model. configuration = Configuration( columns="machine", epochs=70, frequencyDivider=1, trainGain=1.0, seed=177, batchsize=32, nCouplingBlocks=4, clamp=1.2, learningRate=8e-4, normalize=True, pad=True, nHiddenLayers=0, scale=2, kernelSize1=13, dilation1=2, kernelSize2=1, dilation2=1, kernelSize3=1, dilation3=1, milestones=[11, 61], gamma=0.1, ) # Make the training reproducible. torch.manual_seed(configuration.seed) torch.cuda.manual_seed_all(configuration.seed) numpy.random.seed(configuration.seed) random.seed(configuration.seed) if DETERMINISTIC_CUDA: torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False # Disable pylint too-many-variables here for readability. # The whole training should run in a single function call. def train() -> List[Dict]: # pylint: disable=too-many-locals """Trains the model with the paper-given parameters. Returns: The auroc (mean over categories) and loss per epoch. """ # Load the dataset as torch data loaders.
train_dataset, _, train_dl, test_dl = load_torch_dataloaders(
6
2023-10-18 15:09:24+00:00
12k
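The get_loss function quoted in the context above computes the per-dimension negative log likelihood of a batch under a standard Gaussian prior: sum the squared latent values over the non-batch dimensions, subtract the log-Jacobian, average over the batch, and divide by the number of latent dimensions. A short runnable illustration of that formula with random stand-in tensors (the shapes are chosen arbitrarily for the example and do not correspond to the voraus-AD data):

import torch

torch.manual_seed(0)

# Stand-in latent batch (4 samples, 3 features) and per-sample log-Jacobian.
z_space = torch.randn(4, 3)
jac = torch.randn(4)

number = z_space.shape[1]                  # latent dimensions per sample
sum_dims = tuple(range(1, z_space.dim()))  # sum over all non-batch dims
loss = torch.mean(torch.sum(z_space**2, dim=sum_dims) - jac) / number
print(loss)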
invictus717/UniDG
domainbed/scripts/visualize_adaption.py
[ { "identifier": "datasets", "path": "domainbed/datasets.py", "snippet": "DATASETS = [\n # Debug\n \"Debug28\",\n \"Debug224\",\n # Small images\n \"ColoredMNIST\",\n \"RotatedMNIST\",\n # Big images\n \"VLCS\",\n \"PACS\",\n \"OfficeHome\",\n \"TerraIncognita\",\n \"DomainNet\",\n \"SVIRO\",\n # WILDS datasets\n \"WILDSCamelyon\",\n \"WILDSFMoW\"\n]\n N_STEPS = 5001 # Default, subclasses may override\n CHECKPOINT_FREQ = 100 # Default, subclasses may override\n N_WORKERS = 4 # Default, subclasses may override\n ENVIRONMENTS = None # Subclasses should override\n INPUT_SHAPE = None # Subclasses should override\n INPUT_SHAPE = (3, 28, 28)\n ENVIRONMENTS = ['0', '1', '2']\n INPUT_SHAPE = (3, 224, 224)\n ENVIRONMENTS = ['0', '1', '2']\n ENVIRONMENTS = ['+90%', '+80%', '-90%']\n ENVIRONMENTS = ['0', '15', '30', '45', '60', '75']\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"C\", \"L\", \"S\", \"V\"]\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"A\", \"C\", \"P\", \"S\"]\n CHECKPOINT_FREQ = 1000\n ENVIRONMENTS = [\"clip\", \"info\", \"paint\", \"quick\", \"real\", \"sketch\"]\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"A\", \"C\", \"P\", \"R\"]\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"L100\", \"L38\", \"L43\", \"L46\"]\n CHECKPOINT_FREQ = 300\n ENVIRONMENTS = [\"aclass\", \"escape\", \"hilux\", \"i3\", \"lexus\", \"tesla\", \"tiguan\", \"tucson\", \"x5\", \"zoe\"]\n INPUT_SHAPE = (3, 224, 224)\n ENVIRONMENTS = [ \"hospital_0\", \"hospital_1\", \"hospital_2\", \"hospital_3\",\n \"hospital_4\"]\n ENVIRONMENTS = [ \"region_0\", \"region_1\", \"region_2\", \"region_3\",\n \"region_4\", \"region_5\"]\nclass MyDataParallel(torch.nn.DataParallel):\nclass MultipleDomainDataset:\nclass Debug(MultipleDomainDataset):\nclass Debug28(Debug):\nclass Debug224(Debug):\nclass MultipleEnvironmentMNIST(MultipleDomainDataset):\nclass ColoredMNIST(MultipleEnvironmentMNIST):\nclass RotatedMNIST(MultipleEnvironmentMNIST):\nclass MultipleEnvironmentImageFolder(MultipleDomainDataset):\nclass VLCS(MultipleEnvironmentImageFolder):\nclass PACS(MultipleEnvironmentImageFolder):\nclass DomainNet(MultipleEnvironmentImageFolder):\nclass OfficeHome(MultipleEnvironmentImageFolder):\nclass TerraIncognita(MultipleEnvironmentImageFolder):\nclass SVIRO(MultipleEnvironmentImageFolder):\nclass WILDSEnvironment:\nclass WILDSDataset(MultipleDomainDataset):\nclass WILDSCamelyon(WILDSDataset):\nclass WILDSFMoW(WILDSDataset):\n def __getattr__(self, name):\ndef get_dataset_class(dataset_name):\ndef num_environments(dataset_name):\n def __getitem__(self, index):\n def __len__(self):\n def __init__(self, root, test_envs, hparams):\n def __init__(self, root, environments, dataset_transform, input_shape,\n num_classes):\n def __init__(self, root, test_envs, hparams):\n def color_dataset(self, images, labels, environment):\n def torch_bernoulli_(self, p, size):\n def torch_xor_(self, a, b):\n def __init__(self, root, test_envs, hparams):\n def rotate_dataset(self, images, labels, angle):\n def __init__(self, root, test_envs, augment, hparams):\n def __init__(self, root, test_envs, hparams):\n def __init__(self, root, test_envs, hparams):\n def __init__(self, root, test_envs, hparams):\n def __init__(self, root, test_envs, hparams):\n def __init__(self, root, test_envs, hparams):\n def __init__(self, root, test_envs, hparams):\n def __init__(\n self,\n wilds_dataset,\n metadata_name,\n metadata_value,\n transform=None):\n def __getitem__(self, i):\n def __len__(self):\n def __init__(self, dataset, metadata_name, test_envs, augment, hparams):\n def 
metadata_values(self, wilds_dataset, metadata_name):\n def __init__(self, root, test_envs, hparams):\n def __init__(self, root, test_envs, hparams):" }, { "identifier": "hparams_registry", "path": "domainbed/hparams_registry.py", "snippet": "def _define_hparam(hparams, hparam_name, default_val, random_val_fn):\ndef _hparams(algorithm, dataset, random_seed):\n def _hparam(name, default_val, random_val_fn):\ndef default_hparams(algorithm, dataset):\ndef random_hparams(algorithm, dataset, seed):\n SMALL_IMAGES = ['Debug28', 'RotatedMNIST', 'ColoredMNIST']" }, { "identifier": "algorithms", "path": "domainbed/algorithms.py", "snippet": "ALGORITHMS = [\n 'ERM',\n 'IRM',\n 'GroupDRO',\n 'Mixup',\n 'MLDG',\n 'CORAL',\n 'MMD',\n 'DANN',\n 'CDANN',\n 'MTL',\n 'SagNet',\n 'ARM',\n 'VREx',\n 'RSC',\n 'SD',\n 'MIRO'\n]\n D = self.my_cdist(x, y)\n K = torch.zeros_like(D)\ndef get_algorithm_class(algorithm_name):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def predict(self, x):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def predict(self, x):\n def forward(self, x):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def predict(self, x):\n def __init__(self, input_shape, num_classes, num_domains,\n hparams, conditional, class_balance):\n def update(self, minibatches, unlabeled=None):\n def predict(self, x):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def _irm_penalty(logits, y):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams, gaussian):\n def my_cdist(self, x1, x2):\n def gaussian_kernel(self, x, y, gamma=[0.001, 0.01, 0.1, 1, 10, 100,\n 1000]):\n def mmd(self, x, y):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def update_embeddings_(self, features, env=None):\n def predict(self, x, env=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def opt(p):\n def forward_c(self, x):\n def forward_s(self, x):\n def randomize(self, x, what=\"style\", eps=1e-5):\n def update(self, minibatches, unlabeled=None):\n def predict(self, x):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, input_shape, num_classes, num_domains, hparams):\n def update(self, minibatches, unlabeled=None):\n def __init__(self, network):\n def forward(self, x):\n def predict(self, x):\n def __init__(self, shape):\n def forward(self, x):\n def __init__(self, shape, init=0.1, channelwise=True, 
eps=1e-5):\n def forward(self, x):\ndef get_shapes(model, input_shape):\n def __init__(self, input_shape, num_classes, num_domains, hparams, **kwargs):\n def update(self, x, y, **kwargs):\n def predict(self, x):\n def get_forward_model(self):\nclass Algorithm(torch.nn.Module):\nclass ERM(Algorithm):\nclass ARM(ERM):\nclass AbstractDANN(Algorithm):\nclass DANN(AbstractDANN):\nclass CDANN(AbstractDANN):\nclass IRM(ERM):\nclass VREx(ERM):\nclass Mixup(ERM):\nclass GroupDRO(ERM):\nclass MLDG(ERM):\nclass AbstractMMD(ERM):\nclass MMD(AbstractMMD):\nclass CORAL(AbstractMMD):\nclass MTL(Algorithm):\nclass SagNet(Algorithm):\nclass RSC(ERM):\nclass SD(ERM):\nclass ForwardModel(nn.Module):\nclass MeanEncoder(nn.Module):\nclass VarianceEncoder(nn.Module):\nclass MIRO(Algorithm):" }, { "identifier": "misc", "path": "domainbed/lib/misc.py", "snippet": "def make_weights_for_balanced_classes(dataset):\ndef pdb():\ndef seed_hash(*args):\ndef print_separator():\ndef print_row(row, colwidth=10, latex=False):\n def format_val(x):\n def __init__(self, underlying_dataset, keys):\n def __getitem__(self, key):\n def __len__(self):\ndef split_dataset(dataset, n, seed=0):\ndef random_pairs_of_minibatches(minibatches):\ndef accuracy(network, loader, weights, device):\ndef softmax_entropy(x: torch.Tensor) -> torch.Tensor:\ndef accuracy_ent(network, loader, weights, device, adapt=False):\n def __init__(self, fname, mode=\"a\"):\n def write(self, message):\n def flush(self):\nclass _SplitDataset(torch.utils.data.Dataset):\nclass Tee:" }, { "identifier": "accuracy_ent", "path": "domainbed/lib/misc.py", "snippet": "def accuracy_ent(network, loader, weights, device, adapt=False):\n correct = 0\n total = 0\n weights_offset = 0\n ent = 0\n \n if adapt == False:\n network.eval()\n #with torch.no_grad():\n for x, y in loader:\n x = x.to(device)\n y = y.to(device)\n if adapt is None:\n p = network(x)\n else:\n p = network(x, adapt)\n if weights is None:\n batch_weights = torch.ones(len(p)) # x\n else:\n batch_weights = weights[weights_offset: weights_offset + len(x)]\n weights_offset += len(x)\n batch_weights = batch_weights.to(device)\n if len(p) != len(x):\n y = torch.cat((y,y))\n if p.size(1) == 1:\n correct += (p.gt(0).eq(y).float() * batch_weights.view(-1, 1)).sum().item()\n else:\n correct += (p.argmax(1).eq(y).float() * batch_weights).sum().item()\n total += batch_weights.sum().item()\n ent += softmax_entropy(p).sum().item()\n if adapt == False:\n network.train()\n\n return correct / total, ent / total" }, { "identifier": "InfiniteDataLoader", "path": "domainbed/lib/fast_data_loader.py", "snippet": "class InfiniteDataLoader:\n def __init__(self, dataset, weights, batch_size, num_workers):\n super().__init__()\n\n if weights is None:\n sampler = torch.utils.data.RandomSampler(dataset,\n replacement=True)\n else:\n sampler = torch.utils.data.WeightedRandomSampler(weights,\n replacement=True,\n num_samples=batch_size)\n\n # if weights is None:\n # weights = torch.ones(len(dataset))\n\n batch_sampler = torch.utils.data.BatchSampler(\n sampler,\n batch_size=batch_size,\n drop_last=True)\n\n self._infinite_iterator = iter(torch.utils.data.DataLoader(\n dataset,\n num_workers=num_workers,\n batch_sampler=_InfiniteSampler(batch_sampler)\n ))\n\n def __iter__(self):\n while True:\n yield next(self._infinite_iterator)\n\n def __len__(self):\n raise ValueError" }, { "identifier": "FastDataLoader", "path": "domainbed/lib/fast_data_loader.py", "snippet": "class FastDataLoader:\n \"\"\"DataLoader wrapper with slightly improved 
speed by not respawning worker\n processes at every epoch.\"\"\"\n def __init__(self, dataset, batch_size, num_workers):\n super().__init__()\n\n batch_sampler = torch.utils.data.BatchSampler(\n torch.utils.data.RandomSampler(dataset, replacement=False),\n batch_size=batch_size,\n drop_last=False\n )\n\n self._infinite_iterator = iter(torch.utils.data.DataLoader(\n dataset,\n num_workers=num_workers,\n batch_sampler=_InfiniteSampler(batch_sampler)\n ))\n\n self._length = len(batch_sampler)\n\n def __iter__(self):\n for _ in range(len(self)):\n yield next(self._infinite_iterator)\n\n def __len__(self):\n return self._length" }, { "identifier": "DataParallelPassthrough", "path": "domainbed/lib/fast_data_loader.py", "snippet": "class DataParallelPassthrough(torch.nn.DataParallel):\n def __getattr__(self, name):\n try:\n return super().__getattr__(name)\n except AttributeError:\n return getattr(self.module, name)" }, { "identifier": "model_selection", "path": "domainbed/model_selection.py", "snippet": "def get_test_records(records):\n def __init__(self):\n def run_acc(self, run_records):\n def hparams_accs(self, records):\n def sweep_acc(self, records):\n def run_acc(self, run_records):\n def _step_acc(self, record):\n def run_acc(self, run_records):\n def _step_acc(self, record):\n def run_acc(self, run_records):\n def _step_acc(self, records):\n def run_acc(self, records):\nclass SelectionMethod:\nclass OracleSelectionMethod(SelectionMethod):\nclass IIDAccuracySelectionMethod(SelectionMethod):\nclass IIDAccuracySelectionMethod_Adaption(SelectionMethod):\nclass LeaveOneOutSelectionMethod(SelectionMethod):" }, { "identifier": "Q", "path": "domainbed/lib/query.py", "snippet": "class Q(object):\n def __init__(self, list_):\n super(Q, self).__init__()\n self._list = list_\n\n def __len__(self):\n return len(self._list)\n\n def __getitem__(self, key):\n return self._list[key]\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self._list == other._list\n else:\n return self._list == other\n\n def __str__(self):\n return str(self._list)\n\n def __repr__(self):\n return repr(self._list)\n\n def _append(self, item):\n \"\"\"Unsafe, be careful you know what you're doing.\"\"\"\n self._list.append(item)\n\n def group(self, selector):\n \"\"\"\n Group elements by selector and return a list of (group, group_records)\n tuples.\n \"\"\"\n selector = make_selector_fn(selector)\n groups = {}\n for x in self._list:\n group = selector(x)\n group_key = hashable(group)\n if group_key not in groups:\n groups[group_key] = (group, Q([]))\n groups[group_key][1]._append(x)\n results = [groups[key] for key in sorted(groups.keys())]\n return Q(results)\n\n def group_map(self, selector, fn):\n \"\"\"\n Group elements by selector, apply fn to each group, and return a list\n of the results.\n \"\"\"\n return self.group(selector).map(fn)\n\n def map(self, fn):\n \"\"\"\n map self onto fn. 
If fn takes multiple args, tuple-unpacking\n is applied.\n \"\"\"\n if len(inspect.signature(fn).parameters) > 1:\n return Q([fn(*x) for x in self._list])\n else:\n return Q([fn(x) for x in self._list])\n\n def select(self, selector):\n selector = make_selector_fn(selector)\n return Q([selector(x) for x in self._list])\n\n def min(self):\n return min(self._list)\n\n def max(self):\n return max(self._list)\n\n def sum(self):\n return sum(self._list)\n\n def len(self):\n return len(self._list)\n\n def mean(self):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return float(np.mean(self._list))\n\n def std(self):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return float(np.std(self._list))\n\n def mean_std(self):\n return (self.mean(), self.std())\n\n def argmax(self, selector):\n selector = make_selector_fn(selector)\n return max(self._list, key=selector)\n\n def filter(self, fn):\n return Q([x for x in self._list if fn(x)])\n\n def filter_equals(self, selector, value):\n \"\"\"like [x for x in y if x.selector == value]\"\"\"\n selector = make_selector_fn(selector)\n return self.filter(lambda r: selector(r) == value)\n\n def filter_in(self, selector, values):\n assert isinstance(values, list)\n selector = make_selector_fn(selector)\n return self.filter(lambda r: selector(r) in values)\n\n def filter_not_none(self):\n return self.filter(lambda r: r is not None)\n\n def filter_not_nan(self):\n return self.filter(lambda r: not np.isnan(r))\n\n def flatten(self):\n return Q([y for x in self._list for y in x])\n\n def unique(self):\n result = []\n result_set = set()\n for x in self._list:\n hashable_x = hashable(x)\n if hashable_x not in result_set:\n result_set.add(hashable_x)\n result.append(x)\n return Q(result)\n\n def sorted(self, key=None):\n if key is None:\n key = lambda x: x\n def key2(x):\n x = key(x)\n if isinstance(x, (np.floating, float)) and np.isnan(x):\n return float('-inf')\n else:\n return x\n return Q(sorted(self._list, key=key2))" }, { "identifier": "adapt_algorithms", "path": "domainbed/adapt_algorithms.py", "snippet": "ALGORITHMS = [\n 'T3A', \n 'TentFull', \n 'TentNorm', \n 'TentPreBN', # Tent-BN in the paper\n 'TentClf', # Tent-C in the paper\n 'PseudoLabel', \n 'PLClf', \n 'SHOT', \n 'SHOTIM',\n 'T3A_Aug',\n 'UniDG',\n]\ndef get_tta_transforms(gaussian_std: float=0.005, soft=False, clip_inputs=False):\ndef get_algorithm_class(algorithm_name):\n def __init__(self, input_shape, num_classes, num_domains, hparams, algorithm):\n def forward(self, x, adapt=False):\n def select_supports(self):\n def predict(self, x, adapt=False):\n def reset(self):\n def __init__(self, input_shape, num_classes, num_domains, hparams, algorithm):\n def forward(self, x, adapt=False):\n def select_supports(self):\n def predict(self, x, adapt=False):\n def reset(self):\n def __init__(self, input_shape, num_classes, num_domains, hparams, algorithm):\n def forward(self, x, adapt=False):\n def forward_and_adapt(self, x, model, optimizer):\n def configure_model_optimizer(self, algorithm, alpha):\n def reset(self):\n def forward(self, x, adapt=False):\n def configure_model_optimizer(self, algorithm, alpha):\n def configure_model_optimizer(self, algorithm, alpha):\ndef configure_model(model):\ndef copy_model_and_optimizer(model, optimizer):\ndef load_model_and_optimizer(model, optimizer, model_state, optimizer_state):\ndef softmax_entropy(x: torch.Tensor) -> torch.Tensor:\n def __init__(self, m, num_features, **kwargs):\n def forward(self, x):\n def 
predict(self, x):\ndef collect_params(model):\n def __init__(self, input_shape, num_classes, num_domains, hparams, algorithm):\n def forward(self, x, adapt=False):\n def forward_and_adapt(self, x, model, optimizer):\n def configure_model_optimizer(self, algorithm, alpha):\n def predict(self, x, adapt=False):\n def reset(self):\n def configure_model_optimizer(self, algorithm, alpha):\n def predict(self, x, adapt=False):\n def reset(self):\n def __init__(self, input_shape, num_classes, num_domains, hparams, algorithm):\n def forward(self, x, adapt=False):\n def forward_and_adapt(self, x, model, optimizer):\n def loss(self, outputs):\n def configure_model_optimizer(self, algorithm, alpha):\n def reset(self):\n def loss(self, outputs):\n def __init__(self, input_shape, num_classes, num_domains, hparams, algorithm):\n def forward(self, x, adapt=False):\n def select_supports(self):\n def predict(self, x, adapt=False):\n def reset(self):\nclass T3A_Aug(Algorithm):\nclass T3A(Algorithm):\nclass TentFull(Algorithm):\nclass TentNorm(TentFull):\nclass TentPreBN(TentFull):\nclass TentClf(TentFull):\nclass PreBN(torch.nn.Module):\nclass PseudoLabel(Algorithm):\nclass PLClf(PseudoLabel):\nclass SHOT(Algorithm):\nclass SHOTIM(SHOT): \nclass UniDG(Algorithm):" } ]
import argparse import collections import json import os import random import sys import time import uuid import itertools import copy import numpy as np import PIL import torch import torchvision import torch.utils.data import itertools import matplotlib.pyplot as plt import numpy as np from argparse import Namespace from itertools import chain from domainbed import datasets from domainbed import hparams_registry from domainbed import algorithms from domainbed.lib import misc from domainbed.lib.misc import accuracy_ent from domainbed.lib.fast_data_loader import InfiniteDataLoader, FastDataLoader, DataParallelPassthrough from domainbed import model_selection from domainbed.lib.query import Q from domainbed import adapt_algorithms from MulticoreTSNE import MulticoreTSNE as TSNE
7569
epochs_path = os.path.join(args_in.input_dir, 'results.jsonl') records = [] with open(epochs_path, 'r') as f: for line in f: records.append(json.loads(line[:-1])) records = Q(records) r = records[0] args = Namespace(**r['args']) print(args) args.input_dir = args_in.input_dir if '-' in args_in.adapt_algorithm: args.adapt_algorithm, test_batch_size = args_in.adapt_algorithm.split('-') args.test_batch_size = int(test_batch_size) else: args.adapt_algorithm = args_in.adapt_algorithm args.test_batch_size = 128 # default args.test_batch_size = 128 # default args.output_dir = args.input_dir alg_name = args_in.adapt_algorithm if args.adapt_algorithm in['T3A', 'TentPreBN', 'TentClf', 'PLClf']: use_featurer_cache = True else: use_featurer_cache = False if os.path.exists(os.path.join(args.output_dir, 'done_{}'.format(alg_name))): print("{} has already excecuted".format(alg_name)) # If we ever want to implement checkpointing, just persist these values # every once in a while, and then load them from disk here. algorithm_dict = None # os.makedirs(args.output_dir, exist_ok=True) sys.stdout = misc.Tee(os.path.join(args.output_dir, 'out_{}.txt'.format(alg_name))) sys.stderr = misc.Tee(os.path.join(args.output_dir, 'err_{}.txt'.format(alg_name))) print("Environment:") print("\tPython: {}".format(sys.version.split(" ")[0])) print("\tPyTorch: {}".format(torch.__version__)) print("\tTorchvision: {}".format(torchvision.__version__)) print("\tCUDA: {}".format(torch.version.cuda)) print("\tCUDNN: {}".format(torch.backends.cudnn.version())) print("\tNumPy: {}".format(np.__version__)) print("\tPIL: {}".format(PIL.__version__)) print('Args:') for k, v in sorted(vars(args).items()): print('\t{}: {}'.format(k, v)) if args.hparams_seed == 0: hparams = hparams_registry.default_hparams(args.algorithm, args.dataset) else: hparams = hparams_registry.random_hparams(args.algorithm, args.dataset, misc.seed_hash(args.hparams_seed, args.trial_seed)) if args.hparams: hparams.update(json.loads(args.hparams)) print('HParams:') for k, v in sorted(hparams.items()): print('\t{}: {}'.format(k, v)) assert os.path.exists(os.path.join(args.output_dir, 'done')) assert os.path.exists(os.path.join(args.output_dir, 'IID_best.pkl')) # IID_best is produced by train.py random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False if torch.cuda.is_available(): device = "cuda" else: device = "cpu" if args.dataset in vars(datasets): dataset = vars(datasets)[args.dataset](args.data_dir, args.test_envs, hparams) else: raise NotImplementedError # Split each env into an 'in-split' and an 'out-split'. We'll train on # each in-split except the test envs, and evaluate on all splits. # To allow unsupervised domain adaptation experiments, we split each test # env into 'in-split', 'uda-split' and 'out-split'. The 'in-split' is used # by collect_results.py to compute classification accuracies. The # 'out-split' is used by the Oracle model selectino method. The unlabeled # samples in 'uda-split' are passed to the algorithm at training time if # args.task == "domain_adaptation". If we are interested in comparing # domain generalization and domain adaptation results, then domain # generalization algorithms should create the same 'uda-splits', which will # be discared at training. 
in_splits = [] out_splits = [] uda_splits = [] for env_i, env in enumerate(dataset): uda = [] out, in_ = misc.split_dataset(env, int(len(env)*args.holdout_fraction), misc.seed_hash(args.trial_seed, env_i)) if env_i in args.test_envs: uda, in_ = misc.split_dataset(in_, int(len(in_)*args.uda_holdout_fraction), misc.seed_hash(args.trial_seed, env_i)) if hparams['class_balanced']: in_weights = misc.make_weights_for_balanced_classes(in_) out_weights = misc.make_weights_for_balanced_classes(out) if uda is not None: uda_weights = misc.make_weights_for_balanced_classes(uda) else: in_weights, out_weights, uda_weights = None, None, None in_splits.append((in_, in_weights)) out_splits.append((out, out_weights)) if len(uda): uda_splits.append((uda, uda_weights)) # Use out splits as training data (to fair comparison with train.py)
# The code is modified from domainbed.scripts.train def softmax_entropy(x: torch.Tensor) -> torch.Tensor: """Entropy of softmax distribution from logits.""" return -(x.softmax(1) * x.log_softmax(1)).sum(1) class Dataset: def __init__(self, x, y): self.x = x self.y = y def __len__(self): return len(self.x) def __getitem__(self, idx): return self.x[idx], self.y[idx] def generate_featurelized_loader(loader, network, classifier, batch_size=128): """ The classifier adaptation does not need to repeat the heavy forward path, We speeded up the experiments by converting the observations into representations. """ z_list = [] y_list = [] p_list = [] network.eval() classifier.eval() for x, y in loader: x = x.to(device) z = network(x) p = classifier(z) z_list.append(z.detach().cpu()) y_list.append(y.detach().cpu()) p_list.append(p.detach().cpu()) # p_list.append(p.argmax(1).float().cpu().detach()) network.train() classifier.train() z = torch.cat(z_list) y = torch.cat(y_list) p = torch.cat(p_list) ent = softmax_entropy(p) py = p.argmax(1).float().cpu().detach() dataset1, dataset2 = Dataset(z, y), Dataset(z, py) loader1 = torch.utils.data.DataLoader(dataset1, batch_size=batch_size, shuffle=False, drop_last=True) loader2 = torch.utils.data.DataLoader(dataset2, batch_size=batch_size, shuffle=False, drop_last=True) return loader1, loader2, ent def visualize_tsne(network, loader, weights, device, adapt,env, name): print("Start visualizing {}...".format(name)) if adapt: flag = 'Adapted' else: flag = 'Base' network.eval() for x, y in loader: x = x.to(device) y = y.to(device) if adapt is False: p = network(x) else: p = network(x, adapt) x = p.detach().cpu().numpy() tsne = TSNE(n_components=2).fit_transform(x) label = np.squeeze(y.cpu().numpy()) plt.figure(figsize=(6, 6)) size=100 line=0.7 t=.8 # plt.scatter(tsne[:, 0], tsne[:, 1], c=label,cmap=plt.get_cmap('hsv'),marker = 'o',linewidths=line,alpha=t,edgecolors='black') plt.scatter(tsne[:, 0], tsne[:, 1], c=label,cmap=plt.get_cmap('terrain'),marker = 'o',linewidths=line,alpha=t,edgecolors='black') plt.axis('off') plt.colorbar() plt.savefig('./visualization/vis_test_{}_{}_{}.jpg'.format(env,flag,name)) print("Visualization Results Saved...") if __name__ == "__main__": parser = argparse.ArgumentParser(description='Domain generalization') parser.add_argument('--input_dir', type=str) parser.add_argument('--adapt_algorithm', type=str, default="UniDG") args_in = parser.parse_args() epochs_path = os.path.join(args_in.input_dir, 'results.jsonl') records = [] with open(epochs_path, 'r') as f: for line in f: records.append(json.loads(line[:-1])) records = Q(records) r = records[0] args = Namespace(**r['args']) print(args) args.input_dir = args_in.input_dir if '-' in args_in.adapt_algorithm: args.adapt_algorithm, test_batch_size = args_in.adapt_algorithm.split('-') args.test_batch_size = int(test_batch_size) else: args.adapt_algorithm = args_in.adapt_algorithm args.test_batch_size = 128 # default args.test_batch_size = 128 # default args.output_dir = args.input_dir alg_name = args_in.adapt_algorithm if args.adapt_algorithm in['T3A', 'TentPreBN', 'TentClf', 'PLClf']: use_featurer_cache = True else: use_featurer_cache = False if os.path.exists(os.path.join(args.output_dir, 'done_{}'.format(alg_name))): print("{} has already excecuted".format(alg_name)) # If we ever want to implement checkpointing, just persist these values # every once in a while, and then load them from disk here. 
algorithm_dict = None # os.makedirs(args.output_dir, exist_ok=True) sys.stdout = misc.Tee(os.path.join(args.output_dir, 'out_{}.txt'.format(alg_name))) sys.stderr = misc.Tee(os.path.join(args.output_dir, 'err_{}.txt'.format(alg_name))) print("Environment:") print("\tPython: {}".format(sys.version.split(" ")[0])) print("\tPyTorch: {}".format(torch.__version__)) print("\tTorchvision: {}".format(torchvision.__version__)) print("\tCUDA: {}".format(torch.version.cuda)) print("\tCUDNN: {}".format(torch.backends.cudnn.version())) print("\tNumPy: {}".format(np.__version__)) print("\tPIL: {}".format(PIL.__version__)) print('Args:') for k, v in sorted(vars(args).items()): print('\t{}: {}'.format(k, v)) if args.hparams_seed == 0: hparams = hparams_registry.default_hparams(args.algorithm, args.dataset) else: hparams = hparams_registry.random_hparams(args.algorithm, args.dataset, misc.seed_hash(args.hparams_seed, args.trial_seed)) if args.hparams: hparams.update(json.loads(args.hparams)) print('HParams:') for k, v in sorted(hparams.items()): print('\t{}: {}'.format(k, v)) assert os.path.exists(os.path.join(args.output_dir, 'done')) assert os.path.exists(os.path.join(args.output_dir, 'IID_best.pkl')) # IID_best is produced by train.py random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False if torch.cuda.is_available(): device = "cuda" else: device = "cpu" if args.dataset in vars(datasets): dataset = vars(datasets)[args.dataset](args.data_dir, args.test_envs, hparams) else: raise NotImplementedError # Split each env into an 'in-split' and an 'out-split'. We'll train on # each in-split except the test envs, and evaluate on all splits. # To allow unsupervised domain adaptation experiments, we split each test # env into 'in-split', 'uda-split' and 'out-split'. The 'in-split' is used # by collect_results.py to compute classification accuracies. The # 'out-split' is used by the Oracle model selectino method. The unlabeled # samples in 'uda-split' are passed to the algorithm at training time if # args.task == "domain_adaptation". If we are interested in comparing # domain generalization and domain adaptation results, then domain # generalization algorithms should create the same 'uda-splits', which will # be discared at training. in_splits = [] out_splits = [] uda_splits = [] for env_i, env in enumerate(dataset): uda = [] out, in_ = misc.split_dataset(env, int(len(env)*args.holdout_fraction), misc.seed_hash(args.trial_seed, env_i)) if env_i in args.test_envs: uda, in_ = misc.split_dataset(in_, int(len(in_)*args.uda_holdout_fraction), misc.seed_hash(args.trial_seed, env_i)) if hparams['class_balanced']: in_weights = misc.make_weights_for_balanced_classes(in_) out_weights = misc.make_weights_for_balanced_classes(out) if uda is not None: uda_weights = misc.make_weights_for_balanced_classes(uda) else: in_weights, out_weights, uda_weights = None, None, None in_splits.append((in_, in_weights)) out_splits.append((out, out_weights)) if len(uda): uda_splits.append((uda, uda_weights)) # Use out splits as training data (to fair comparison with train.py)
train_loaders = [FastDataLoader(
6
2023-10-15 14:26:12+00:00
12k
jianlanluo/SAQ
vqn/vqn_main.py
[ { "identifier": "VQN", "path": "vqn/vqn.py", "snippet": "class VQN(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.embedding_dim = 128\n config.codebook_size = 64\n config.commitment_cost = 1.0\n config.quantization_cost = 1.0\n config.entropy_loss_ratio = 0.0\n config.entropy_loss_type = \"softmax\"\n config.entropy_temperature = 1.0\n config.vqvae_arch = '512-512'\n config.action_only_quantization = False\n config.reconstruction_loss_type = 'l2'\n config.vqvae_lr = 3e-4\n\n config.discount = 0.99\n config.qf_arch = '512-512'\n config.qf_lr = 3e-4\n config.target_update_period = 200\n config.reset_qf = False\n config.td_loss_weight = 1.0\n\n config.bc_loss_weight = 0.0\n\n config.action_selection_threshold = 0.0\n\n config.cql_temp = 1.0\n config.cql_min_q_weight = 0.0\n \n config.qf_weight_decay = 0.0\n\n config.q_value_penalty_weight = 0.0\n config.q_value_penalty_type = 'l1'\n config.q_value_penalty_aggregation = 'mean'\n\n if updates is not None:\n config.update(ConfigDict(updates).copy_and_resolve_references())\n return config\n\n def __init__(self, config, observation_dim, action_dim):\n self.config = self.get_default_config(config)\n self.observation_dim = observation_dim\n self.action_dim = action_dim\n\n self.vqvae = ActionVQVAE(\n observation_dim=self.observation_dim,\n action_dim=self.action_dim,\n embedding_dim=self.config.embedding_dim,\n codebook_size=self.config.codebook_size,\n commitment_cost=self.config.commitment_cost,\n quantization_cost=self.config.quantization_cost,\n entropy_loss_ratio=self.config.entropy_loss_ratio,\n entropy_loss_type=self.config.entropy_loss_type,\n entropy_temperature=self.config.entropy_temperature,\n arch=self.config.vqvae_arch,\n action_only_quantization=self.config.action_only_quantization,\n reconstruction_loss_type=self.config.reconstruction_loss_type,\n )\n\n self._vqvae_train_state = TrainState.create(\n params=self.vqvae.init(\n next_rng(self.vqvae.rng_keys()),\n jnp.zeros((1, observation_dim)),\n jnp.zeros((1, action_dim)),\n train=True\n ),\n tx=optax.adam(self.config.vqvae_lr),\n apply_fn=None,\n )\n self._vqvae_total_steps = 0\n\n self.qf = FullyConnectedNetwork(\n output_dim=self.config.codebook_size,\n arch=self.config.qf_arch,\n )\n\n qf_params = self.qf.init(\n next_rng(self.qf.rng_keys()),\n jnp.zeros((1, observation_dim)),\n )\n\n self._qf_optimizer = optax.adam(self.config.qf_lr)\n self._qf_train_state = DQNTrainState.create(\n params=qf_params,\n target_params=deepcopy(qf_params),\n tx=optax.adamw(self.config.qf_lr, self.config.qf_weight_decay),\n apply_fn=None,\n )\n self._dqn_total_steps = 0\n\n self._sampler_policy = VQSamplerPolicy(\n self.qf, self.vqvae,\n self._qf_train_state.params, self._vqvae_train_state.params\n )\n\n\n def train_vqvae(self, batch):\n self._vqvae_train_state, metrics = self._vqvae_train_step(\n next_rng(), self._vqvae_train_state, batch\n )\n self._vqvae_total_steps += 1\n return metrics\n\n @partial(jax.jit, static_argnames=('self', ))\n def _vqvae_train_step(self, rng, train_state, batch):\n observations = batch['observations']\n actions = batch['actions']\n rng_generator = JaxRNG(rng)\n\n @partial(jax.grad, has_aux=True)\n def grad_fn(train_params):\n reconstructed, result_dict = self.vqvae.apply(\n train_params,\n observations,\n actions,\n train=True,\n )\n return result_dict['loss'], result_dict\n\n grads, aux_values = grad_fn(train_state.params)\n new_train_state = train_state.apply_gradients(grads=grads)\n metrics = 
collect_jax_metrics(\n aux_values,\n ['loss', 'reconstruction_loss', 'quantizer_loss', 'e_latent_loss', 'q_latent_loss',\n 'entropy_loss', 'action_prior_loss', 'action_prior_accuracy'],\n )\n return new_train_state, metrics\n\n def train_dqn(self, batch, bc=False):\n self._qf_train_state, metrics = self._dqn_train_step(\n next_rng(), self._qf_train_state, self._vqvae_train_state, batch,\n bc\n )\n self._dqn_total_steps += 1\n return metrics\n\n @partial(jax.jit, static_argnames=('self', 'bc'))\n def _dqn_train_step(self, rng, qf_train_state, vqvae_train_state, batch, bc=False):\n observations = batch['observations']\n original_actions = batch['actions']\n rewards = batch['rewards']\n next_observations = batch['next_observations']\n dones = batch['dones']\n rng_generator = JaxRNG(rng)\n\n actions = self.vqvae.apply(\n vqvae_train_state.params,\n observations,\n original_actions,\n method=self.vqvae.encode\n )\n\n @partial(jax.grad, has_aux=True)\n def grad_fn(train_params):\n def select_by_action(q_vals, actions):\n return jnp.squeeze(\n jnp.take_along_axis(\n q_vals, jnp.expand_dims(actions, -1), axis=-1\n ),\n axis=-1\n )\n\n def select_actions(params, observations):\n q_values = self.qf.apply(params, observations)\n action_priors = jax.nn.softmax(\n self.vqvae.apply(\n vqvae_train_state.params,\n observations,\n method=self.vqvae.action_prior_logits\n ),\n axis=-1\n )\n action_selection_threshold = jnp.minimum(\n jnp.amax(action_priors, axis=-1, keepdims=True),\n self.config.action_selection_threshold\n )\n action_mask = (\n action_priors >= action_selection_threshold\n ).astype(jnp.float32)\n masked_q_values = (\n action_mask * q_values + (1.0 - action_mask) * jnp.min(q_values)\n )\n return jnp.argmax(masked_q_values, axis=-1)\n\n\n q_values = self.qf.apply(train_params, observations)\n current_actions_q_values = select_by_action(q_values, actions)\n next_q_values = self.qf.apply(qf_train_state.target_params, next_observations)\n next_actions = select_actions(train_params, next_observations)\n target_q_values = select_by_action(next_q_values, next_actions)\n\n td_target = rewards + (1. 
- dones) * self.config.discount * target_q_values\n\n td_loss = mse_loss(current_actions_q_values, jax.lax.stop_gradient(td_target))\n loss = self.config.td_loss_weight * td_loss\n\n current_actions = jnp.argmax(q_values, axis=-1)\n max_q_values = jnp.max(q_values, axis=-1)\n advantage = max_q_values - current_actions_q_values\n\n policy_dataset_aggrement_rate = jnp.mean(current_actions == actions)\n reconstructed_current_actions = self.vqvae.apply(\n vqvae_train_state.params,\n observations,\n current_actions,\n method=self.vqvae.decode\n )\n current_action_mse = jnp.sum(\n jnp.square(reconstructed_current_actions - original_actions),\n axis=-1\n ).mean()\n\n bc_loss = jnp.mean(optax.softmax_cross_entropy_with_integer_labels(q_values, actions))\n loss = loss + self.config.bc_loss_weight * bc_loss\n\n cql_lse_q_values = self.config.cql_temp * jax.scipy.special.logsumexp(\n q_values / self.config.cql_temp, axis=-1\n )\n cql_min_q_loss = jnp.mean(cql_lse_q_values - current_actions_q_values)\n loss = loss + self.config.cql_min_q_weight * cql_min_q_loss\n\n if self.config.q_value_penalty_aggregation == 'none':\n aggregated_q_values = q_values\n elif self.config.q_value_penalty_aggregation == 'mean':\n aggregated_q_values = jnp.mean(q_values)\n else:\n raise ValueError('Unsupport value penalty aggregation type!')\n\n if self.config.q_value_penalty_type == 'l1':\n q_value_penalty_loss = jnp.mean(jnp.abs(aggregated_q_values))\n elif self.config.q_value_penalty_type == 'l2':\n q_value_penalty_loss = jnp.mean(jnp.square(aggregated_q_values))\n else:\n raise ValueError('Unsupport value penalty type!')\n\n loss = loss + self.config.q_value_penalty_weight * q_value_penalty_loss\n\n if bc:\n loss = bc_loss\n\n return loss, locals()\n\n grads, aux_values = grad_fn(qf_train_state.params)\n new_target_params = jax.lax.cond(\n qf_train_state.step % self.config.target_update_period == self.config.target_update_period - 1,\n lambda: qf_train_state.params,\n lambda: qf_train_state.target_params,\n )\n if self.config.reset_qf:\n def reset_qf_params():\n qf_params = self.qf.init(\n rng_generator(self.qf.rng_keys()),\n jnp.zeros((1, self.observation_dim)),\n )\n return DQNTrainState.create(\n params=qf_params,\n target_params=new_target_params,\n tx=self._qf_optimizer,\n apply_fn=None,\n )\n\n new_qf_train_state = jax.lax.cond(\n qf_train_state.step % self.config.target_update_period == self.config.target_update_period - 1,\n reset_qf_params,\n lambda: qf_train_state.apply_gradients(grads=grads, target_params=new_target_params)\n )\n else:\n new_qf_train_state = qf_train_state.apply_gradients(\n grads=grads, target_params=new_target_params\n )\n\n metrics = collect_jax_metrics(\n aux_values,\n ['loss', 'current_actions_q_values', 'max_q_values', 'target_q_values',\n 'advantage', 'td_target', 'td_loss', 'cql_lse_q_values', 'cql_min_q_loss',\n 'policy_dataset_aggrement_rate', 'bc_loss', 'current_action_mse',\n 'q_value_penalty_loss'],\n )\n\n return new_qf_train_state, metrics\n\n def get_sampler_policy(self):\n return self._sampler_policy.update_params(\n self._qf_train_state.params, self._vqvae_train_state.params\n )" }, { "identifier": "get_d4rl_dataset", "path": "vqn/replay_buffer.py", "snippet": "def get_d4rl_dataset(env):\n dataset = d4rl.qlearning_dataset(env)\n return dict(\n observations=dataset['observations'],\n actions=dataset['actions'],\n next_observations=dataset['next_observations'],\n rewards=dataset['rewards'],\n dones=dataset['terminals'].astype(np.float32),\n )" }, { "identifier": 
"subsample_batch", "path": "vqn/replay_buffer.py", "snippet": "def subsample_batch(batch, size):\n indices = np.random.randint(batch['observations'].shape[0], size=size)\n return index_batch(batch, indices)" }, { "identifier": "batch_to_jax", "path": "vqn/jax_utils.py", "snippet": "@jax.jit\ndef batch_to_jax(batch):\n return jax.tree_util.tree_map(jax.device_put, batch)" }, { "identifier": "TanhGaussianPolicy", "path": "vqn/model.py", "snippet": "class TanhGaussianPolicy(nn.Module):\n observation_dim: int\n action_dim: int\n arch: str = '256-256'\n orthogonal_init: bool = False\n log_std_multiplier: float = 1.0\n log_std_offset: float = -1.0\n use_tanh: bool = True\n\n def setup(self):\n self.base_network = FullyConnectedNetwork(\n output_dim=2 * self.action_dim, arch=self.arch, orthogonal_init=self.orthogonal_init\n )\n self.log_std_multiplier_module = Scalar(self.log_std_multiplier)\n self.log_std_offset_module = Scalar(self.log_std_offset)\n\n def log_prob(self, observations, actions):\n if actions.ndim == 3:\n observations = extend_and_repeat(observations, 1, actions.shape[1])\n base_network_output = self.base_network(observations)\n mean, log_std = jnp.split(base_network_output, 2, axis=-1)\n log_std = self.log_std_multiplier_module() * log_std + self.log_std_offset_module()\n log_std = jnp.clip(log_std, -20.0, 2.0)\n action_distribution = distrax.MultivariateNormalDiag(mean, jnp.exp(log_std))\n if self.use_tanh:\n action_distribution = distrax.Transformed(\n action_distribution, distrax.Block(distrax.Tanh(), ndims=1)\n )\n return action_distribution.log_prob(actions)\n\n def __call__(self, observations, deterministic=False, repeat=None):\n if repeat is not None:\n observations = extend_and_repeat(observations, 1, repeat)\n base_network_output = self.base_network(observations)\n mean, log_std = jnp.split(base_network_output, 2, axis=-1)\n log_std = self.log_std_multiplier_module() * log_std + self.log_std_offset_module()\n log_std = jnp.clip(log_std, -20.0, 2.0)\n action_distribution = distrax.MultivariateNormalDiag(mean, jnp.exp(log_std))\n if self.use_tanh:\n action_distribution = distrax.Transformed(\n action_distribution, distrax.Block(distrax.Tanh(), ndims=1)\n )\n if deterministic:\n samples = mean\n if self.use_tanh:\n samples = jnp.tanh(samples)\n log_prob = action_distribution.log_prob(samples)\n else:\n samples, log_prob = action_distribution.sample_and_log_prob(seed=self.make_rng('noise'))\n\n return samples, log_prob\n\n @nn.nowrap\n def rng_keys(self):\n return ('params', 'noise')" }, { "identifier": "FullyConnectedQFunction", "path": "vqn/model.py", "snippet": "class FullyConnectedQFunction(nn.Module):\n observation_dim: int\n action_dim: int\n arch: str = '256-256'\n orthogonal_init: bool = False\n\n @nn.compact\n @multiple_action_q_function\n def __call__(self, observations, actions):\n x = jnp.concatenate([observations, actions], axis=-1)\n x = FullyConnectedNetwork(output_dim=1, arch=self.arch, orthogonal_init=self.orthogonal_init)(x)\n return jnp.squeeze(x, -1)\n\n @nn.nowrap\n def rng_keys(self):\n return ('params', )" }, { "identifier": "SamplerPolicy", "path": "vqn/model.py", "snippet": "class SamplerPolicy(object):\n\n def __init__(self, policy, params):\n self.policy = policy\n self.params = params\n\n def update_params(self, params):\n self.params = params\n return self\n\n @partial(jax.jit, static_argnames=('self', 'deterministic'))\n def act(self, params, rng, observations, deterministic):\n return self.policy.apply(\n params, observations, deterministic, 
repeat=None,\n rngs=JaxRNG(rng)(self.policy.rng_keys())\n )\n\n def __call__(self, observations, deterministic=False):\n actions, _ = self.act(self.params, next_rng(), observations, deterministic=deterministic)\n assert jnp.all(jnp.isfinite(actions))\n return jax.device_get(actions)" }, { "identifier": "StepSampler", "path": "vqn/sampler.py", "snippet": "class StepSampler(object):\n\n def __init__(self, env, max_traj_length=1000):\n self.max_traj_length = max_traj_length\n self._env = env\n self._traj_steps = 0\n self._current_observation = self.env.reset()\n\n def sample(self, policy, n_steps, deterministic=False, replay_buffer=None):\n observations = []\n actions = []\n rewards = []\n next_observations = []\n dones = []\n\n for _ in range(n_steps):\n self._traj_steps += 1\n observation = self._current_observation\n action = policy(observation.reshape(1, -1), deterministic=deterministic).reshape(-1)\n next_observation, reward, done, _ = self.env.step(action)\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n dones.append(done)\n next_observations.append(next_observation)\n\n if replay_buffer is not None:\n replay_buffer.add_sample(\n observation, action, reward, next_observation, done\n )\n\n self._current_observation = next_observation\n\n if done or self._traj_steps >= self.max_traj_length:\n self._traj_steps = 0\n self._current_observation = self.env.reset()\n\n return dict(\n observations=np.array(observations, dtype=np.float32),\n actions=np.array(actions, dtype=np.float32),\n rewards=np.array(rewards, dtype=np.float32),\n next_observations=np.array(next_observations, dtype=np.float32),\n dones=np.array(dones, dtype=np.float32),\n )\n\n @property\n def env(self):\n return self._env" }, { "identifier": "TrajSampler", "path": "vqn/sampler.py", "snippet": "class TrajSampler(object):\n\n def __init__(self, env, max_traj_length=1000):\n self.max_traj_length = max_traj_length\n self._env = env\n\n def sample(self, policy, n_trajs, replay_buffer=None, deterministic=False):\n trajs = []\n for _ in range(n_trajs):\n observations = []\n actions = []\n rewards = []\n next_observations = []\n dones = []\n\n observation = self.env.reset()\n\n for _ in range(self.max_traj_length):\n action = policy(observation.reshape(1, -1), deterministic=deterministic).reshape(-1)\n next_observation, reward, done, _ = self.env.step(action)\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n dones.append(done)\n next_observations.append(next_observation)\n\n if replay_buffer is not None:\n replay_buffer.add_sample(\n observation, action, reward, next_observation, done\n )\n\n observation = next_observation\n\n if done:\n break\n\n trajs.append(dict(\n observations=np.array(observations, dtype=np.float32),\n actions=np.array(actions, dtype=np.float32),\n rewards=np.array(rewards, dtype=np.float32),\n next_observations=np.array(next_observations, dtype=np.float32),\n dones=np.array(dones, dtype=np.float32),\n ))\n\n return trajs\n\n @property\n def env(self):\n return self._env" }, { "identifier": "make_dataset", "path": "vqn/robomimic_utils.py", "snippet": "def make_dataset(dataset, env_name):\n if not env_name in ENV_TO_HORIZON_MAP:\n dataset = OfflineDataset(dataset)\n return dataset" }, { "identifier": "process_robomimic_dataset", "path": "vqn/robomimic_utils.py", "snippet": "def process_robomimic_dataset(seq_dataset):\n seq_dataset = seq_dataset.getitem_cache\n\n for i in range(len(seq_dataset)):\n seq_dataset[i]['obs'] = 
np.concatenate([seq_dataset[i]['obs'][key] \n for key in OBS_KEYS], axis=1)\n seq_dataset[i]['next_obs'] = np.concatenate([seq_dataset[i]['next_obs'][key] \n for key in OBS_KEYS], axis=1)\n\n dataset = {'actions': np.concatenate([seq_dataset[i]['actions'] for i in range(len(seq_dataset))]),\n 'rewards': np.concatenate([seq_dataset[i]['rewards'] for i in range(len(seq_dataset))]),\n 'terminals': np.concatenate([seq_dataset[i]['dones'] for i in range(len(seq_dataset))]),\n 'observations': np.concatenate([seq_dataset[i]['obs'] for i in range(len(seq_dataset))]),\n 'next_observations': np.concatenate([seq_dataset[i]['next_obs'] for i in range(len(seq_dataset))])}\n return dataset" }, { "identifier": "D4RLDataset", "path": "vqn/robomimic_utils.py", "snippet": "class D4RLDataset(Dataset):\n def __init__(self,\n env: gym.Env,\n clip_to_eps: bool = True,\n eps: float = 1e-5,\n ignore_done: bool = False,\n custom_dataset: dict = None):\n if custom_dataset:\n if env is not None:\n dataset = d4rl.qlearning_dataset(env, dataset=custom_dataset)\n else:\n dataset = custom_dataset\n print(\"Loaded custom dataset\")\n else:\n dataset = d4rl.qlearning_dataset(env)\n if clip_to_eps:\n lim = 1 - eps\n dataset['actions'] = np.clip(dataset['actions'], -lim, lim)\n dones_float = np.zeros_like(dataset['rewards'])\n for i in range(len(dones_float) - 1):\n if ignore_done:\n if np.linalg.norm(dataset['observations'][i + 1] - dataset['next_observations'][i]) > 1e-6:\n dones_float[i] = 1\n else:\n dones_float[i] = 0\n else:\n if np.linalg.norm(dataset['observations'][i + 1] - dataset['next_observations'][i]) > 1e-6 or dataset['terminals'][i] == 1.0:\n dones_float[i] = 1\n else:\n dones_float[i] = 0\n dones_float[-1] = 1\n dataset_dict = {\n 'observations': dataset['observations'].astype(np.float32),\n 'actions': dataset['actions'].astype(np.float32),\n 'rewards': dataset['rewards'].astype(np.float32),\n 'masks': 1.0 - dataset['terminals'].astype(np.float32),\n 'dones': dones_float.astype(np.float32),\n 'next_observations': dataset['next_observations'].astype(\n np.float32)\n }\n super().__init__(dataset_dict)" }, { "identifier": "get_robomimic_env", "path": "vqn/robomimic_utils.py", "snippet": "def get_robomimic_env(dataset_path, example_action, env_name):\n # Initialize ObsUtils environment variables\n ObsUtils.initialize_obs_utils_with_config(config_factory(algo_name='iql'))\n env_meta = FileUtils.get_env_metadata_from_dataset(dataset_path)\n env = EnvUtils.create_env_from_metadata(\n env_meta=env_meta,\n render=False, \n render_offscreen=False, \n use_image_obs=False,\n )\n env = RobosuiteGymWrapper(env, ENV_TO_HORIZON_MAP[env_name], example_action)\n return env" }, { "identifier": "ENV_TO_HORIZON_MAP", "path": "vqn/robomimic_utils.py", "snippet": "ENV_TO_HORIZON_MAP = {'lift': 400,\n 'can': 400,\n 'square': 400,\n 'transport': 700,\n 'tool_hang': 700}" }, { "identifier": "OBS_KEYS", "path": "vqn/robomimic_utils.py", "snippet": "OBS_KEYS = (\"robot0_eef_pos\", \"robot0_eef_quat\", \"robot0_gripper_qpos\", \"object\")" }, { "identifier": "Timer", "path": "vqn/utils.py", "snippet": "class Timer(object):\n\n def __init__(self):\n self._time = None\n\n def __enter__(self):\n self._start_time = time.time()\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self._time = time.time() - self._start_time\n\n def __call__(self):\n return self._time" }, { "identifier": "define_flags_with_default", "path": "vqn/utils.py", "snippet": "def define_flags_with_default(**kwargs):\n for key, val in kwargs.items():\n 
if isinstance(val, ConfigDict):\n config_flags.DEFINE_config_dict(key, val)\n elif isinstance(val, bool):\n # Note that True and False are instances of int.\n absl.flags.DEFINE_bool(key, val, 'automatically defined flag')\n elif isinstance(val, int):\n absl.flags.DEFINE_integer(key, val, 'automatically defined flag')\n elif isinstance(val, float):\n absl.flags.DEFINE_float(key, val, 'automatically defined flag')\n elif isinstance(val, str):\n absl.flags.DEFINE_string(key, val, 'automatically defined flag')\n else:\n raise ValueError('Incorrect value type')\n return kwargs" }, { "identifier": "set_random_seed", "path": "vqn/utils.py", "snippet": "def set_random_seed(seed):\n np.random.seed(seed)\n random.seed(seed)\n init_rng(seed)" }, { "identifier": "print_flags", "path": "vqn/utils.py", "snippet": "def print_flags(flags, flags_def):\n logging.info(\n 'Running training with hyperparameters: \\n{}'.format(\n pprint.pformat(\n ['{}: {}'.format(key, val) for key, val in get_user_flags(flags, flags_def).items()]\n )\n )\n )" }, { "identifier": "get_user_flags", "path": "vqn/utils.py", "snippet": "def get_user_flags(flags, flags_def):\n output = {}\n for key in flags_def:\n val = getattr(flags, key)\n if isinstance(val, ConfigDict):\n output.update(flatten_config_dict(val, prefix=key))\n else:\n output[key] = val\n\n return output" }, { "identifier": "prefix_metrics", "path": "vqn/utils.py", "snippet": "def prefix_metrics(metrics, prefix):\n return {\n '{}/{}'.format(prefix, key): value for key, value in metrics.items()\n }" }, { "identifier": "WandBLogger", "path": "vqn/utils.py", "snippet": "class WandBLogger(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.online = False\n config.prefix = 'JaxCQL'\n config.project = ''\n config.output_dir = '/tmp/JaxCQL'\n config.random_delay = 0.0\n config.experiment_id = config_dict.placeholder(str)\n config.anonymous = config_dict.placeholder(str)\n config.notes = config_dict.placeholder(str)\n config.entity = config_dict.placeholder(str)\n\n if updates is not None:\n config.update(ConfigDict(updates).copy_and_resolve_references())\n return config\n\n def __init__(self, config, variant):\n self.config = self.get_default_config(config)\n\n if self.config.experiment_id is None:\n self.config.experiment_id = uuid.uuid4().hex\n\n if self.config.prefix != '':\n self.config.project = '{}--{}'.format(self.config.prefix, self.config.project)\n\n if self.config.output_dir == '':\n self.config.output_dir = tempfile.mkdtemp()\n else:\n self.config.output_dir = os.path.join(self.config.output_dir, self.config.experiment_id)\n os.makedirs(self.config.output_dir, exist_ok=True)\n\n self._variant = copy(variant)\n\n if 'hostname' not in self._variant:\n self._variant['hostname'] = gethostname()\n\n if self.config.random_delay > 0:\n time.sleep(np.random.uniform(0, self.config.random_delay))\n\n self.run = wandb.init(\n reinit=True,\n config=self._variant,\n project=self.config.project,\n dir=self.config.output_dir,\n entity=config.entity,\n id=self.config.experiment_id,\n anonymous=self.config.anonymous,\n notes=self.config.notes,\n settings=wandb.Settings(\n start_method=\"thread\",\n _disable_stats=True,\n ),\n mode='online' if self.config.online else 'offline',\n )\n\n def log(self, *args, **kwargs):\n self.run.log(*args, **kwargs)\n\n def save_pickle(self, obj, filename):\n with open(os.path.join(self.config.output_dir, filename), 'wb') as fout:\n pickle.dump(obj, fout)\n\n @property\n def experiment_id(self):\n 
return self.config.experiment_id\n\n @property\n def variant(self):\n return self.config.variant\n\n @property\n def output_dir(self):\n return self.config.output_dir" } ]
import os import time import uuid import numpy as np import jax import jax.numpy as jnp import flax import gym import d4rl import absl.app import absl.flags from copy import deepcopy from pprint import pprint from robomimic.utils.dataset import SequenceDataset from .vqn import VQN from .replay_buffer import get_d4rl_dataset, subsample_batch from .jax_utils import batch_to_jax from .model import TanhGaussianPolicy, FullyConnectedQFunction, SamplerPolicy from .sampler import StepSampler, TrajSampler from .robomimic_utils import ( make_dataset, process_robomimic_dataset, D4RLDataset, get_robomimic_env, ENV_TO_HORIZON_MAP, OBS_KEYS ) from .utils import ( Timer, define_flags_with_default, set_random_seed, print_flags, get_user_flags, prefix_metrics, WandBLogger )
7219
FLAGS_DEF = define_flags_with_default( env='halfcheetah-medium-v2', max_traj_length=200, algorithm='vqn', seed=42, save_model=False, batch_size=256, reward_scale=1.0, reward_bias=0.0, clip_action=0.999, vqvae_n_epochs=500, dqn_n_epochs=1000, bc_epochs=1001, n_train_step_per_epoch=10, eval_period=10, eval_n_trajs=5, vqn=VQN.get_default_config(), logging=WandBLogger.get_default_config(), ) def main(argv): FLAGS = absl.flags.FLAGS variant = get_user_flags(FLAGS, FLAGS_DEF) wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant)
FLAGS_DEF = define_flags_with_default( env='halfcheetah-medium-v2', max_traj_length=200, algorithm='vqn', seed=42, save_model=False, batch_size=256, reward_scale=1.0, reward_bias=0.0, clip_action=0.999, vqvae_n_epochs=500, dqn_n_epochs=1000, bc_epochs=1001, n_train_step_per_epoch=10, eval_period=10, eval_n_trajs=5, vqn=VQN.get_default_config(), logging=WandBLogger.get_default_config(), ) def main(argv): FLAGS = absl.flags.FLAGS variant = get_user_flags(FLAGS, FLAGS_DEF) wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant)
set_random_seed(FLAGS.seed)
17
2023-10-18 06:31:20+00:00
12k
naver-ai/dual-teacher
tools/train.py
[ { "identifier": "__version__", "path": "mmseg/version.py", "snippet": "def parse_version_info(version_str):" }, { "identifier": "set_random_seed", "path": "mmseg/apis/train.py", "snippet": "def set_random_seed(seed, deterministic=False):\n \"\"\"Set random seed.\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.benchmark` to False.\n Default: False.\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False" }, { "identifier": "train_segmentor", "path": "mmseg/apis/train.py", "snippet": "def train_segmentor(model,\n dataset,\n cfg,\n distributed=False,\n validate=False,\n timestamp=None,\n meta=None):\n \"\"\"Launch segmentor training.\"\"\"\n logger = get_root_logger(cfg.log_level)\n\n # prepare data loaders\n dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]\n data_loaders = [\n build_dataloader(\n ds,\n cfg.data.samples_per_gpu,\n cfg.data.workers_per_gpu,\n # cfg.gpus will be ignored if distributed\n len(cfg.gpu_ids),\n dist=distributed,\n seed=cfg.seed,\n drop_last=True) for ds in dataset\n ]\n\n # put model on gpus\n if distributed:\n find_unused_parameters = cfg.get('find_unused_parameters', False)\n # Sets the `find_unused_parameters` parameter in\n # torch.nn.parallel.DistributedDataParallel\n model = MMDistributedDataParallel(\n model.cuda(),\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False,\n find_unused_parameters=find_unused_parameters)\n else:\n model = MMDataParallel(\n model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)\n\n # build runner\n optimizer = build_optimizer(model, cfg.optimizer)\n\n if cfg.get('runner') is None:\n cfg.runner = {'type': 'IterBasedRunner', 'max_iters': cfg.total_iters}\n warnings.warn(\n 'config is now expected to have a `runner` section, '\n 'please set `runner` in your config.', UserWarning)\n\n runner = build_runner(\n cfg.runner,\n default_args=dict(\n model=model,\n batch_processor=None,\n optimizer=optimizer,\n work_dir=cfg.work_dir,\n logger=logger,\n meta=meta))\n\n # register hooks\n runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,\n cfg.checkpoint_config, cfg.log_config,\n cfg.get('momentum_config', None))\n\n # an ugly walkaround to make the .log and .log.json filenames the same\n runner.timestamp = timestamp\n\n # register eval hooks\n if validate:\n val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))\n val_dataloader = build_dataloader(\n val_dataset,\n samples_per_gpu=1,\n workers_per_gpu=cfg.data.workers_per_gpu,\n dist=distributed,\n shuffle=False)\n eval_cfg = cfg.get('evaluation', {})\n eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'\n eval_hook = DistEvalHook if distributed else EvalHook\n runner.register_hook(eval_hook(val_dataloader, **eval_cfg))\n\n if cfg.resume_from:\n runner.resume(cfg.resume_from)\n elif cfg.load_from:\n runner.load_checkpoint(cfg.load_from)\n runner.run(data_loaders, cfg.workflow)" }, { "identifier": "build_dataloader", "path": "mmseg/datasets/builder.py", "snippet": "def build_dataloader(dataset,\n samples_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n dist=True,\n shuffle=True,\n seed=None,\n drop_last=False,\n pin_memory=True,\n dataloader_type='PoolDataLoader',\n **kwargs):\n \"\"\"Build PyTorch 
DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n samples_per_gpu (int): Number of training samples on each GPU, i.e.,\n batch size of each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n dist (bool): Distributed training/test or not. Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n seed (int | None): Seed to be used. Default: None.\n drop_last (bool): Whether to drop the last incomplete batch in epoch.\n Default: False\n pin_memory (bool): Whether to use pin_memory in DataLoader.\n Default: True\n dataloader_type (str): Type of dataloader. Default: 'PoolDataLoader'\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n \"\"\"\n rank, world_size = get_dist_info()\n if dist:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=shuffle)\n shuffle = False\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\n else:\n sampler = None\n batch_size = num_gpus * samples_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n init_fn = partial(\n worker_init_fn, num_workers=num_workers, rank=rank,\n seed=seed) if seed is not None else None\n\n assert dataloader_type in (\n 'DataLoader',\n 'PoolDataLoader'), f'unsupported dataloader {dataloader_type}'\n\n if dataloader_type == 'PoolDataLoader':\n dataloader = PoolDataLoader\n elif dataloader_type == 'DataLoader':\n dataloader = DataLoader\n\n data_loader = dataloader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),\n pin_memory=pin_memory,\n shuffle=shuffle,\n worker_init_fn=init_fn,\n drop_last=drop_last,\n **kwargs)\n\n return data_loader" }, { "identifier": "build_dataset", "path": "mmseg/datasets/builder.py", "snippet": "def build_dataset(cfg, default_args=None):\n \"\"\"Build datasets.\"\"\"\n from .dataset_wrappers import ConcatDataset, RepeatDataset\n if isinstance(cfg, (list, tuple)):\n dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])\n elif cfg['type'] == 'RepeatDataset':\n dataset = RepeatDataset(\n build_dataset(cfg['dataset'], default_args), cfg['times'])\n elif isinstance(cfg.get('img_dir'), (list, tuple)) or isinstance(\n cfg.get('split', None), (list, tuple)):\n dataset = _concat_dataset(cfg, default_args)\n else:\n dataset = build_from_cfg(cfg, DATASETS, default_args)\n\n return dataset" }, { "identifier": "build_segmentor", "path": "mmseg/models/builder.py", "snippet": "def build_segmentor(cfg, train_cfg=None, test_cfg=None):\n \"\"\"Build segmentor.\"\"\"\n if train_cfg is not None or test_cfg is not None:\n warnings.warn(\n 'train_cfg and test_cfg is deprecated, '\n 'please specify them in model', UserWarning)\n assert cfg.get('train_cfg') is None or train_cfg is None, \\\n 'train_cfg specified in both outer field and model field '\n assert cfg.get('test_cfg') is None or test_cfg is None, \\\n 'test_cfg specified in both outer field and model field '\n return build(cfg, SEGMENTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))" }, { "identifier": "collect_env", "path": "mmseg/utils/collect_env.py", "snippet": "def collect_env():\n \"\"\"Collect the information of the running environments.\"\"\"\n env_info = 
collect_base_env()\n env_info['MMSegmentation'] = f'{mmseg.__version__}+{get_git_hash()[:7]}'\n\n return env_info" }, { "identifier": "get_root_logger", "path": "mmseg/utils/logger.py", "snippet": "def get_root_logger(log_file=None, log_level=logging.INFO):\n \"\"\"Get the root logger.\n\n The logger will be initialized if it has not been initialized. By default a\n StreamHandler will be added. If `log_file` is specified, a FileHandler will\n also be added. The name of the root logger is the top-level package name,\n e.g., \"mmseg\".\n\n Args:\n log_file (str | None): The log filename. If specified, a FileHandler\n will be added to the root logger.\n log_level (int): The root logger level. Note that only the process of\n rank 0 is affected, while other processes will set the level to\n \"Error\" and be silent most of the time.\n\n Returns:\n logging.Logger: The root logger.\n \"\"\"\n\n logger = get_logger(name='mmseg', log_file=log_file, log_level=log_level)\n\n return logger" }, { "identifier": "MiT_SegFormer", "path": "seg_core/model.py", "snippet": "class MiT_SegFormer(nn.Module):\n def __init__(self, backbone, num_classes=20, embedding_dim=256, pretrained=None):\n super().__init__()\n self.num_classes = num_classes\n self.embedding_dim = embedding_dim\n self.feature_strides = [4, 8, 16, 32]\n # self.in_channels = [32, 64, 160, 256]\n # self.in_channels = [64, 128, 320, 512]\n\n self.encoder = getattr(mix_transformer, backbone)()\n self.in_channels = self.encoder.embed_dims\n mit_num = backbone.split('_')[1][1]\n ## initilize encoder\n if pretrained:\n state_dict = torch.load('/home/najm/DualTeacher/pretrained/mit_b' + mit_num + '.pth')\n state_dict.pop('head.weight')\n state_dict.pop('head.bias')\n self.encoder.load_state_dict(state_dict, )\n\n self.decoder = SegFormerHead(feature_strides=self.feature_strides, in_channels=self.in_channels, embedding_dim=self.embedding_dim, num_classes=self.num_classes)\n\n self.classifier = nn.Conv2d(in_channels=self.in_channels[-1], out_channels=self.num_classes, kernel_size=1, bias=False)\n\n def _forward_cam(self, x):\n\n cam = F.conv2d(x, self.classifier.weight)\n cam = F.relu(cam)\n\n return cam\n\n def get_param_groups(self):\n\n param_groups = [[], [], []] #\n\n for name, param in list(self.encoder.named_parameters()):\n if \"norm\" in name:\n param_groups[1].append(param)\n else:\n param_groups[0].append(param)\n\n for param in list(self.decoder.parameters()):\n param_groups[2].append(param)\n\n param_groups[2].append(self.classifier.weight)\n\n return param_groups\n\n def forward(self, x):\n\n _x = self.encoder(x)\n _x1, _x2, _x3, _x4 = _x\n cls = self.classifier(_x4)\n return self.decoder(_x)" }, { "identifier": "PolyWarmupAdamW", "path": "seg_core/optimizer.py", "snippet": "class PolyWarmupAdamW(torch.optim.AdamW):\n\n def __init__(self, params, lr, weight_decay, betas, warmup_iter=None, max_iter=None, warmup_ratio=None, power=None):\n super().__init__(params, lr=lr, betas=betas, weight_decay=weight_decay, eps=1e-8)\n\n self.global_step = 0\n self.warmup_iter = warmup_iter\n self.warmup_ratio = warmup_ratio\n self.max_iter = max_iter\n self.power = power\n\n self.__init_lr = [group['lr'] for group in self.param_groups]\n\n def step(self, closure=None):\n ## adjust lr\n if self.global_step < self.warmup_iter:\n\n lr_mult = 1 - (1 - self.global_step / self.warmup_iter) * (1 - self.warmup_ratio)\n for i in range(len(self.param_groups)):\n self.param_groups[i]['lr'] = self.__init_lr[i] * lr_mult\n\n elif self.global_step < self.max_iter:\n\n 
lr_mult = (1 - self.global_step / self.max_iter) ** self.power\n for i in range(len(self.param_groups)):\n self.param_groups[i]['lr'] = self.__init_lr[i] * lr_mult\n\n # step\n super().step(closure)\n\n self.global_step += 1" }, { "identifier": "ClassMixLoss", "path": "seg_core/augmentations.py", "snippet": "class ClassMixLoss(nn.Module):\n def __init__(self, weight=None, reduction=None, ignore_index=None):\n super(ClassMixLoss, self).__init__()\n self.CE = nn.CrossEntropyLoss(weight=weight, reduction=reduction, ignore_index=ignore_index)\n\n def forward(self, output, target, pixel_weight):\n loss = self.CE(output, target)\n loss = torch.mean(loss * pixel_weight)\n return loss" }, { "identifier": "compute_classmix", "path": "seg_core/augmentations.py", "snippet": "def compute_classmix(b, h, w, criterion, cm_loss_fn, model, ema_model, imgs, labels, unsup_imgs, image_u_strong, threshold):\n # Unlabeled Process\n with torch.no_grad():\n logits_occluder = ema_model(unsup_imgs) # 129\n logits_occluder = F.interpolate(logits_occluder, (h, w), mode=\"bilinear\", align_corners=False) # 513\n softmax_occluder = torch.softmax(logits_occluder, dim=1)\n max_prob_occluder, argmax_occluder = torch.max(softmax_occluder, dim=1)\n\n binary_mask = get_bin_mask(b, argmax_occluder)\n binary_mask = binary_mask.squeeze(dim=1)\n if b == 2:\n shuffle_index = torch.tensor([1, 0])\n else:\n shuffle_index = torch.randperm(b).cuda()\n class_mixed_img = class_mix(occluder_mask=binary_mask, occluder=image_u_strong, occludee=image_u_strong[shuffle_index])\n\n num_labeled = len(imgs)\n outputs = model(torch.cat([imgs, class_mixed_img]))\n outputs, outputs_u = outputs[:num_labeled], outputs[num_labeled:]\n\n pred_large = F.interpolate(outputs, size=labels.shape[1:], mode='bilinear', align_corners=False)\n sup_loss = criterion(pred_large, labels.type(torch.long).clone())\n del outputs, pred_large\n torch.cuda.empty_cache()\n logits_class_mixed = F.interpolate(outputs_u, (h, w), mode=\"bilinear\", align_corners=False)\n\n class_mixed_softmax = class_mix(occluder_mask=binary_mask, occluder=softmax_occluder, occludee=softmax_occluder[shuffle_index])\n max_prob_occluder, pseudo_label = torch.max(class_mixed_softmax, dim=1)\n\n unlabeled_weight = torch.sum(max_prob_occluder.ge(threshold).long() == 1).item() / np.size(np.array(pseudo_label.cpu()))\n pixel_weight = unlabeled_weight * torch.ones(max_prob_occluder.shape).cuda()\n\n class_mix_loss = cm_loss_fn(logits_class_mixed, pseudo_label, pixel_weight)\n loss = sup_loss + class_mix_loss\n return loss" }, { "identifier": "compute_cutmix", "path": "seg_core/augmentations.py", "snippet": "def compute_cutmix(h, w, imgs, labels, criterion, model, ema_model, image_u, threshold):\n with torch.no_grad():\n pred = ema_model(image_u)\n pred = F.interpolate(pred, (h, w), mode=\"bilinear\", align_corners=False)\n pred = F.softmax(pred, dim=1)\n pred_logit, pred_label = torch.max(pred, dim=1)\n\n image_aug, label_aug = cut_mixer(image_u, pred_label.clone())\n\n image_aug, label_aug, pred_logit = \\\n batch_transform(image_aug, label_aug, pred_logit,\n crop_size=(pred_logit.shape[1], pred_logit.shape[2]), scale_size=(1.0, 1.0), apply_augmentation=True)\n\n num_labeled = len(imgs)\n outputs = model(torch.cat([imgs, image_aug]))\n outputs, outputs_u = outputs[:num_labeled], outputs[num_labeled:]\n pred_large = F.interpolate(outputs, size=labels.shape[1:], mode='bilinear', align_corners=False)\n sup_loss = criterion(pred_large, labels.type(torch.long).clone())\n\n pred_u = 
F.interpolate(outputs_u, (h, w), mode=\"bilinear\", align_corners=False)\n\n cutmix_loss = compute_unsupervised_loss(pred_u, label_aug.clone(), pred_logit, threshold)\n return sup_loss + cutmix_loss" }, { "identifier": "compute_ic", "path": "seg_core/augmentations.py", "snippet": "def compute_ic(model, ema_model, image_u, image_u_strong, criterion_u, label_u, h, w, threshold):\n with torch.no_grad():\n logits = ema_model(image_u) # 129\n logits = F.interpolate(logits, (h, w), mode=\"bilinear\", align_corners=False) # 513\n softmax_out = torch.softmax(logits, dim=1)\n max_probs, argmax_label = torch.max(softmax_out, dim=1)\n pred_dc = model(image_u_strong)\n pred_dc = F.interpolate(pred_dc, (h, w), mode=\"bilinear\", align_corners=False) # 513\n loss_dc = criterion_u(pred_dc, argmax_label)\n loss_dc = loss_dc * ((max_probs >= threshold) & (label_u != 255))\n loss_dc = loss_dc.sum() / (label_u != 255).sum().item()\n return loss_dc.clone()" }, { "identifier": "single_gpu_test", "path": "mmseg/apis/test.py", "snippet": "def single_gpu_test(model,\n data_loader,\n show=False,\n out_dir=None,\n efficient_test=False):\n \"\"\"Test with single GPU.\n\n Args:\n model (nn.Module): Model to be tested.\n data_loader (utils.data.Dataloader): Pytorch data loader.\n show (bool): Whether show results during infernece. Default: False.\n out_dir (str, optional): If specified, the results will be dumped into\n the directory to save output results.\n efficient_test (bool): Whether save the results as local numpy files to\n save CPU memory during evaluation. Default: False.\n\n Returns:\n list: The prediction results.\n \"\"\"\n\n model.eval()\n results = []\n dataset = data_loader.dataset\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, **data)\n\n if show or out_dir:\n img_tensor = data['img'][0]\n img_metas = data['img_metas'][0].data[0]\n imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])\n assert len(imgs) == len(img_metas)\n\n for img, img_meta in zip(imgs, img_metas):\n h, w, _ = img_meta['img_shape']\n img_show = img[:h, :w, :]\n\n ori_h, ori_w = img_meta['ori_shape'][:-1]\n img_show = mmcv.imresize(img_show, (ori_w, ori_h))\n\n if out_dir:\n out_file = osp.join(out_dir, img_meta['ori_filename'])\n else:\n out_file = None\n\n model.module.show_result(\n img_show,\n result,\n palette=dataset.PALETTE,\n show=show,\n out_file=out_file)\n\n if isinstance(result, list):\n if efficient_test:\n result = [np2tmp(_) for _ in result]\n results.extend(result)\n else:\n if efficient_test:\n result = np2tmp(result)\n results.append(result)\n\n batch_size = data['img'][0].size(0)\n for _ in range(batch_size):\n prog_bar.update()\n return results" } ]
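The build_dataloader snippet in this context seeds every DataLoader worker through a worker_init_fn partial keyed on (num_workers, rank, seed). A minimal, self-contained sketch of that seeding pattern follows; the body of worker_init_fn is not shown in the record, so the version below is an assumption that mirrors the common mmcv/mmseg convention rather than the record's own helper.

import random
from functools import partial

import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset


def worker_init_fn(worker_id, num_workers, rank, seed):
    # Body assumed (mirrors the usual mmcv/mmseg convention): give each worker
    # on each rank a distinct, reproducible seed.
    worker_seed = num_workers * rank + worker_id + seed
    random.seed(worker_seed)
    np.random.seed(worker_seed)
    torch.manual_seed(worker_seed)


dataset = TensorDataset(torch.arange(16, dtype=torch.float32).unsqueeze(1))
init_fn = partial(worker_init_fn, num_workers=2, rank=0, seed=42)
loader = DataLoader(dataset, batch_size=4, num_workers=2, shuffle=True,
                    worker_init_fn=init_fn, drop_last=True)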
import argparse import copy import os import os.path as osp import time import logging import mmcv import torch import numpy as np import seg_core.eval_seg as eval_seg import torch.nn.functional as F import warnings import torch.distributed as dist import random import tempfile from mmcv.runner import init_dist from mmcv.utils import Config, DictAction, get_git_hash from torchvision.transforms import ToTensor from mmseg import __version__ from mmseg.apis import set_random_seed, train_segmentor from mmseg.datasets import build_dataset, build_dataloader from mmseg.models import build_segmentor from mmseg.utils import collect_env, get_root_logger from seg_core.model import MiT_SegFormer from seg_core.optimizer import PolyWarmupAdamW from seg_core.augmentations import ClassMixLoss, compute_classmix, compute_cutmix, compute_ic from torchvision.utils import save_image from dist_helper import setup_distributed from mmseg.apis import single_gpu_test from mmcv.image import tensor2imgs from PIL import Image, ImageOps, ImageFilter from torchvision import transforms from copy import deepcopy
7,720
meta['seed'] = args.seed meta['exp_name'] = osp.basename(args.config) model = MiT_SegFormer(backbone=args.backbone, num_classes=150, embedding_dim=256, pretrained=True) if args.ddp: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) model.cuda() model_teacher = MiT_SegFormer(backbone=args.backbone + '_ema', num_classes=150, embedding_dim=256, pretrained=True).cuda() for p in model_teacher.parameters(): p.requires_grad = False model_teacher2 = MiT_SegFormer(backbone=args.backbone + '_ema', num_classes=150, embedding_dim=256, pretrained=True).cuda() for p in model_teacher2.parameters(): p.requires_grad = False param_groups = model.get_param_groups() trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad) if rank == 0: print('trainable_params:', trainable_params) shuffle = True if args.ddp: local_rank = int(os.environ["LOCAL_RANK"]) model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True, ) shuffle = False max_iters = 50000 print_iters = 100 eval_iters = 5000 optimizer = PolyWarmupAdamW( params=[ { "params": param_groups[0], "lr": cfg.optimizer.lr, "weight_decay": cfg.optimizer.weight_decay, }, { "params": param_groups[1], "lr": cfg.optimizer.lr, "weight_decay": 0.0, }, { "params": param_groups[2], "lr": cfg.optimizer.lr * 10, "weight_decay": cfg.optimizer.weight_decay, }, ], lr=cfg.optimizer.lr, weight_decay=cfg.optimizer.weight_decay, betas=cfg.optimizer.betas, warmup_iter=cfg.scheduler.warmup_iters, max_iter=max_iters, warmup_ratio=cfg.scheduler.warmup_ratio, power=cfg.scheduler.power ) supervised_full = False if supervised_full: datasets = [build_dataset(cfg.data.train)] else: datasets = [build_dataset(cfg.data.train_semi_l)] datasets_u = [build_dataset(cfg.data.train_semi_u)] datasets_val = [build_dataset(cfg.data.val)] batch_size = 4 train_loader = [ build_dataloader( ds, samples_per_gpu=batch_size, workers_per_gpu=0, num_gpus=1, dist=distributed, shuffle=shuffle, seed=cfg.seed, drop_last=True, pin_memory=True) for ds in datasets ] train_loader_u = [ build_dataloader( ds, samples_per_gpu=batch_size, workers_per_gpu=0, num_gpus=1, dist=distributed, shuffle=shuffle, seed=cfg.seed, drop_last=True, pin_memory=True) for ds in datasets_u ] val_loader = [ build_dataloader( ds, samples_per_gpu=1, workers_per_gpu=0, num_gpus=1, dist=distributed, shuffle=False, seed=cfg.seed, drop_last=False, pin_memory=True) for ds in datasets_val ] criterion = torch.nn.CrossEntropyLoss(ignore_index=255).cuda()
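The cropped code above builds a PolyWarmupAdamW optimizer whose step() (see the context snippet) applies a linear warmup from warmup_ratio to 1 over warmup_iter steps and then a polynomial decay towards zero at max_iter. A standalone restatement of that multiplier, with illustrative parameter values (the config's real warmup settings are not shown here):

def poly_warmup_lr_mult(step, warmup_iter, warmup_ratio, max_iter, power):
    # Same rule as PolyWarmupAdamW.step() for step < max_iter:
    # linear warmup first, then polynomial decay.
    if step < warmup_iter:
        return 1 - (1 - step / warmup_iter) * (1 - warmup_ratio)
    return (1 - step / max_iter) ** power


for step in (0, 750, 1500, 25_000, 49_999):
    print(step, poly_warmup_lr_mult(step, warmup_iter=1500, warmup_ratio=1e-6,
                                    max_iter=50_000, power=1.0))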
""" Dual-Teacher Copyright (c) 2023-present NAVER Cloud Corp. distributed under NVIDIA Source Code License for SegFormer -------------------------------------------------------- References: SegFormer: https://github.com/NVlabs/SegFormer -------------------------------------------------------- """ warnings.filterwarnings("ignore") criterion_u = torch.nn.CrossEntropyLoss(reduction='none').cuda() def train_sup(args, model, optimizer, train_loader, val_loader, criterion, max_iters, print_iters, eval_iters): train_iterator = iter(train_loader) if args.ddp: rank, world_size = dist.get_rank(), dist.get_world_size() else: rank = 0 for epoch in range(200): for i in range(len(train_loader)): model.train() try: batch_data = next(train_iterator) except: train_iterator = iter(train_loader) batch_data = next(train_iterator) image = batch_data['img'].data[0].cuda(non_blocking=True) label = batch_data['gt_semantic_seg'].data[0].squeeze(dim=1).cuda(non_blocking=True) outputs = model(image) outputs = F.interpolate(outputs, size=label.shape[1:], mode='bilinear', align_corners=False) seg_loss = criterion(outputs, label.type(torch.long)) optimizer.zero_grad() seg_loss.backward() optimizer.step() if rank == 0: lr = optimizer.param_groups[0]['lr'] logging.info("save_path:{}".format(args.save_path)) logging.info("Iter: %d; LR: %.3e; seg_loss: %f" % (i + 1, lr, seg_loss.item())) print("Iter: %d; LR: %.3e; seg_loss: %f" % (i + 1, lr, seg_loss.item())) logging.info('[iter:{}] Validation:'.format(i + 1)) print('[iter:{}] Validation:'.format(i + 1)) val_score = val(model.module, val_loader) logging.info('mIoU:{:.5f}'.format(val_score['Mean IoU'] * 100)) print('mIoU:{:.5f}'.format(val_score['Mean IoU'] * 100)) model.train() def train_dual(args, model, model_teacher, model_teacher2, optimizer, train_loader, train_loader_u, val_loader, criterion, cm_loss_fn, max_iters, print_iters, eval_iters): if args.ddp: rank, world_size = dist.get_rank(), dist.get_world_size() else: rank = 0 best_miou, best_epoch = 0, 0 for epoch in range(200): model.train() train_loader.sampler.set_epoch(epoch) train_loader_u.sampler.set_epoch(epoch) train_iterator = iter(train_loader) train_iterator_u = iter(train_loader_u) if epoch % 2 == 0: ema_model = model_teacher do_cut_mix = True do_class_mix = False else: ema_model = model_teacher2 do_cut_mix = False do_class_mix = True ema_model.train() for i in range(len(train_loader)): try: batch_data_u = next(train_iterator_u) except: train_iterator_u = iter(train_loader_u) batch_data_u = next(train_iterator_u) try: batch_data = next(train_iterator) except: train_iterator = iter(train_loader) batch_data = next(train_iterator) image = batch_data['img'].data[0].cuda(non_blocking=True) label = batch_data['gt_semantic_seg'].data[0].squeeze(dim=1).cuda(non_blocking=True) image_u = batch_data_u['img'].data[0].cuda(non_blocking=True) label_u = batch_data['gt_semantic_seg'].data[0].squeeze(dim=1).cuda(non_blocking=True) b, _, h, w = image.shape image_u_strong = deepcopy(image_u) image_u_strong = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(image_u_strong) image_u_strong = transforms.RandomGrayscale(p=0.2)(image_u_strong) if do_class_mix: loss = compute_classmix(b, h, w, criterion, cm_loss_fn, model, ema_model, image, label, image_u, image_u_strong, threshold=0.95) if do_cut_mix: loss = compute_cutmix(h, w, image, label, criterion, model, ema_model, image_u, threshold=0.95) loss_dc = compute_ic(model, ema_model, image_u, image_u_strong, criterion_u, label_u, h, w, threshold=0.95) total_loss = loss + loss_dc * 0.2 
optimizer.zero_grad() total_loss.backward() optimizer.step() if args.ddp: reduced_loss = loss.clone().detach() dist.all_reduce(reduced_loss) update_ema(model_teacher=ema_model, model=model, alpha_teacher=0.99, iteration=i) if rank == 0: if (i + 1) % print_iters == 0: lr = optimizer.param_groups[0]['lr'] logging.info("Epoch: %d; Iter: %d; LR: %.3e; loss: %f" % (epoch, i + 1, lr, loss.item())) print("Epoch: %d; Iter: %d; LR: %.3e; loss: %f" % (epoch, i + 1, lr, loss.item())) if rank == 0: logging.info('[Epoch {}] [iter:{}] Validation:'.format(epoch, i + 1)) print('[Epoch {}] [iter:{}] Validation:'.format(epoch, i + 1)) val_score = val(model.module, val_loader) miou = val_score['Mean IoU'] * 100 if miou > best_miou: best_miou = miou best_epoch = epoch logging.info('mIoU:{:.5f} Best mIOU:{:.5f} on epoch {}'.format(miou, best_miou, best_epoch)) print('mIoU:{:.5f} Best mIOU:{:.5f} on epoch {}'.format(miou, best_miou, best_epoch)) model.train() def synchronize(): if not dist.is_available(): return if not dist.is_initialized(): return world_size = dist.get_world_size() if world_size == 1: return dist.barrier() def val(model, data_loader): model.eval() preds, gts = [], [] for i, data in enumerate(data_loader): with torch.no_grad(): image = data['img'][0].cuda(non_blocking=True) label = data['gt_semantic_seg'][0].cuda(non_blocking=True) outputs = model(image) resized_outputs = F.interpolate(outputs, size=label.shape[1:], mode='bilinear', align_corners=False) preds += list(torch.argmax(resized_outputs, dim=1).cpu().numpy().astype(np.int16)) gts += list(label.cpu().numpy().astype(np.int16)) score = eval_seg.scores(gts, preds, num_classes=150) model.train() return score def val_ddp(args, epoch, model, data_loader): model.eval() preds, gts = [], [] if args.ddp: data_loader.sampler.set_epoch(epoch) rank, world_size = dist.get_rank(), dist.get_world_size() else: rank = 0 for i, data in enumerate(data_loader): with torch.no_grad(): # print(data) image = data['img'][0].cuda(non_blocking=True) label = data['gt_semantic_seg'][0].cuda(non_blocking=True) outputs = model(image) resized_outputs = F.interpolate(outputs, size=label.shape[1:], mode='bilinear', align_corners=False) preds += list(torch.argmax(resized_outputs, dim=1).cpu().numpy().astype(np.int16)) gts += list(label.cpu().numpy().astype(np.int16)) if args.ddp: preds = torch.from_numpy(np.array(preds)).cuda() gts = torch.from_numpy(np.array(gts)).cuda() dist.all_reduce(preds) dist.all_reduce(gts) gts = list(gts) preds = list(preds) score = eval_seg.scores(gts, preds, num_classes=150) return score def intersectionAndUnion(output, target, K, ignore_index): # 'K' classes, output and target sizes are N or N * L or N * H * W, each value in range 0 to K - 1. 
assert output.ndim in [1, 2, 3] assert output.shape == target.shape output = output.reshape(output.size).copy() target = target.reshape(target.size) output[np.where(target == ignore_index)[0]] = ignore_index intersection = output[np.where(output == target)[0]] area_intersection, _ = np.histogram(intersection, bins=np.arange(K + 1)) area_output, _ = np.histogram(output, bins=np.arange(K + 1)) area_target, _ = np.histogram(target, bins=np.arange(K + 1)) area_union = area_output + area_target - area_intersection return area_intersection, area_union, area_target def update_ema(model_teacher, model, alpha_teacher, iteration): with torch.no_grad(): alpha_teacher = min(1 - 1 / (iteration + 1), alpha_teacher) for ema_param, param in zip(model_teacher.parameters(), model.parameters()): ema_param.data[:] = alpha_teacher * ema_param[:].data[:] + (1 - alpha_teacher) * param[:].data[:] def setup_logger(filename='test.log'): ## setup logger # logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(filename)s - %(levelname)s: %(message)s') logFormatter = logging.Formatter('%(asctime)s - %(filename)s - %(levelname)s: %(message)s') logger = logging.getLogger() logger.setLevel(logging.INFO) fHandler = logging.FileHandler(filename, mode='w') fHandler.setFormatter(logFormatter) logger.addHandler(fHandler) cHandler = logging.StreamHandler() cHandler.setFormatter(logFormatter) logger.addHandler(cHandler) def parse_args(): parser = argparse.ArgumentParser(description='Train a segmentor') parser.add_argument('--ddp', default=False, action='store_true') parser.add_argument('--dual_teacher', default=False, action='store_true') parser.add_argument('--unimatch_aug', default=False, action='store_true') parser.add_argument('--save_path', type=str, help='log moemo') parser.add_argument('--out', default='work_dirs/res.pkl', help='output result file in pickle format') parser.add_argument('--config', help='train config file path') parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument('--load-from', help='the checkpoint file to load weights from') parser.add_argument('--resume-from', help='the checkpoint file to resume from') group_gpus = parser.add_mutually_exclusive_group() group_gpus.add_argument('--gpus', type=int, help='number of gpus to use (only applicable to non-distributed training)') group_gpus.add_argument('--gpu-ids', type=int, nargs='+', help='ids of gpus to use only applicable to non-distributed training)') parser.add_argument('--seed', type=int, default=None, help='random seed') parser.add_argument('--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.') parser.add_argument('--options', nargs='+', action=DictAction, help='custom options') parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher') parser.add_argument("--backbone", type=str) parser.add_argument("--port", default=None, type=int) parser.add_argument('--local_rank', type=int, default=0) parser.add_argument('--dc', default=False, action='store_true') args = parser.parse_args() # if 'LOCAL_RANK' not in os.environ: # os.environ['LOCAL_RANK'] = str(args.local_rank) return args def np2tmp(array, temp_file_name=None): """Save ndarray to local numpy file. Args: array (ndarray): Ndarray to save. temp_file_name (str): Numpy file name. If 'temp_file_name=None', this function will generate a file name with tempfile.NamedTemporaryFile to save ndarray. Default: None. Returns: str: The numpy file name. 
""" if temp_file_name is None: temp_file_name = tempfile.NamedTemporaryFile( suffix='.npy', delete=False).name np.save(temp_file_name, array) return temp_file_name def image_saver(input, name): """ :param name: "path/name" """ if input.dim() == 3: input = input.unsqueeze(dim=0) save_image(input.float(), str(name) + '.jpg') def main(): setup_logger() args = parse_args() mit_type = args.backbone[-1] if mit_type == '5': args.config = 'local_configs/segformer/B' + mit_type + '/segformer.b' + mit_type + '.640x640.ade.160k.py' else: args.config = 'local_configs/segformer/B' + mit_type + '/segformer.b' + mit_type + '.512x512.ade.160k.py' cfg = Config.fromfile(args.config) if args.options is not None: cfg.merge_from_dict(args.options) torch.backends.cudnn.benchmark = False # work_dir is determined in this priority: CLI > segment in file > filename if args.work_dir is not None: # update configs according to CLI args if args.work_dir is not None cfg.work_dir = args.work_dir elif cfg.get('work_dir', None) is None: # use config filename as default work_dir if cfg.work_dir is None cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0]) if args.load_from is not None: cfg.load_from = args.load_from if args.resume_from is not None: cfg.resume_from = args.resume_from if args.gpu_ids is not None: cfg.gpu_ids = args.gpu_ids else: cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus) distributed = False if args.ddp: rank, word_size = setup_distributed(port=args.port) distributed = True else: rank = 0 # create work_dir mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) # dump config cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) # init the logger before other steps timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) log_file = osp.join(cfg.work_dir, f'{timestamp}.log') logger = get_root_logger(log_file=log_file, log_level=cfg.log_level) meta = dict() # log env info env_info_dict = collect_env() env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()]) dash_line = '-' * 60 + '\n' print('Environment info:\n' + dash_line + env_info + '\n' + dash_line) meta['env_info'] = env_info # log some basic info print(f'Config:\n{cfg.pretty_text}') # set random seeds if args.seed is not None: print(f'Set random seed to {args.seed}, deterministic: ' f'{args.deterministic}') set_random_seed(args.seed, deterministic=args.deterministic) cfg.seed = args.seed meta['seed'] = args.seed meta['exp_name'] = osp.basename(args.config) model = MiT_SegFormer(backbone=args.backbone, num_classes=150, embedding_dim=256, pretrained=True) if args.ddp: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) model.cuda() model_teacher = MiT_SegFormer(backbone=args.backbone + '_ema', num_classes=150, embedding_dim=256, pretrained=True).cuda() for p in model_teacher.parameters(): p.requires_grad = False model_teacher2 = MiT_SegFormer(backbone=args.backbone + '_ema', num_classes=150, embedding_dim=256, pretrained=True).cuda() for p in model_teacher2.parameters(): p.requires_grad = False param_groups = model.get_param_groups() trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad) if rank == 0: print('trainable_params:', trainable_params) shuffle = True if args.ddp: local_rank = int(os.environ["LOCAL_RANK"]) model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True, ) shuffle = False max_iters = 50000 print_iters = 100 eval_iters = 5000 optimizer = PolyWarmupAdamW( params=[ 
{ "params": param_groups[0], "lr": cfg.optimizer.lr, "weight_decay": cfg.optimizer.weight_decay, }, { "params": param_groups[1], "lr": cfg.optimizer.lr, "weight_decay": 0.0, }, { "params": param_groups[2], "lr": cfg.optimizer.lr * 10, "weight_decay": cfg.optimizer.weight_decay, }, ], lr=cfg.optimizer.lr, weight_decay=cfg.optimizer.weight_decay, betas=cfg.optimizer.betas, warmup_iter=cfg.scheduler.warmup_iters, max_iter=max_iters, warmup_ratio=cfg.scheduler.warmup_ratio, power=cfg.scheduler.power ) supervised_full = False if supervised_full: datasets = [build_dataset(cfg.data.train)] else: datasets = [build_dataset(cfg.data.train_semi_l)] datasets_u = [build_dataset(cfg.data.train_semi_u)] datasets_val = [build_dataset(cfg.data.val)] batch_size = 4 train_loader = [ build_dataloader( ds, samples_per_gpu=batch_size, workers_per_gpu=0, num_gpus=1, dist=distributed, shuffle=shuffle, seed=cfg.seed, drop_last=True, pin_memory=True) for ds in datasets ] train_loader_u = [ build_dataloader( ds, samples_per_gpu=batch_size, workers_per_gpu=0, num_gpus=1, dist=distributed, shuffle=shuffle, seed=cfg.seed, drop_last=True, pin_memory=True) for ds in datasets_u ] val_loader = [ build_dataloader( ds, samples_per_gpu=1, workers_per_gpu=0, num_gpus=1, dist=distributed, shuffle=False, seed=cfg.seed, drop_last=False, pin_memory=True) for ds in datasets_val ] criterion = torch.nn.CrossEntropyLoss(ignore_index=255).cuda()
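The update_ema helper in the all_code above ramps the EMA momentum with min(1 - 1/(iteration + 1), alpha_teacher), so the very first update copies the student exactly and later updates move slowly. A minimal sketch of that rule on a toy module:

import copy

import torch
import torch.nn as nn


@torch.no_grad()
def ema_update(teacher: nn.Module, student: nn.Module, alpha: float, iteration: int):
    # Ramp momentum up from 0 so the teacher tracks the student closely at the start.
    alpha = min(1 - 1 / (iteration + 1), alpha)
    for t, s in zip(teacher.parameters(), student.parameters()):
        t.mul_(alpha).add_(s, alpha=1 - alpha)


student = nn.Linear(4, 2)
teacher = copy.deepcopy(student)
for p in teacher.parameters():
    p.requires_grad_(False)

ema_update(teacher, student, alpha=0.99, iteration=0)    # effective alpha = 0: teacher := student
ema_update(teacher, student, alpha=0.99, iteration=500)  # effective alpha = 0.99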
cm_loss_fn = ClassMixLoss(weight=None, reduction='none', ignore_index=255)
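The target next_line instantiates ClassMixLoss, which per the context snippet is simply a reduction='none' cross-entropy multiplied by a per-pixel confidence weight and then averaged. The same pattern in isolation, with toy shapes and an illustrative weight value:

import torch
import torch.nn as nn

ce = nn.CrossEntropyLoss(reduction='none', ignore_index=255)

logits = torch.randn(2, 150, 8, 8)               # N x C x H x W
pseudo_label = torch.randint(0, 150, (2, 8, 8))  # N x H x W
pixel_weight = torch.full((2, 8, 8), 0.7)        # e.g. fraction of pixels above the confidence threshold

loss = torch.mean(ce(logits, pseudo_label) * pixel_weight)
print(loss.item())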
10
2023-10-19 04:04:31+00:00
12k
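The intersectionAndUnion helper in the sample above computes per-class areas by histogramming the (flattened) prediction and target. A small worked example of that histogram trick, assuming already-flattened 1-D inputs:

import numpy as np


def intersection_and_union(pred, target, num_classes, ignore_index=255):
    # Histogram-based per-class areas; values outside [0, num_classes) fall
    # outside the bins and are ignored, which is how ignore_index is handled.
    pred = pred.copy()
    pred[target == ignore_index] = ignore_index
    intersection = pred[pred == target]
    area_inter, _ = np.histogram(intersection, bins=np.arange(num_classes + 1))
    area_pred, _ = np.histogram(pred, bins=np.arange(num_classes + 1))
    area_target, _ = np.histogram(target, bins=np.arange(num_classes + 1))
    return area_inter, area_pred + area_target - area_inter


pred = np.array([0, 0, 1, 1, 2, 255])
gt = np.array([0, 1, 1, 1, 2, 255])
inter, union = intersection_and_union(pred, gt, num_classes=3)
print(inter / np.maximum(union, 1))  # per-class IoU: [0.5, 0.667, 1.0]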
SLDGroup/G-CASCADE
lib/maxxvit_4out.py
[ { "identifier": "build_model_with_cfg", "path": "lib/models_timm/helpers.py", "snippet": "def build_model_with_cfg(\n model_cls: Callable,\n variant: str,\n pretrained: bool,\n pretrained_cfg: Optional[Dict] = None,\n model_cfg: Optional[Any] = None,\n feature_cfg: Optional[Dict] = None,\n pretrained_strict: bool = True,\n pretrained_filter_fn: Optional[Callable] = None,\n pretrained_custom_load: bool = False,\n kwargs_filter: Optional[Tuple[str]] = None,\n **kwargs):\n \"\"\" Build model with specified default_cfg and optional model_cfg\n\n This helper fn aids in the construction of a model including:\n * handling default_cfg and associated pretrained weight loading\n * passing through optional model_cfg for models with config based arch spec\n * features_only model adaptation\n * pruning config / model adaptation\n\n Args:\n model_cls (nn.Module): model class\n variant (str): model variant name\n pretrained (bool): load pretrained weights\n pretrained_cfg (dict): model's pretrained weight/task config\n model_cfg (Optional[Dict]): model's architecture config\n feature_cfg (Optional[Dict]: feature extraction adapter config\n pretrained_strict (bool): load pretrained weights strictly\n pretrained_filter_fn (Optional[Callable]): filter callable for pretrained weights\n pretrained_custom_load (bool): use custom load fn, to load numpy or other non PyTorch weights\n kwargs_filter (Optional[Tuple]): kwargs to filter before passing to model\n **kwargs: model args passed through to model __init__\n \"\"\"\n pruned = kwargs.pop('pruned', False)\n features = False\n feature_cfg = feature_cfg or {}\n\n # resolve and update model pretrained config and model kwargs\n pretrained_cfg = resolve_pretrained_cfg(variant, pretrained_cfg=pretrained_cfg)\n update_pretrained_cfg_and_kwargs(pretrained_cfg, kwargs, kwargs_filter)\n pretrained_cfg.setdefault('architecture', variant)\n\n # Setup for feature extraction wrapper done at end of this fn\n if kwargs.pop('features_only', False):\n features = True\n feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4))\n if 'out_indices' in kwargs:\n feature_cfg['out_indices'] = kwargs.pop('out_indices')\n\n # Build the model\n model = model_cls(**kwargs) if model_cfg is None else model_cls(cfg=model_cfg, **kwargs)\n model.pretrained_cfg = pretrained_cfg\n model.default_cfg = model.pretrained_cfg # alias for backwards compat\n \n if pruned:\n model = adapt_model_from_file(model, variant)\n\n # For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats\n num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000))\n if pretrained:\n if pretrained_custom_load:\n # FIXME improve custom load trigger\n load_custom_pretrained(model, pretrained_cfg=pretrained_cfg)\n else:\n load_pretrained(\n model,\n pretrained_cfg=pretrained_cfg,\n num_classes=num_classes_pretrained,\n in_chans=kwargs.get('in_chans', 3),\n filter_fn=pretrained_filter_fn,\n strict=pretrained_strict)\n\n # Wrap the model in a feature extraction module if enabled\n if features:\n feature_cls = FeatureListNet\n if 'feature_cls' in feature_cfg:\n feature_cls = feature_cfg.pop('feature_cls')\n if isinstance(feature_cls, str):\n feature_cls = feature_cls.lower()\n if 'hook' in feature_cls:\n feature_cls = FeatureHookNet\n elif feature_cls == 'fx':\n feature_cls = FeatureGraphNet\n else:\n assert False, f'Unknown feature class {feature_cls}'\n model = feature_cls(model, **feature_cfg)\n model.pretrained_cfg = 
pretrained_cfg_for_features(pretrained_cfg) # add back default_cfg\n model.default_cfg = model.pretrained_cfg # alias for backwards compat\n \n return model" }, { "identifier": "checkpoint_seq", "path": "lib/models_timm/helpers.py", "snippet": "def checkpoint_seq(\n functions,\n x,\n every=1,\n flatten=False,\n skip_last=False,\n preserve_rng_state=True\n):\n r\"\"\"A helper function for checkpointing sequential models.\n\n Sequential models execute a list of modules/functions in order\n (sequentially). Therefore, we can divide such a sequence into segments\n and checkpoint each segment. All segments except run in :func:`torch.no_grad`\n manner, i.e., not storing the intermediate activations. The inputs of each\n checkpointed segment will be saved for re-running the segment in the backward pass.\n\n See :func:`~torch.utils.checkpoint.checkpoint` on how checkpointing works.\n\n .. warning::\n Checkpointing currently only supports :func:`torch.autograd.backward`\n and only if its `inputs` argument is not passed. :func:`torch.autograd.grad`\n is not supported.\n\n .. warning:\n At least one of the inputs needs to have :code:`requires_grad=True` if\n grads are needed for model inputs, otherwise the checkpointed part of the\n model won't have gradients.\n\n Args:\n functions: A :class:`torch.nn.Sequential` or the list of modules or functions to run sequentially.\n x: A Tensor that is input to :attr:`functions`\n every: checkpoint every-n functions (default: 1)\n flatten (bool): flatten nn.Sequential of nn.Sequentials\n skip_last (bool): skip checkpointing the last function in the sequence if True\n preserve_rng_state (bool, optional, default=True): Omit stashing and restoring\n the RNG state during each checkpoint.\n\n Returns:\n Output of running :attr:`functions` sequentially on :attr:`*inputs`\n\n Example:\n >>> model = nn.Sequential(...)\n >>> input_var = checkpoint_seq(model, input_var, every=2)\n \"\"\"\n def run_function(start, end, functions):\n def forward(_x):\n for j in range(start, end + 1):\n _x = functions[j](_x)\n return _x\n return forward\n\n if isinstance(functions, torch.nn.Sequential):\n functions = functions.children()\n if flatten:\n functions = chain.from_iterable(functions)\n if not isinstance(functions, (tuple, list)):\n functions = tuple(functions)\n\n num_checkpointed = len(functions)\n if skip_last:\n num_checkpointed -= 1\n end = -1\n for start in range(0, num_checkpointed, every):\n end = min(start + every - 1, num_checkpointed - 1)\n x = checkpoint(run_function(start, end, functions), x, preserve_rng_state=preserve_rng_state)\n if skip_last:\n return run_function(end + 1, len(functions) - 1, functions)(x)\n return x" }, { "identifier": "named_apply", "path": "lib/models_timm/helpers.py", "snippet": "def named_apply(fn: Callable, module: nn.Module, name='', depth_first=True, include_root=False) -> nn.Module:\n if not depth_first and include_root:\n fn(module=module, name=name)\n for child_name, child_module in module.named_children():\n child_name = '.'.join((name, child_name)) if name else child_name\n named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True)\n if depth_first and include_root:\n fn(module=module, name=name)\n return module" }, { "identifier": "register_notrace_function", "path": "lib/models_timm/fx_features.py", "snippet": "def register_notrace_function(func: Callable):\n \"\"\"\n Decorator for functions which ought not to be traced through\n \"\"\"\n _autowrap_functions.add(func)\n return func" }, { 
"identifier": "ClassifierHead", "path": "lib/models_timm/layers/classifier.py", "snippet": "class ClassifierHead(nn.Module):\n \"\"\"Classifier head w/ configurable global pooling and dropout.\"\"\"\n\n def __init__(self, in_chs, num_classes, pool_type='avg', drop_rate=0., use_conv=False):\n super(ClassifierHead, self).__init__()\n self.drop_rate = drop_rate\n self.global_pool, num_pooled_features = _create_pool(in_chs, num_classes, pool_type, use_conv=use_conv)\n self.fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv)\n self.flatten = nn.Flatten(1) if use_conv and pool_type else nn.Identity()\n\n def forward(self, x, pre_logits: bool = False):\n x = self.global_pool(x)\n if self.drop_rate:\n x = F.dropout(x, p=float(self.drop_rate), training=self.training)\n if pre_logits:\n return x.flatten(1)\n else:\n x = self.fc(x)\n return self.flatten(x)" }, { "identifier": "DropPath", "path": "lib/models_timm/layers/drop.py", "snippet": "class DropPath(nn.Module):\n \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n \"\"\"\n def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True):\n super(DropPath, self).__init__()\n self.drop_prob = drop_prob\n self.scale_by_keep = scale_by_keep\n\n def forward(self, x):\n return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)\n\n def extra_repr(self):\n return f'drop_prob={round(self.drop_prob,3):0.3f}'" }, { "identifier": "Mlp", "path": "lib/models_timm/layers/mlp.py", "snippet": "class Mlp(nn.Module):\n \"\"\" MLP as used in Vision Transformer, MLP-Mixer and related networks\n \"\"\"\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, bias=True, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n bias = to_2tuple(bias)\n drop_probs = to_2tuple(drop)\n\n self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0])\n self.act = act_layer()\n self.drop1 = nn.Dropout(drop_probs[0])\n self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1])\n self.drop2 = nn.Dropout(drop_probs[1])\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop1(x)\n x = self.fc2(x)\n x = self.drop2(x)\n return x" }, { "identifier": "ConvMlp", "path": "lib/models_timm/layers/mlp.py", "snippet": "class ConvMlp(nn.Module):\n \"\"\" MLP using 1x1 convs that keeps spatial dims\n \"\"\"\n def __init__(\n self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU,\n norm_layer=None, bias=True, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n bias = to_2tuple(bias)\n\n self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=bias[0])\n self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()\n self.act = act_layer()\n self.drop = nn.Dropout(drop)\n self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=bias[1])\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.norm(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n return x" }, { "identifier": "LayerNorm", "path": "lib/models_timm/layers/norm.py", "snippet": "class LayerNorm(nn.LayerNorm):\n \"\"\" LayerNorm w/ fast norm option\n \"\"\"\n def __init__(self, num_channels, eps=1e-6, affine=True):\n super().__init__(num_channels, eps=eps, elementwise_affine=affine)\n self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no 
globals)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n if self._fast_norm:\n x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)\n else:\n x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)\n return x" }, { "identifier": "LayerNorm2d", "path": "lib/models_timm/layers/norm.py", "snippet": "class LayerNorm2d(nn.LayerNorm):\n \"\"\" LayerNorm for channels of '2D' spatial NCHW tensors \"\"\"\n def __init__(self, num_channels, eps=1e-6, affine=True):\n super().__init__(num_channels, eps=eps, elementwise_affine=affine)\n self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = x.permute(0, 2, 3, 1)\n if self._fast_norm:\n x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)\n else:\n x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)\n x = x.permute(0, 3, 1, 2)\n return x" }, { "identifier": "trunc_normal_tf_", "path": "lib/models_timm/layers/weight_init.py", "snippet": "def trunc_normal_tf_(tensor, mean=0., std=1., a=-2., b=2.):\n # type: (Tensor, float, float, float, float) -> Tensor\n r\"\"\"Fills the input Tensor with values drawn from a truncated\n normal distribution. The values are effectively drawn from the\n normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`\n with values outside :math:`[a, b]` redrawn until they are within\n the bounds. The method used for generating the random values works\n best when :math:`a \\leq \\text{mean} \\leq b`.\n\n NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the\n bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0\n and the result is subsquently scaled and shifted by the mean and std args.\n\n Args:\n tensor: an n-dimensional `torch.Tensor`\n mean: the mean of the normal distribution\n std: the standard deviation of the normal distribution\n a: the minimum cutoff value\n b: the maximum cutoff value\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.trunc_normal_(w)\n \"\"\"\n with torch.no_grad():\n _trunc_normal_(tensor, 0, 1.0, a, b)\n tensor.mul_(std).add_(mean)\n return tensor" }, { "identifier": "create_attn", "path": "lib/models_timm/layers/create_attn.py", "snippet": "def create_attn(attn_type, channels, **kwargs):\n module_cls = get_attn(attn_type)\n if module_cls is not None:\n # NOTE: it's expected the first (positional) argument of all attention layers is the # input channels\n return module_cls(channels, **kwargs)\n return None" }, { "identifier": "create_conv2d", "path": "lib/models_timm/layers/create_conv2d.py", "snippet": "def create_conv2d(in_channels, out_channels, kernel_size, **kwargs):\n \"\"\" Select a 2d convolution implementation based on arguments\n Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d.\n\n Used extensively by EfficientNet, MobileNetv3 and related networks.\n \"\"\"\n if isinstance(kernel_size, list):\n assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently\n if 'groups' in kwargs:\n groups = kwargs.pop('groups')\n if groups == in_channels:\n kwargs['depthwise'] = True\n else:\n assert groups == 1\n # We're going to use only lists for defining the MixedConv2d kernel groups,\n # ints, tuples, other iterables will continue to pass to normal conv and specify h, w.\n m = MixedConv2d(in_channels, out_channels, kernel_size, **kwargs)\n else:\n depthwise = 
kwargs.pop('depthwise', False)\n # for DW out_channels must be multiple of in_channels as must have out_channels % groups == 0\n groups = in_channels if depthwise else kwargs.pop('groups', 1)\n if 'num_experts' in kwargs and kwargs['num_experts'] > 0:\n m = CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs)\n else:\n m = create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs)\n return m" }, { "identifier": "get_act_layer", "path": "lib/models_timm/layers/create_act.py", "snippet": "def get_act_layer(name: Union[Type[nn.Module], str] = 'relu'):\n \"\"\" Activation Layer Factory\n Fetching activation layers by name with this function allows export or torch script friendly\n functions to be returned dynamically based on current config.\n \"\"\"\n if not name:\n return None\n if not isinstance(name, str):\n # callable, module, etc\n return name\n if not (is_no_jit() or is_exportable() or is_scriptable()):\n if name in _ACT_LAYER_ME:\n return _ACT_LAYER_ME[name]\n if not (is_no_jit() or is_exportable()):\n if name in _ACT_LAYER_JIT:\n return _ACT_LAYER_JIT[name]\n return _ACT_LAYER_DEFAULT[name]" }, { "identifier": "get_norm_layer", "path": "lib/models_timm/layers/create_norm.py", "snippet": "def get_norm_layer(norm_layer):\n assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial))\n norm_kwargs = {}\n\n # unbind partial fn, so args can be rebound later\n if isinstance(norm_layer, functools.partial):\n norm_kwargs.update(norm_layer.keywords)\n norm_layer = norm_layer.func\n\n if isinstance(norm_layer, str):\n layer_name = norm_layer.replace('_', '')\n norm_layer = _NORM_MAP.get(layer_name, None)\n elif norm_layer in _NORM_TYPES:\n norm_layer = norm_layer\n elif isinstance(norm_layer, types.FunctionType):\n # if function type, assume it is a lambda/fn that creates a norm layer\n norm_layer = norm_layer\n else:\n type_name = norm_layer.__name__.lower().replace('_', '')\n norm_layer = _NORM_MAP.get(type_name, None)\n assert norm_layer is not None, f\"No equivalent norm layer for {type_name}\"\n\n if norm_kwargs:\n norm_layer = functools.partial(norm_layer, **norm_kwargs) # bind/rebind args\n return norm_layer" }, { "identifier": "get_norm_act_layer", "path": "lib/models_timm/layers/create_norm_act.py", "snippet": "def get_norm_act_layer(norm_layer, act_layer=None):\n assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial))\n assert act_layer is None or isinstance(act_layer, (type, str, types.FunctionType, functools.partial))\n norm_act_kwargs = {}\n\n # unbind partial fn, so args can be rebound later\n if isinstance(norm_layer, functools.partial):\n norm_act_kwargs.update(norm_layer.keywords)\n norm_layer = norm_layer.func\n\n if isinstance(norm_layer, str):\n layer_name = norm_layer.replace('_', '').lower().split('-')[0]\n norm_act_layer = _NORM_ACT_MAP.get(layer_name, None)\n elif norm_layer in _NORM_ACT_TYPES:\n norm_act_layer = norm_layer\n elif isinstance(norm_layer, types.FunctionType):\n # if function type, must be a lambda/fn that creates a norm_act layer\n norm_act_layer = norm_layer\n else:\n type_name = norm_layer.__name__.lower()\n if type_name.startswith('batchnorm'):\n norm_act_layer = BatchNormAct2d\n elif type_name.startswith('groupnorm'):\n norm_act_layer = GroupNormAct\n elif type_name.startswith('groupnorm1'):\n norm_act_layer = functools.partial(GroupNormAct, num_groups=1)\n elif type_name.startswith('layernorm2d'):\n norm_act_layer = LayerNormAct2d\n elif 
type_name.startswith('layernorm'):\n norm_act_layer = LayerNormAct\n else:\n assert False, f\"No equivalent norm_act layer for {type_name}\"\n\n if norm_act_layer in _NORM_ACT_REQUIRES_ARG:\n # pass `act_layer` through for backwards compat where `act_layer=None` implies no activation.\n # In the future, may force use of `apply_act` with `act_layer` arg bound to relevant NormAct types\n norm_act_kwargs.setdefault('act_layer', act_layer)\n if norm_act_kwargs:\n norm_act_layer = functools.partial(norm_act_layer, **norm_act_kwargs) # bind/rebind args\n return norm_act_layer" }, { "identifier": "to_2tuple", "path": "lib/models_timm/layers/helpers.py", "snippet": "def _ntuple(n):\n def parse(x):\ndef make_divisible(v, divisor=8, min_value=None, round_limit=.9):\ndef extend_tuple(x, n):" }, { "identifier": "_assert", "path": "lib/models_timm/layers/trace_utils.py", "snippet": "def _assert(condition: bool, message: str):\n assert condition, message" }, { "identifier": "register_model", "path": "lib/models_timm/registry.py", "snippet": "def register_model(fn):\n # lookup containing module\n mod = sys.modules[fn.__module__]\n module_name_split = fn.__module__.split('.')\n module_name = module_name_split[-1] if len(module_name_split) else ''\n\n # add model to __all__ in module\n model_name = fn.__name__\n if hasattr(mod, '__all__'):\n mod.__all__.append(model_name)\n else:\n mod.__all__ = [model_name]\n\n # add entries to registry dict/sets\n _model_entrypoints[model_name] = fn\n _model_to_module[model_name] = module_name\n _module_to_models[module_name].add(model_name)\n has_valid_pretrained = False # check if model has a pretrained url to allow filtering on this\n if hasattr(mod, 'default_cfgs') and model_name in mod.default_cfgs:\n # this will catch all models that have entrypoint matching cfg key, but miss any aliasing\n # entrypoints or non-matching combos\n cfg = mod.default_cfgs[model_name]\n has_valid_pretrained = (\n ('url' in cfg and 'http' in cfg['url']) or\n ('file' in cfg and cfg['file']) or\n ('hf_hub_id' in cfg and cfg['hf_hub_id'])\n )\n _model_pretrained_cfgs[model_name] = mod.default_cfgs[model_name]\n if has_valid_pretrained:\n _model_has_pretrained.add(model_name)\n return fn" }, { "identifier": "RelPosMlp", "path": "lib/models_timm/vision_transformer_relpos.py", "snippet": "class RelPosMlp(nn.Module):\n def __init__(\n self,\n window_size,\n num_heads=8,\n hidden_dim=128,\n prefix_tokens=0,\n mode='cr',\n pretrained_window_size=(0, 0)\n ):\n super().__init__()\n self.window_size = window_size\n self.window_area = self.window_size[0] * self.window_size[1]\n self.prefix_tokens = prefix_tokens\n self.num_heads = num_heads\n self.bias_shape = (self.window_area,) * 2 + (num_heads,)\n if mode == 'swin':\n self.bias_act = nn.Sigmoid()\n self.bias_gain = 16\n mlp_bias = (True, False)\n elif mode == 'rw':\n self.bias_act = nn.Tanh()\n self.bias_gain = 4\n mlp_bias = True\n else:\n self.bias_act = nn.Identity()\n self.bias_gain = None\n mlp_bias = True\n\n self.mlp = Mlp(\n 2, # x, y\n hidden_features=hidden_dim,\n out_features=num_heads,\n act_layer=nn.ReLU,\n bias=mlp_bias,\n drop=(0.125, 0.)\n )\n\n self.register_buffer(\n \"relative_position_index\",\n gen_relative_position_index(window_size),\n persistent=False)\n\n # get relative_coords_table\n self.register_buffer(\n \"rel_coords_log\",\n gen_relative_log_coords(window_size, pretrained_window_size, mode=mode),\n persistent=False)\n\n def get_bias(self) -> torch.Tensor:\n relative_position_bias = self.mlp(self.rel_coords_log)\n if 
self.relative_position_index is not None:\n relative_position_bias = relative_position_bias.view(-1, self.num_heads)[\n self.relative_position_index.view(-1)] # Wh*Ww,Wh*Ww,nH\n relative_position_bias = relative_position_bias.view(self.bias_shape)\n relative_position_bias = relative_position_bias.permute(2, 0, 1)\n relative_position_bias = self.bias_act(relative_position_bias)\n if self.bias_gain is not None:\n relative_position_bias = self.bias_gain * relative_position_bias\n if self.prefix_tokens:\n relative_position_bias = F.pad(relative_position_bias, [self.prefix_tokens, 0, self.prefix_tokens, 0])\n return relative_position_bias.unsqueeze(0).contiguous()\n\n def forward(self, attn, shared_rel_pos: Optional[torch.Tensor] = None):\n return attn + self.get_bias()" }, { "identifier": "RelPosBias", "path": "lib/models_timm/vision_transformer_relpos.py", "snippet": "class RelPosBias(nn.Module):\n\n def __init__(self, window_size, num_heads, prefix_tokens=0):\n super().__init__()\n assert prefix_tokens <= 1\n self.window_size = window_size\n self.window_area = window_size[0] * window_size[1]\n self.bias_shape = (self.window_area + prefix_tokens,) * 2 + (num_heads,)\n\n num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 * prefix_tokens\n self.relative_position_bias_table = nn.Parameter(torch.zeros(num_relative_distance, num_heads))\n self.register_buffer(\n \"relative_position_index\",\n gen_relative_position_index(self.window_size, class_token=prefix_tokens > 0),\n persistent=False,\n )\n\n self.init_weights()\n\n def init_weights(self):\n trunc_normal_(self.relative_position_bias_table, std=.02)\n\n def get_bias(self) -> torch.Tensor:\n relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]\n # win_h * win_w, win_h * win_w, num_heads\n relative_position_bias = relative_position_bias.view(self.bias_shape).permute(2, 0, 1)\n return relative_position_bias.unsqueeze(0).contiguous()\n\n def forward(self, attn, shared_rel_pos: Optional[torch.Tensor] = None):\n return attn + self.get_bias()" } ]
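The maxxvit context above alternates local block (window) attention with dilated grid attention. A bare-bones sketch of the two partitioning reshapes in the NHWC layout that the record's blocks permute into; these helper functions are illustrative stand-ins, not the record's own code, though they follow the usual timm convention:

import torch


def window_partition(x, window_size):
    # Illustrative: NHWC -> (num_windows * N, win_h, win_w, C), contiguous local blocks.
    N, H, W, C = x.shape
    wh, ww = window_size
    x = x.view(N, H // wh, wh, W // ww, ww, C)
    return x.permute(0, 1, 3, 2, 4, 5).reshape(-1, wh, ww, C)


def grid_partition(x, grid_size):
    # Illustrative: NHWC -> (num_grids * N, gh, gw, C), strided "grid" groups.
    N, H, W, C = x.shape
    gh, gw = grid_size
    x = x.view(N, gh, H // gh, gw, W // gw, C)
    return x.permute(0, 2, 4, 1, 3, 5).reshape(-1, gh, gw, C)


x = torch.randn(1, 8, 8, 16)
print(window_partition(x, (4, 4)).shape)  # torch.Size([4, 4, 4, 16])
print(grid_partition(x, (4, 4)).shape)    # torch.Size([4, 4, 4, 16])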
import math import torch from collections import OrderedDict from dataclasses import dataclass, replace from functools import partial from typing import Callable, Optional, Union, Tuple, List from torch import nn from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from lib.models_timm.helpers import build_model_with_cfg, checkpoint_seq, named_apply from lib.models_timm.fx_features import register_notrace_function from lib.models_timm.layers import Mlp, ConvMlp, DropPath, ClassifierHead, trunc_normal_tf_, LayerNorm2d, LayerNorm from lib.models_timm.layers import create_attn, get_act_layer, get_norm_layer, get_norm_act_layer, create_conv2d from lib.models_timm.layers import to_2tuple, extend_tuple, make_divisible, _assert from lib.models_timm.registry import register_model from lib.models_timm.vision_transformer_relpos import RelPosMlp, RelPosBias # FIXME move these to common location
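The import block above pulls in dataclass and replace, the pair presumably used to derive per-variant configs from a shared base. A toy illustration of that pattern; TransformerCfg here is a hypothetical, pared-down stand-in, not the record's MaxxVitTransformerCfg:

from dataclasses import dataclass, replace
from typing import Tuple


@dataclass
class TransformerCfg:
    # Hypothetical stand-in with only a few fields, for illustration.
    dim_head: int = 32
    window_size: Tuple[int, int] = (7, 7)
    rel_pos_type: str = 'bias'


base = TransformerCfg()
mlp_variant = replace(base, rel_pos_type='mlp', window_size=(8, 8))
print(base)
print(mlp_variant)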
9,584
def init_weights(self, scheme=''): named_apply(partial(_init_transformer, scheme=scheme), self.attn_block) named_apply(partial(_init_transformer, scheme=scheme), self.attn_grid) named_apply(partial(_init_conv, scheme=scheme), self.conv) def forward(self, x): # NCHW format x = self.conv(x) if not self.nchw_attn: x = x.permute(0, 2, 3, 1) # to NHWC (channels-last) x = self.attn_block(x) x = self.attn_grid(x) if not self.nchw_attn: x = x.permute(0, 3, 1, 2) # back to NCHW return x class ParallelMaxxVitBlock(nn.Module): """ MaxVit block with parallel cat(window + grid), one FF Experimental timm block. """ def __init__( self, dim, dim_out, stride=1, num_conv=2, conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(), transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), drop_path=0., ): super().__init__() conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock if num_conv > 1: convs = [conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)] convs += [conv_cls(dim_out, dim_out, cfg=conv_cfg, drop_path=drop_path)] * (num_conv - 1) self.conv = nn.Sequential(*convs) else: self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path) self.attn = ParallelPartitionAttention(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path) def init_weights(self, scheme=''): named_apply(partial(_init_transformer, scheme=scheme), self.attn) named_apply(partial(_init_conv, scheme=scheme), self.conv) def forward(self, x): x = self.conv(x) x = x.permute(0, 2, 3, 1) x = self.attn(x) x = x.permute(0, 3, 1, 2) return x class MaxxVitStage(nn.Module): def __init__( self, in_chs: int, out_chs: int, stride: int = 2, depth: int = 4, feat_size: Tuple[int, int] = (14, 14), block_types: Union[str, Tuple[str]] = 'C', transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(), drop_path: Union[float, List[float]] = 0., ): super().__init__() self.grad_checkpointing = False block_types = extend_tuple(block_types, depth) blocks = [] for i, t in enumerate(block_types): block_stride = stride if i == 0 else 1 assert t in ('C', 'T', 'M', 'PM') if t == 'C': conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock blocks += [conv_cls( in_chs, out_chs, stride=block_stride, cfg=conv_cfg, drop_path=drop_path[i], )] elif t == 'T': rel_pos_cls = get_rel_pos_cls(transformer_cfg, feat_size) blocks += [TransformerBlock2d( in_chs, out_chs, stride=block_stride, rel_pos_cls=rel_pos_cls, cfg=transformer_cfg, drop_path=drop_path[i], )] elif t == 'M': blocks += [MaxxVitBlock( in_chs, out_chs, stride=block_stride, conv_cfg=conv_cfg, transformer_cfg=transformer_cfg, drop_path=drop_path[i], )] elif t == 'PM': blocks += [ParallelMaxxVitBlock( in_chs, out_chs, stride=block_stride, conv_cfg=conv_cfg, transformer_cfg=transformer_cfg, drop_path=drop_path[i], )] in_chs = out_chs self.blocks = nn.Sequential(*blocks) def forward(self, x): if self.grad_checkpointing and not torch.jit.is_scripting():
""" MaxVit and CoAtNet Vision Transformer - CNN Hybrids in PyTorch This is a from-scratch implementation of both CoAtNet and MaxVit in PyTorch. 99% of the implementation was done from papers, however last minute some adjustments were made based on the (as yet unfinished?) public code release https://github.com/google-research/maxvit There are multiple sets of models defined for both architectures. Typically, names with a `_rw` suffix are my own original configs prior to referencing https://github.com/google-research/maxvit. These configs work well and appear to be a bit faster / lower resource than the paper. The models without extra prefix / suffix' (coatnet_0_224, maxvit_tiny_224, etc), are intended to match paper, BUT, without any official pretrained weights it's difficult to confirm a 100% match. # FIXME / WARNING This impl remains a WIP, some configs and models may vanish or change... Papers: MaxViT: Multi-Axis Vision Transformer - https://arxiv.org/abs/2204.01697 @article{tu2022maxvit, title={MaxViT: Multi-Axis Vision Transformer}, author={Tu, Zhengzhong and Talebi, Hossein and Zhang, Han and Yang, Feng and Milanfar, Peyman and Bovik, Alan and Li, Yinxiao}, journal={ECCV}, year={2022}, } CoAtNet: Marrying Convolution and Attention for All Data Sizes - https://arxiv.org/abs/2106.04803 @article{DBLP:journals/corr/abs-2106-04803, author = {Zihang Dai and Hanxiao Liu and Quoc V. Le and Mingxing Tan}, title = {CoAtNet: Marrying Convolution and Attention for All Data Sizes}, journal = {CoRR}, volume = {abs/2106.04803}, year = {2021} } Hacked together by / Copyright 2022, Ross Wightman """ __all__ = ['MaxxVitCfg', 'MaxxVitConvCfg', 'MaxxVitTransformerCfg', 'MaxxVit'] def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.95, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'first_conv': 'stem.conv1', 'classifier': 'head.fc', 'fixed_input_size': True, **kwargs } default_cfgs = { # Fiddling with configs / defaults / still pretraining 'coatnet_pico_rw_224': _cfg(url=''), 'coatnet_nano_rw_224': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_nano_rw_224_sw-f53093b4.pth', crop_pct=0.9), 'coatnet_0_rw_224': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_0_rw_224_sw-a6439706.pth'), 'coatnet_1_rw_224': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_1_rw_224_sw-5cae1ea8.pth' ), 'coatnet_2_rw_224': _cfg(url=''), 'coatnet_3_rw_224': _cfg(url=''), # Highly experimental configs 'coatnet_bn_0_rw_224': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_bn_0_rw_224_sw-c228e218.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, crop_pct=0.95), 'coatnet_rmlp_nano_rw_224': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_nano_rw_224_sw-bd1d51b3.pth', crop_pct=0.9), 'coatnet_rmlp_0_rw_224': _cfg(url=''), 'coatnet_rmlp_1_rw_224': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_1_rw_224_sw-9051e6c3.pth'), 'coatnet_rmlp_2_rw_224': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_2_rw_224_sw-5ccfac55.pth'), 'coatnet_rmlp_3_rw_224': _cfg(url=''), 'coatnet_nano_cc_224': _cfg(url=''), 
'coatnext_nano_rw_224': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnext_nano_rw_224_ad-22cb71c2.pth', crop_pct=0.9), # Trying to be like the CoAtNet paper configs 'coatnet_0_224': _cfg(url=''), 'coatnet_1_224': _cfg(url=''), 'coatnet_2_224': _cfg(url=''), 'coatnet_3_224': _cfg(url=''), 'coatnet_4_224': _cfg(url=''), 'coatnet_5_224': _cfg(url=''), # Experimental configs 'maxvit_pico_rw_256': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_nano_rw_256': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_nano_rw_256_sw-fb127241.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_tiny_rw_224': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_tiny_rw_224_sw-7d0dffeb.pth'), 'maxvit_tiny_rw_256': _cfg( url='', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_rmlp_pico_rw_256': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_pico_rw_256_sw-8d82f2c6.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_rmlp_nano_rw_256': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_nano_rw_256_sw-c17bb0d6.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_rmlp_tiny_rw_256': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_tiny_rw_256_sw-bbef0ff5.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_rmlp_small_rw_224': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_small_rw_224_sw-6ef0ae4f.pth', crop_pct=0.9, ), 'maxvit_rmlp_small_rw_256': _cfg( url='', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_tiny_pm_256': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxxvit_rmlp_nano_rw_256': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_nano_rw_256_sw-0325d459.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxxvit_rmlp_tiny_rw_256': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxxvit_rmlp_small_rw_256': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_small_rw_256_sw-37e217ff.pth', input_size=(3, 256, 256), pool_size=(8, 8)), # Trying to be like the MaxViT paper configs 'maxvit_tiny_224': _cfg(url=''), 'maxvit_small_224': _cfg(url=''), 'maxvit_base_224': _cfg(url=''), 'maxvit_large_224': _cfg(url=''), 'maxvit_xlarge_224': _cfg(url=''), } @dataclass class MaxxVitTransformerCfg: dim_head: int = 32 expand_ratio: float = 4.0 expand_first: bool = True shortcut_bias: bool = True attn_bias: bool = True attn_drop: float = 0. proj_drop: float = 0. 
pool_type: str = 'avg2' rel_pos_type: str = 'bias' rel_pos_dim: int = 512 # for relative position types w/ MLP partition_ratio: int = 32 window_size: Optional[Tuple[int, int]] = None grid_size: Optional[Tuple[int, int]] = None init_values: Optional[float] = None act_layer: str = 'gelu' norm_layer: str = 'layernorm2d' norm_layer_cl: str = 'layernorm' norm_eps: float = 1e-6 def __post_init__(self): if self.grid_size is not None: self.grid_size = to_2tuple(self.grid_size) if self.window_size is not None: self.window_size = to_2tuple(self.window_size) if self.grid_size is None: self.grid_size = self.window_size @dataclass class MaxxVitConvCfg: block_type: str = 'mbconv' expand_ratio: float = 4.0 expand_output: bool = True # calculate expansion channels from output (vs input chs) kernel_size: int = 3 group_size: int = 1 # 1 == depthwise pre_norm_act: bool = False # activation after pre-norm output_bias: bool = True # bias for shortcut + final 1x1 projection conv stride_mode: str = 'dw' # stride done via one of 'pool', '1x1', 'dw' pool_type: str = 'avg2' downsample_pool_type: str = 'avg2' attn_early: bool = False # apply attn between conv2 and norm2, instead of after norm2 attn_layer: str = 'se' attn_act_layer: str = 'silu' attn_ratio: float = 0.25 init_values: Optional[float] = 1e-6 # for ConvNeXt block, ignored by MBConv act_layer: str = 'gelu' norm_layer: str = '' norm_layer_cl: str = '' norm_eps: Optional[float] = None def __post_init__(self): # mbconv vs convnext blocks have different defaults, set in post_init to avoid explicit config args assert self.block_type in ('mbconv', 'convnext') use_mbconv = self.block_type == 'mbconv' if not self.norm_layer: self.norm_layer = 'batchnorm2d' if use_mbconv else 'layernorm2d' if not self.norm_layer_cl and not use_mbconv: self.norm_layer_cl = 'layernorm' if self.norm_eps is None: self.norm_eps = 1e-5 if use_mbconv else 1e-6 self.downsample_pool_type = self.downsample_pool_type or self.pool_type @dataclass class MaxxVitCfg: embed_dim: Tuple[int, ...] = (96, 192, 384, 768) depths: Tuple[int, ...] = (2, 3, 5, 2) block_type: Tuple[Union[str, Tuple[str, ...]], ...] 
= ('C', 'C', 'T', 'T') stem_width: Union[int, Tuple[int, int]] = 64 stem_bias: bool = True conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg() transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg() weight_init: str = 'vit_eff' def _rw_coat_cfg( stride_mode='pool', pool_type='avg2', conv_output_bias=False, conv_attn_early=False, conv_attn_act_layer='relu', conv_norm_layer='', transformer_shortcut_bias=True, transformer_norm_layer='layernorm2d', transformer_norm_layer_cl='layernorm', init_values=None, rel_pos_type='bias', rel_pos_dim=512, ): # 'RW' timm variant models were created and trained before seeing https://github.com/google-research/maxvit # Common differences for initial timm models: # - pre-norm layer in MZBConv included an activation after norm # - mbconv expansion calculated from input instead of output chs # - mbconv shortcut and final 1x1 conv did not have a bias # - SE act layer was relu, not silu # - mbconv uses silu in timm, not gelu # - expansion in attention block done via output proj, not input proj # Variable differences (evolved over training initial models): # - avg pool with kernel_size=2 favoured downsampling (instead of maxpool for coat) # - SE attention was between conv2 and norm/act # - default to avg pool for mbconv downsample instead of 1x1 or dw conv # - transformer block shortcut has no bias return dict( conv_cfg=MaxxVitConvCfg( stride_mode=stride_mode, pool_type=pool_type, pre_norm_act=True, expand_output=False, output_bias=conv_output_bias, attn_early=conv_attn_early, attn_act_layer=conv_attn_act_layer, act_layer='silu', norm_layer=conv_norm_layer, ), transformer_cfg=MaxxVitTransformerCfg( expand_first=False, shortcut_bias=transformer_shortcut_bias, pool_type=pool_type, init_values=init_values, norm_layer=transformer_norm_layer, norm_layer_cl=transformer_norm_layer_cl, rel_pos_type=rel_pos_type, rel_pos_dim=rel_pos_dim, ), ) def _rw_max_cfg( stride_mode='dw', pool_type='avg2', conv_output_bias=False, conv_attn_ratio=1 / 16, conv_norm_layer='', transformer_norm_layer='layernorm2d', transformer_norm_layer_cl='layernorm', window_size=None, dim_head=32, init_values=None, rel_pos_type='bias', rel_pos_dim=512, ): # 'RW' timm variant models were created and trained before seeing https://github.com/google-research/maxvit # Differences of initial timm models: # - mbconv expansion calculated from input instead of output chs # - mbconv shortcut and final 1x1 conv did not have a bias # - mbconv uses silu in timm, not gelu # - expansion in attention block done via output proj, not input proj return dict( conv_cfg=MaxxVitConvCfg( stride_mode=stride_mode, pool_type=pool_type, expand_output=False, output_bias=conv_output_bias, attn_ratio=conv_attn_ratio, act_layer='silu', norm_layer=conv_norm_layer, ), transformer_cfg=MaxxVitTransformerCfg( expand_first=False, pool_type=pool_type, dim_head=dim_head, window_size=window_size, init_values=init_values, norm_layer=transformer_norm_layer, norm_layer_cl=transformer_norm_layer_cl, rel_pos_type=rel_pos_type, rel_pos_dim=rel_pos_dim, ), ) def _next_cfg( stride_mode='dw', pool_type='avg2', conv_norm_layer='layernorm2d', conv_norm_layer_cl='layernorm', transformer_norm_layer='layernorm2d', transformer_norm_layer_cl='layernorm', window_size=None, init_values=1e-6, rel_pos_type='mlp', # MLP by default for maxxvit rel_pos_dim=512, ): # For experimental models with convnext instead of mbconv init_values = to_2tuple(init_values) return dict( conv_cfg=MaxxVitConvCfg( block_type='convnext', stride_mode=stride_mode, pool_type=pool_type, 
expand_output=False, init_values=init_values[0], norm_layer=conv_norm_layer, norm_layer_cl=conv_norm_layer_cl, ), transformer_cfg=MaxxVitTransformerCfg( expand_first=False, pool_type=pool_type, window_size=window_size, init_values=init_values[1], norm_layer=transformer_norm_layer, norm_layer_cl=transformer_norm_layer_cl, rel_pos_type=rel_pos_type, rel_pos_dim=rel_pos_dim, ), ) model_cfgs = dict( # Fiddling with configs / defaults / still pretraining coatnet_pico_rw_224=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(2, 3, 5, 2), stem_width=(32, 64), **_rw_max_cfg( # using newer max defaults here conv_output_bias=True, conv_attn_ratio=0.25, ), ), coatnet_nano_rw_224=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(3, 4, 6, 3), stem_width=(32, 64), **_rw_max_cfg( # using newer max defaults here stride_mode='pool', conv_output_bias=True, conv_attn_ratio=0.25, ), ), coatnet_0_rw_224=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 3, 7, 2), # deeper than paper '0' model stem_width=(32, 64), **_rw_coat_cfg( conv_attn_early=True, transformer_shortcut_bias=False, ), ), coatnet_1_rw_224=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), stem_width=(32, 64), **_rw_coat_cfg( stride_mode='dw', conv_attn_early=True, transformer_shortcut_bias=False, ) ), coatnet_2_rw_224=MaxxVitCfg( embed_dim=(128, 256, 512, 1024), depths=(2, 6, 14, 2), stem_width=(64, 128), **_rw_coat_cfg( stride_mode='dw', conv_attn_act_layer='silu', init_values=1e-6, ), ), coatnet_3_rw_224=MaxxVitCfg( embed_dim=(192, 384, 768, 1536), depths=(2, 6, 14, 2), stem_width=(96, 192), **_rw_coat_cfg( stride_mode='dw', conv_attn_act_layer='silu', init_values=1e-6, ), ), # Highly experimental configs coatnet_bn_0_rw_224=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 3, 7, 2), # deeper than paper '0' model stem_width=(32, 64), **_rw_coat_cfg( stride_mode='dw', conv_attn_early=True, transformer_shortcut_bias=False, transformer_norm_layer='batchnorm2d', ) ), coatnet_rmlp_nano_rw_224=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(3, 4, 6, 3), stem_width=(32, 64), **_rw_max_cfg( conv_output_bias=True, conv_attn_ratio=0.25, rel_pos_type='mlp', rel_pos_dim=384, ), ), coatnet_rmlp_0_rw_224=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 3, 7, 2), # deeper than paper '0' model stem_width=(32, 64), **_rw_coat_cfg( stride_mode='dw', rel_pos_type='mlp', ), ), coatnet_rmlp_1_rw_224=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), stem_width=(32, 64), **_rw_coat_cfg( pool_type='max', conv_attn_early=True, transformer_shortcut_bias=False, rel_pos_type='mlp', rel_pos_dim=384, # was supposed to be 512, woops ), ), coatnet_rmlp_2_rw_224=MaxxVitCfg( embed_dim=(128, 256, 512, 1024), depths=(2, 6, 14, 2), stem_width=(64, 128), **_rw_coat_cfg( stride_mode='dw', conv_attn_act_layer='silu', init_values=1e-6, rel_pos_type='mlp' ), ), coatnet_rmlp_3_rw_224=MaxxVitCfg( embed_dim=(192, 384, 768, 1536), depths=(2, 6, 14, 2), stem_width=(96, 192), **_rw_coat_cfg( stride_mode='dw', conv_attn_act_layer='silu', init_values=1e-6, rel_pos_type='mlp' ), ), coatnet_nano_cc_224=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(3, 4, 6, 3), stem_width=(32, 64), block_type=('C', 'C', ('C', 'T'), ('C', 'T')), **_rw_coat_cfg(), ), coatnext_nano_rw_224=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(3, 4, 6, 3), stem_width=(32, 64), weight_init='normal', **_next_cfg( rel_pos_type='bias', init_values=(1e-5, None) ), ), # Trying to be like the CoAtNet paper configs coatnet_0_224=MaxxVitCfg( embed_dim=(96, 192, 384, 768), 
depths=(2, 3, 5, 2), stem_width=64, ), coatnet_1_224=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), stem_width=64, ), coatnet_2_224=MaxxVitCfg( embed_dim=(128, 256, 512, 1024), depths=(2, 6, 14, 2), stem_width=128, ), coatnet_3_224=MaxxVitCfg( embed_dim=(192, 384, 768, 1536), depths=(2, 6, 14, 2), stem_width=192, ), coatnet_4_224=MaxxVitCfg( embed_dim=(192, 384, 768, 1536), depths=(2, 12, 28, 2), stem_width=192, ), coatnet_5_224=MaxxVitCfg( embed_dim=(256, 512, 1280, 2048), depths=(2, 12, 28, 2), stem_width=192, ), # Experimental MaxVit configs maxvit_pico_rw_256=MaxxVitCfg( embed_dim=(32, 64, 128, 256), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(24, 32), **_rw_max_cfg(), ), maxvit_nano_rw_256=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(1, 2, 3, 1), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg(), ), maxvit_tiny_rw_224=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg(), ), maxvit_tiny_rw_256=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg(), ), maxvit_rmlp_pico_rw_256=MaxxVitCfg( embed_dim=(32, 64, 128, 256), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(24, 32), **_rw_max_cfg(rel_pos_type='mlp'), ), maxvit_rmlp_nano_rw_256=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(1, 2, 3, 1), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg(rel_pos_type='mlp'), ), maxvit_rmlp_tiny_rw_256=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg(rel_pos_type='mlp'), ), maxvit_rmlp_small_rw_224=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg( rel_pos_type='mlp', init_values=1e-6, ), ), maxvit_rmlp_small_rw_256=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg( rel_pos_type='mlp', init_values=1e-6, ), ), maxvit_tiny_pm_256=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('PM',) * 4, stem_width=(32, 64), **_rw_max_cfg(), ), maxxvit_rmlp_nano_rw_256=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(1, 2, 3, 1), block_type=('M',) * 4, stem_width=(32, 64), weight_init='normal', **_next_cfg(), ), maxxvit_rmlp_tiny_rw_256=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(32, 64), **_next_cfg(), ), maxxvit_rmlp_small_rw_256=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(48, 96), **_next_cfg(), ), # Trying to be like the MaxViT paper configs maxvit_tiny_224=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=64, ), maxvit_small_224=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=64, ), maxvit_base_224=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), block_type=('M',) * 4, stem_width=64, ), maxvit_large_224=MaxxVitCfg( embed_dim=(128, 256, 512, 1024), depths=(2, 6, 14, 2), block_type=('M',) * 4, stem_width=128, ), maxvit_xlarge_224=MaxxVitCfg( embed_dim=(192, 384, 768, 1536), depths=(2, 6, 14, 2), block_type=('M',) * 4, stem_width=192, ), ) class Attention2d(nn.Module): """ multi-head attention for 2D NCHW tensors""" def __init__( self, dim: int, dim_out: Optional[int] = None, dim_head: int = 32, bias: bool = True, expand_first: bool = True, 
rel_pos_cls: Callable = None, attn_drop: float = 0., proj_drop: float = 0. ): super().__init__() dim_out = dim_out or dim dim_attn = dim_out if expand_first else dim self.num_heads = dim_attn // dim_head self.dim_head = dim_head self.scale = dim_head ** -0.5 self.qkv = nn.Conv2d(dim, dim_attn * 3, 1, bias=bias) self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Conv2d(dim_attn, dim_out, 1, bias=bias) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): B, C, H, W = x.shape q, k, v = self.qkv(x).view(B, self.num_heads, self.dim_head * 3, -1).chunk(3, dim=2) attn = (q.transpose(-2, -1) @ k) * self.scale if self.rel_pos is not None: attn = self.rel_pos(attn) elif shared_rel_pos is not None: attn = attn + shared_rel_pos attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W) x = self.proj(x) x = self.proj_drop(x) return x class AttentionCl(nn.Module): """ Channels-last multi-head attention (B, ..., C) """ def __init__( self, dim: int, dim_out: Optional[int] = None, dim_head: int = 32, bias: bool = True, expand_first: bool = True, rel_pos_cls: Callable = None, attn_drop: float = 0., proj_drop: float = 0. ): super().__init__() dim_out = dim_out or dim dim_attn = dim_out if expand_first and dim_out > dim else dim assert dim_attn % dim_head == 0, 'attn dim should be divisible by head_dim' self.num_heads = dim_attn // dim_head self.dim_head = dim_head self.scale = dim_head ** -0.5 self.qkv = nn.Linear(dim, dim_attn * 3, bias=bias) self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim_attn, dim_out, bias=bias) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): B = x.shape[0] restore_shape = x.shape[:-1] q, k, v = self.qkv(x).view(B, -1, self.num_heads, self.dim_head * 3).transpose(1, 2).chunk(3, dim=3) attn = (q @ k.transpose(-2, -1)) * self.scale if self.rel_pos is not None: attn = self.rel_pos(attn, shared_rel_pos=shared_rel_pos) elif shared_rel_pos is not None: attn = attn + shared_rel_pos attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(restore_shape + (-1,)) x = self.proj(x) x = self.proj_drop(x) return x class LayerScale(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): gamma = self.gamma return x.mul_(gamma) if self.inplace else x * gamma class LayerScale2d(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): gamma = self.gamma.view(1, -1, 1, 1) return x.mul_(gamma) if self.inplace else x * gamma class Downsample2d(nn.Module): """ A downsample pooling module supporting several maxpool and avgpool modes * 'max' - MaxPool2d w/ kernel_size 3, stride 2, padding 1 * 'max2' - MaxPool2d w/ kernel_size = stride = 2 * 'avg' - AvgPool2d w/ kernel_size 3, stride 2, padding 1 * 'avg2' - AvgPool2d w/ kernel_size = stride = 2 """ def __init__( self, dim: int, dim_out: int, pool_type: str = 'avg2', bias: bool = True, ): super().__init__() assert pool_type in ('max', 'max2', 'avg', 'avg2') if pool_type == 'max': self.pool = nn.MaxPool2d(kernel_size=3, stride=2, 
padding=1) elif pool_type == 'max2': self.pool = nn.MaxPool2d(2) # kernel_size == stride == 2 elif pool_type == 'avg': self.pool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1, count_include_pad=False) else: self.pool = nn.AvgPool2d(2) # kernel_size == stride == 2 if dim != dim_out: self.expand = nn.Conv2d(dim, dim_out, 1, bias=bias) else: self.expand = nn.Identity() def forward(self, x): x = self.pool(x) # spatial downsample x = self.expand(x) # expand chs return x def _init_transformer(module, name, scheme=''): if isinstance(module, (nn.Conv2d, nn.Linear)): if scheme == 'normal': nn.init.normal_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif scheme == 'trunc_normal': trunc_normal_tf_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif scheme == 'xavier_normal': nn.init.xavier_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) else: # vit like nn.init.xavier_uniform_(module.weight) if module.bias is not None: if 'mlp' in name: nn.init.normal_(module.bias, std=1e-6) else: nn.init.zeros_(module.bias) class TransformerBlock2d(nn.Module): """ Transformer block with 2D downsampling '2D' NCHW tensor layout Some gains can be seen on GPU using a 1D / CL block, BUT w/ the need to switch back/forth to NCHW for spatial pooling, the benefit is minimal so ended up using just this variant for CoAt configs. This impl was faster on TPU w/ PT XLA than the 1D experiment. """ def __init__( self, dim: int, dim_out: int, stride: int = 1, rel_pos_cls: Callable = None, cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), drop_path: float = 0., ): super().__init__() norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps) act_layer = get_act_layer(cfg.act_layer) if stride == 2: self.shortcut = Downsample2d(dim, dim_out, pool_type=cfg.pool_type, bias=cfg.shortcut_bias) self.norm1 = nn.Sequential(OrderedDict([ ('norm', norm_layer(dim)), ('down', Downsample2d(dim, dim, pool_type=cfg.pool_type)), ])) else: assert dim == dim_out self.shortcut = nn.Identity() self.norm1 = norm_layer(dim) self.attn = Attention2d( dim, dim_out, dim_head=cfg.dim_head, expand_first=cfg.expand_first, bias=cfg.attn_bias, rel_pos_cls=rel_pos_cls, attn_drop=cfg.attn_drop, proj_drop=cfg.proj_drop ) self.ls1 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim_out) self.mlp = ConvMlp( in_features=dim_out, hidden_features=int(dim_out * cfg.expand_ratio), act_layer=act_layer, drop=cfg.proj_drop) self.ls2 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def init_weights(self, scheme=''): named_apply(partial(_init_transformer, scheme=scheme), self) def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): x = self.shortcut(x) + self.drop_path1(self.ls1(self.attn(self.norm1(x), shared_rel_pos=shared_rel_pos))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x def _init_conv(module, name, scheme=''): if isinstance(module, nn.Conv2d): if scheme == 'normal': nn.init.normal_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif scheme == 'trunc_normal': trunc_normal_tf_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif scheme == 'xavier_normal': nn.init.xavier_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) else: # efficientnet like fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels fan_out //= module.groups nn.init.normal_(module.weight, 0, math.sqrt(2.0 / fan_out)) if module.bias is not None: nn.init.zeros_(module.bias) def num_groups(group_size, channels): if not group_size: # 0 or None return 1 # normal conv with 1 group else: # NOTE group_size == 1 -> depthwise conv assert channels % group_size == 0 return channels // group_size class MbConvBlock(nn.Module): """ Pre-Norm Conv Block - 1x1 - kxk - 1x1, w/ inverted bottleneck (expand) """ def __init__( self, in_chs: int, out_chs: int, stride: int = 1, dilation: Tuple[int, int] = (1, 1), cfg: MaxxVitConvCfg = MaxxVitConvCfg(), drop_path: float = 0. ): super(MbConvBlock, self).__init__() norm_act_layer = partial(get_norm_act_layer(cfg.norm_layer, cfg.act_layer), eps=cfg.norm_eps) mid_chs = make_divisible((out_chs if cfg.expand_output else in_chs) * cfg.expand_ratio) groups = num_groups(cfg.group_size, mid_chs) if stride == 2: self.shortcut = Downsample2d(in_chs, out_chs, pool_type=cfg.pool_type, bias=cfg.output_bias) else: self.shortcut = nn.Identity() assert cfg.stride_mode in ('pool', '1x1', 'dw') stride_pool, stride_1, stride_2 = 1, 1, 1 if cfg.stride_mode == 'pool': # NOTE this is not described in paper, experiment to find faster option that doesn't stride in 1x1 stride_pool, dilation_2 = stride, dilation[1] # FIXME handle dilation of avg pool elif cfg.stride_mode == '1x1': # NOTE I don't like this option described in paper, 1x1 w/ stride throws info away stride_1, dilation_2 = stride, dilation[1] else: stride_2, dilation_2 = stride, dilation[0] self.pre_norm = norm_act_layer(in_chs, apply_act=cfg.pre_norm_act) if stride_pool > 1: self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type) else: self.down = nn.Identity() self.conv1_1x1 = create_conv2d(in_chs, mid_chs, 1, stride=stride_1) self.norm1 = norm_act_layer(mid_chs) self.conv2_kxk = create_conv2d( mid_chs, mid_chs, cfg.kernel_size, stride=stride_2, dilation=dilation_2, groups=groups) attn_kwargs = {} if isinstance(cfg.attn_layer, str): if cfg.attn_layer == 'se' or cfg.attn_layer == 'eca': attn_kwargs['act_layer'] = cfg.attn_act_layer attn_kwargs['rd_channels'] = int(cfg.attn_ratio * (out_chs if cfg.expand_output else mid_chs)) # two different orderings for SE and norm2 (due to some weights and trials using SE before norm2) if cfg.attn_early: self.se_early = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs) self.norm2 = norm_act_layer(mid_chs) self.se = None else: self.se_early = None self.norm2 = norm_act_layer(mid_chs) self.se = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs) self.conv3_1x1 = create_conv2d(mid_chs, out_chs, 1, 
bias=cfg.output_bias) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def init_weights(self, scheme=''): named_apply(partial(_init_conv, scheme=scheme), self) def forward(self, x): shortcut = self.shortcut(x) x = self.pre_norm(x) x = self.down(x) # 1x1 expansion conv & norm-act x = self.conv1_1x1(x) x = self.norm1(x) # depthwise / grouped 3x3 conv w/ SE (or other) channel attention & norm-act x = self.conv2_kxk(x) if self.se_early is not None: x = self.se_early(x) x = self.norm2(x) if self.se is not None: x = self.se(x) # 1x1 linear projection to output width x = self.conv3_1x1(x) x = self.drop_path(x) + shortcut return x class ConvNeXtBlock(nn.Module): """ ConvNeXt Block """ def __init__( self, in_chs: int, out_chs: Optional[int] = None, kernel_size: int = 7, stride: int = 1, dilation: Tuple[int, int] = (1, 1), cfg: MaxxVitConvCfg = MaxxVitConvCfg(), conv_mlp: bool = True, drop_path: float = 0. ): super().__init__() out_chs = out_chs or in_chs act_layer = get_act_layer(cfg.act_layer) if conv_mlp: norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps) mlp_layer = ConvMlp else: assert 'layernorm' in cfg.norm_layer norm_layer = LayerNorm mlp_layer = Mlp self.use_conv_mlp = conv_mlp if stride == 2: self.shortcut = Downsample2d(in_chs, out_chs) elif in_chs != out_chs: self.shortcut = nn.Conv2d(in_chs, out_chs, kernel_size=1, bias=cfg.output_bias) else: self.shortcut = nn.Identity() assert cfg.stride_mode in ('pool', 'dw') stride_pool, stride_dw = 1, 1 # FIXME handle dilation? if cfg.stride_mode == 'pool': stride_pool = stride else: stride_dw = stride if stride_pool == 2: self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type) else: self.down = nn.Identity() self.conv_dw = create_conv2d( in_chs, out_chs, kernel_size=kernel_size, stride=stride_dw, dilation=dilation[1], depthwise=True, bias=cfg.output_bias) self.norm = norm_layer(out_chs) self.mlp = mlp_layer(out_chs, int(cfg.expand_ratio * out_chs), bias=cfg.output_bias, act_layer=act_layer) if conv_mlp: self.ls = LayerScale2d(out_chs, cfg.init_values) if cfg.init_values else nn.Identity() else: self.ls = LayerScale(out_chs, cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x): shortcut = self.shortcut(x) x = self.down(x) x = self.conv_dw(x) if self.use_conv_mlp: x = self.norm(x) x = self.mlp(x) x = self.ls(x) else: x = x.permute(0, 2, 3, 1) x = self.norm(x) x = self.mlp(x) x = self.ls(x) x = x.permute(0, 3, 1, 2) x = self.drop_path(x) + shortcut return x def window_partition(x, window_size: List[int]): B, H, W, C = x.shape _assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})') _assert(W % window_size[1] == 0, '') x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) return windows @register_notrace_function # reason: int argument is a Proxy def window_reverse(windows, window_size: List[int], img_size: List[int]): H, W = img_size C = windows.shape[-1] x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) return x def grid_partition(x, grid_size: List[int]): B, H, W, C = x.shape _assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}') _assert(W % grid_size[1] == 0, '') x = x.view(B, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1], C) windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, grid_size[0], grid_size[1], C) return windows @register_notrace_function # reason: int argument is a Proxy def grid_reverse(windows, grid_size: List[int], img_size: List[int]): H, W = img_size C = windows.shape[-1] x = windows.view(-1, H // grid_size[0], W // grid_size[1], grid_size[0], grid_size[1], C) x = x.permute(0, 3, 1, 4, 2, 5).contiguous().view(-1, H, W, C) return x def get_rel_pos_cls(cfg: MaxxVitTransformerCfg, window_size): rel_pos_cls = None if cfg.rel_pos_type == 'mlp': rel_pos_cls = partial(RelPosMlp, window_size=window_size, hidden_dim=cfg.rel_pos_dim) elif cfg.rel_pos_type == 'bias': rel_pos_cls = partial(RelPosBias, window_size=window_size) return rel_pos_cls class PartitionAttentionCl(nn.Module): """ Grid or Block partition + Attn + FFN. NxC 'channels last' tensor layout. """ def __init__( self, dim: int, partition_type: str = 'block', cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), drop_path: float = 0., ): super().__init__() norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps) # NOTE this block is channels-last act_layer = get_act_layer(cfg.act_layer) self.partition_block = partition_type == 'block' self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size) rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size) self.norm1 = norm_layer(dim) self.attn = AttentionCl( dim, dim, dim_head=cfg.dim_head, bias=cfg.attn_bias, rel_pos_cls=rel_pos_cls, attn_drop=cfg.attn_drop, proj_drop=cfg.proj_drop, ) self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp( in_features=dim, hidden_features=int(dim * cfg.expand_ratio), act_layer=act_layer, drop=cfg.proj_drop) self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def _partition_attn(self, x): img_size = x.shape[1:3] if self.partition_block: partitioned = window_partition(x, self.partition_size) else: partitioned = grid_partition(x, self.partition_size) partitioned = self.attn(partitioned) if self.partition_block: x = window_reverse(partitioned, self.partition_size, img_size) else: x = grid_reverse(partitioned, self.partition_size, img_size) return x def forward(self, x): x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x)))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class ParallelPartitionAttention(nn.Module): """ Experimental. Grid and Block partition + single FFN NxC tensor layout. """ def __init__( self, dim: int, cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), drop_path: float = 0., ): super().__init__() assert dim % 2 == 0 norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps) # NOTE this block is channels-last act_layer = get_act_layer(cfg.act_layer) assert cfg.window_size == cfg.grid_size self.partition_size = to_2tuple(cfg.window_size) rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size) self.norm1 = norm_layer(dim) self.attn_block = AttentionCl( dim, dim // 2, dim_head=cfg.dim_head, bias=cfg.attn_bias, rel_pos_cls=rel_pos_cls, attn_drop=cfg.attn_drop, proj_drop=cfg.proj_drop, ) self.attn_grid = AttentionCl( dim, dim // 2, dim_head=cfg.dim_head, bias=cfg.attn_bias, rel_pos_cls=rel_pos_cls, attn_drop=cfg.attn_drop, proj_drop=cfg.proj_drop, ) self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp( in_features=dim, hidden_features=int(dim * cfg.expand_ratio), out_features=dim, act_layer=act_layer, drop=cfg.proj_drop) self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def _partition_attn(self, x): img_size = x.shape[1:3] partitioned_block = window_partition(x, self.partition_size) partitioned_block = self.attn_block(partitioned_block) x_window = window_reverse(partitioned_block, self.partition_size, img_size) partitioned_grid = grid_partition(x, self.partition_size) partitioned_grid = self.attn_grid(partitioned_grid) x_grid = grid_reverse(partitioned_grid, self.partition_size, img_size) return torch.cat([x_window, x_grid], dim=-1) def forward(self, x): x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x)))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x def window_partition_nchw(x, window_size: List[int]): B, C, H, W = x.shape _assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})') _assert(W % window_size[1] == 0, '') x = x.view(B, C, H // window_size[0], window_size[0], W // window_size[1], window_size[1]) windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, C, window_size[0], window_size[1]) return windows @register_notrace_function # reason: int argument is a Proxy def window_reverse_nchw(windows, window_size: List[int], img_size: List[int]): H, W = img_size C = windows.shape[1] x = windows.view(-1, H // window_size[0], W // window_size[1], C, window_size[0], window_size[1]) x = x.permute(0, 3, 1, 4, 2, 5).contiguous().view(-1, C, H, W) return x def grid_partition_nchw(x, grid_size: List[int]): B, C, H, W = x.shape _assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}') _assert(W % grid_size[1] == 0, '') x = x.view(B, C, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1]) windows = x.permute(0, 3, 5, 1, 2, 4).contiguous().view(-1, C, grid_size[0], grid_size[1]) return windows @register_notrace_function # reason: int argument is a Proxy def grid_reverse_nchw(windows, grid_size: List[int], img_size: List[int]): H, W = img_size C = windows.shape[1] x = windows.view(-1, H // grid_size[0], W // grid_size[1], C, grid_size[0], grid_size[1]) x = x.permute(0, 3, 4, 1, 5, 2).contiguous().view(-1, C, H, W) return x class PartitionAttention2d(nn.Module): """ Grid or Block partition + Attn + FFN '2D' NCHW tensor layout. """ def __init__( self, dim: int, partition_type: str = 'block', cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), drop_path: float = 0., ): super().__init__() norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps) # NOTE this block is channels-last act_layer = get_act_layer(cfg.act_layer) self.partition_block = partition_type == 'block' self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size) rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size) self.norm1 = norm_layer(dim) self.attn = Attention2d( dim, dim, dim_head=cfg.dim_head, bias=cfg.attn_bias, rel_pos_cls=rel_pos_cls, attn_drop=cfg.attn_drop, proj_drop=cfg.proj_drop, ) self.ls1 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = ConvMlp( in_features=dim, hidden_features=int(dim * cfg.expand_ratio), act_layer=act_layer, drop=cfg.proj_drop) self.ls2 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def _partition_attn(self, x): img_size = x.shape[-2:] if self.partition_block: partitioned = window_partition_nchw(x, self.partition_size) else: partitioned = grid_partition_nchw(x, self.partition_size) partitioned = self.attn(partitioned) if self.partition_block: x = window_reverse_nchw(partitioned, self.partition_size, img_size) else: x = grid_reverse_nchw(partitioned, self.partition_size, img_size) return x def forward(self, x): x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x)))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class MaxxVitBlock(nn.Module): """ MaxVit conv, window partition + FFN , grid partition + FFN """ def __init__( self, dim: int, dim_out: int, stride: int = 1, conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(), transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), use_nchw_attn: bool = False, # FIXME move to cfg? True is ~20-30% faster on TPU, 5-10% slower on GPU drop_path: float = 0., ): super().__init__() conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path) attn_kwargs = dict(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path) partition_layer = PartitionAttention2d if use_nchw_attn else PartitionAttentionCl self.nchw_attn = use_nchw_attn self.attn_block = partition_layer(**attn_kwargs) self.attn_grid = partition_layer(partition_type='grid', **attn_kwargs) def init_weights(self, scheme=''): named_apply(partial(_init_transformer, scheme=scheme), self.attn_block) named_apply(partial(_init_transformer, scheme=scheme), self.attn_grid) named_apply(partial(_init_conv, scheme=scheme), self.conv) def forward(self, x): # NCHW format x = self.conv(x) if not self.nchw_attn: x = x.permute(0, 2, 3, 1) # to NHWC (channels-last) x = self.attn_block(x) x = self.attn_grid(x) if not self.nchw_attn: x = x.permute(0, 3, 1, 2) # back to NCHW return x class ParallelMaxxVitBlock(nn.Module): """ MaxVit block with parallel cat(window + grid), one FF Experimental timm block. 
""" def __init__( self, dim, dim_out, stride=1, num_conv=2, conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(), transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), drop_path=0., ): super().__init__() conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock if num_conv > 1: convs = [conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)] convs += [conv_cls(dim_out, dim_out, cfg=conv_cfg, drop_path=drop_path)] * (num_conv - 1) self.conv = nn.Sequential(*convs) else: self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path) self.attn = ParallelPartitionAttention(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path) def init_weights(self, scheme=''): named_apply(partial(_init_transformer, scheme=scheme), self.attn) named_apply(partial(_init_conv, scheme=scheme), self.conv) def forward(self, x): x = self.conv(x) x = x.permute(0, 2, 3, 1) x = self.attn(x) x = x.permute(0, 3, 1, 2) return x class MaxxVitStage(nn.Module): def __init__( self, in_chs: int, out_chs: int, stride: int = 2, depth: int = 4, feat_size: Tuple[int, int] = (14, 14), block_types: Union[str, Tuple[str]] = 'C', transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(), drop_path: Union[float, List[float]] = 0., ): super().__init__() self.grad_checkpointing = False block_types = extend_tuple(block_types, depth) blocks = [] for i, t in enumerate(block_types): block_stride = stride if i == 0 else 1 assert t in ('C', 'T', 'M', 'PM') if t == 'C': conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock blocks += [conv_cls( in_chs, out_chs, stride=block_stride, cfg=conv_cfg, drop_path=drop_path[i], )] elif t == 'T': rel_pos_cls = get_rel_pos_cls(transformer_cfg, feat_size) blocks += [TransformerBlock2d( in_chs, out_chs, stride=block_stride, rel_pos_cls=rel_pos_cls, cfg=transformer_cfg, drop_path=drop_path[i], )] elif t == 'M': blocks += [MaxxVitBlock( in_chs, out_chs, stride=block_stride, conv_cfg=conv_cfg, transformer_cfg=transformer_cfg, drop_path=drop_path[i], )] elif t == 'PM': blocks += [ParallelMaxxVitBlock( in_chs, out_chs, stride=block_stride, conv_cfg=conv_cfg, transformer_cfg=transformer_cfg, drop_path=drop_path[i], )] in_chs = out_chs self.blocks = nn.Sequential(*blocks) def forward(self, x): if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
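A note on the completion line above: `checkpoint_seq(self.blocks, x)` runs the stage's blocks under activation checkpointing, so intermediate activations are recomputed during the backward pass instead of being stored. The following is only a minimal sketch of that idea, assuming nothing but `torch`; timm's own `checkpoint_seq` helper is more featureful, and `checkpoint_seq_minimal` is a hypothetical name, not timm's implementation.

import torch
from torch.utils.checkpoint import checkpoint


def checkpoint_seq_minimal(blocks, x):
    # Hypothetical stand-in for timm's checkpoint_seq: wrap each block's forward
    # in torch.utils.checkpoint.checkpoint, so its intermediate activations are
    # freed after forward and recomputed during backward, trading extra compute
    # for lower peak memory. (use_reentrant=False assumes a reasonably recent torch.)
    for block in blocks:
        x = checkpoint(block, x, use_reentrant=False)
    return x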
1
2023-10-24 17:49:10+00:00
12k
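The `window_partition` / `grid_partition` pair defined in the MaxxVit code above is the core of MaxViT's multi-axis attention: block attention mixes a contiguous local window, while grid attention mixes a strided, image-wide grid of the same size. The sketch below is illustrative only (not part of the dataset record) and assumes just `torch`; the toy tensor stores each pixel's (row, col) in its channels so the two groupings are easy to inspect.

import torch


def window_partition(x, window_size):
    # (B, H, W, C) -> (B * num_windows, win_h, win_w, C): contiguous local tiles.
    B, H, W, C = x.shape
    x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)


def grid_partition(x, grid_size):
    # (B, H, W, C) -> (B * num_grids, grid_h, grid_w, C): strided, "dilated" groups.
    B, H, W, C = x.shape
    x = x.view(B, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1], C)
    return x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, grid_size[0], grid_size[1], C)


if __name__ == "__main__":
    H = W = 8
    coords = torch.stack(torch.meshgrid(torch.arange(H), torch.arange(W), indexing="ij"), dim=-1)
    x = coords.unsqueeze(0).float()        # (1, 8, 8, 2), channels hold (row, col)

    block = window_partition(x, (4, 4))    # 4 tiles of contiguous 4x4 pixels
    grid = grid_partition(x, (4, 4))       # 4 groups of pixels strided by 2
    print(block.shape, grid.shape)         # both torch.Size([4, 4, 4, 2])
    print(block[0, :, :, 1])               # columns 0..3  -> local neighbourhood
    print(grid[0, :, :, 1])                # columns 0,2,4,6 -> sparse global grid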
StackTipsLab/bloggy
bloggy/urls.py
[ { "identifier": "settings", "path": "bloggy/settings.py", "snippet": "BASE_DIR = Path(__file__).resolve().parent.parent\nSECRET_KEY = os.getenv(\"SECRET_KEY\", get_random_secret_key())\nDEBUG = os.getenv(\"DEBUG\", \"False\") == \"True\"\nALLOWED_HOSTS = os.getenv(\"ALLOWED_HOSTS\", \"127.0.0.1, localhost\").split(\",\")\nINTERNAL_IPS = ['127.0.0.1']\nSITE_URL = os.getenv(\"SITE_URL\")\nINSTALLED_APPS = [\n 'bloggy',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n # sitemap\n 'django.contrib.sitemaps',\n\n # 'tinymce',\n 'widget_tweaks',\n 'django_summernote',\n 'whitenoise.runserver_nostatic',\n\n 'rest_framework',\n 'bloggy_api',\n 'mail_templated', # Used for templated email https://github.com/artemrizhov/django-mail-templated\n 'storages',\n 'debug_toolbar', # dev only\n\n 'hitcount',\n 'colorfield'\n]\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.gzip.GZipMiddleware',\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'bloggy.middleware.slash_middleware.AppendOrRemoveSlashMiddleware', # Remove slash from url\n\n # Cache\n 'django.middleware.cache.UpdateCacheMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.cache.FetchFromCacheMiddleware',\n # Cache\n\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n\n # Social login\n # 'social_django.middleware.SocialAuthExceptionMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n 'bloggy.middleware.redirect.RedirectMiddleware', # new articles mismatch url redirect\n]\nROOT_URLCONF = 'bloggy.urls'\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': os.path.join(BASE_DIR, '/bloggy/templates'),\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'bloggy.context_processors.seo_attrs',\n 'bloggy.context_processors.app_settings',\n\n # Social login\n # 'social_django.context_processors.backends',\n # 'social_django.context_processors.login_redirect',\n ],\n },\n },\n]\nWSGI_APPLICATION = 'bloggy.wsgi.application'\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': os.getenv('DB_NAME'),\n 'USER': os.getenv('DB_USER'),\n 'PASSWORD': os.getenv('DB_PASSWORD'),\n 'HOST': os.getenv('DB_HOST'),\n 'PORT': os.getenv('DB_PORT'),\n 'OPTIONS': {'charset': 'utf8mb4', 'use_unicode': True},\n }\n}\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nDEFAULT_AUTO_FIELD = 'django.db.models.AutoField'\nSTATIC_URL = '/static/'\nUSE_SPACES = os.getenv('USE_SPACES') == 'True'\nAWS_ACCESS_KEY_ID = 
os.getenv('AWS_ACCESS_KEY_ID')\nAWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')\nAWS_STORAGE_BUCKET_NAME = os.getenv('AWS_STORAGE_BUCKET_NAME')\nAWS_S3_ENDPOINT_URL = f'https://{os.getenv(\"AWS_S3_ENDPOINT_URL\")}'\n AWS_DEFAULT_ACL = 'public-read'\n AWS_QUERYSTRING_AUTH = False\n AWS_S3_OBJECT_PARAMETERS = {'CacheControl': 'max-age=86400'}\n AWS_LOCATION = 'static'\n STATIC_URL = f'{os.getenv(\"ASSETS_DOMAIN\")}/static/'\n STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'\n MEDIA_URL = '/media/'\n DEFAULT_FILE_STORAGE = 'bloggy.storage_backends.PublicMediaStorage'\n PRIVATE_MEDIA_LOCATION = 'private'\n PRIVATE_FILE_STORAGE = 'bloggy.storage_backends.PrivateMediaStorage'\n AWS_S3_CUSTOM_DOMAIN = 'media.stacktips.com'\n STATIC_URL = '/static/'\n STATIC_ROOT = os.path.join(BASE_DIR, 'bloggy/static')\n MEDIA_URL = '/media/'\n MEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nTINYMCE_DEFAULT_CONFIG = {\n 'plugins': 'code',\n 'toolbar': 'code',\n}\nLOGIN_URL = 'login'\nLOGOUT_URL = 'logout'\nLOGIN_REDIRECT_URL = '/'\nLOGOUT_REDIRECT_URL = '/'\nAUTH_USER_MODEL = 'bloggy.User'\nAUTH_USER_DEFAULT_GROUP = 'bloggy-members'\nSUMMERNOTE_THEME = 'bs4'\nSUMMERNOTE_CONFIG = {\n 'iframe': True,\n 'summernote': {\n 'width': '1000',\n 'height': '720',\n 'styleTags': [\n 'p',\n {\n 'title': 'Blockquote',\n 'tag': 'blockquote',\n 'className': 'blockquote',\n 'value': 'blockquote'\n },\n {\n 'title': 'Code Block',\n 'tag': 'pre',\n 'className': 'prettyprint lang-java',\n 'value': 'pre'\n },\n 'h1', 'h2', 'h3', 'h4', 'h5', 'h6'\n ],\n\n 'airMode': False,\n 'toolbar': [\n ['style', ['style']],\n ['font', ['bold', 'underline', 'clear']],\n ['color', ['color']],\n ['para', ['ul', 'ol', 'paragraph']],\n ['table', ['table']],\n ['insert', ['link', 'picture', 'code']],\n ['view', ['fullscreen', 'codeview', 'help']],\n ],\n },\n\n 'codemirror': {\n 'mode': 'htmlmixed',\n 'lineNumbers': 'true',\n 'theme': 'monokai',\n },\n\n 'css': (\n '//cdnjs.cloudflare.com/ajax/libs/codemirror/5.29.0/theme/monokai.min.css',\n ),\n 'attachment_require_authentication': True,\n 'attachment_upload_to': 'uploads/summernote',\n 'attachment_model': 'bloggy.Media',\n 'attachment_absolute_uri': False\n\n}\nMESSAGE_STORAGE = \"django.contrib.messages.storage.cookie.CookieStorage\"\nSITE_TITLE = os.getenv(\"SITE_TITLE\", \"Bloggy\")\nSITE_TAGLINE = os.getenv(\"SITE_TAGLINE\", \"A perfectly crafted blog that developers love.\")\nSITE_DESCRIPTION = os.getenv(\"SITE_DESCRIPTION\")\nSITE_LOGO = os.getenv(\"SITE_LOGO\")\nASSETS_DOMAIN = os.getenv(\"ASSETS_DOMAIN\")\nGOOGLE_RECAPTHCA_SECRET_KEY = os.getenv('GOOGLE_RECAPTHCA_SECRET_KEY')\nGOOGLE_RECAPTHCA_TOKEN_VERIFY_URL = 'https://www.google.com/recaptcha/api/siteverify'\nREST_FRAMEWORK = {\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n ),\n 'DEFAULT_PAGINATION_CLASS': 'bloggy_api.pagination.CustomPaginatedResponse',\n 'PAGE_SIZE': 30,\n\n 'EXCEPTION_HANDLER': 'rest_framework.views.exception_handler',\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.SessionAuthentication'\n ]\n}\nCACHE_TTL = 60 * 15\nCACHE_MIDDLEWARE_ALIAS = 'default' # which cache alias to use\nCACHE_MIDDLEWARE_SECONDS = CACHE_TTL # number of seconds to cache a page for (TTL)\nCACHE_MIDDLEWARE_KEY_PREFIX = '' # should be used if the cache is shared across multiple sites that use the same\nENABLE_CACHING = os.getenv(\"ENABLE_CACHING\", \"False\") == \"True\"\n CACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',\n 
'LOCATION': os.getenv(\"MEMCACHIER_SERVERS\", \"127.0.0.1:11211\"),\n \"OPTIONS\": {\n \"binary\": True,\n # 'username': os.getenv(\"MEMCACHIER_USERNAME\", \"\"),\n # 'password': os.getenv(\"MEMCACHIER_PASSWORD\", \"\"),\n \"behaviors\": {\n \"ketama\": True,\n },\n },\n }\n }\n CACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',\n }\n }\nHITCOUNT_KEEP_HIT_ACTIVE = {'days': 0}\nHITCOUNT_KEEP_HIT_IN_DATABASE = {'days': 77}\nHITCOUNT_HITS_PER_IP_LIMIT = 0\nSHORTCODES_YOUTUBE_JQUERY = False\nPING_INDEX_NOW_POST_UPDATE = os.getenv(\"PING_INDEX_NOW_POST_UPDATE\", \"True\")\nPING_GOOGLE_POST_UPDATE = os.getenv(\"PING_GOOGLE_POST_UPDATE\", \"True\")\nINDEX_NOW_API_KEY = os.getenv(\"INDEX_NOW_API_KEY\", )\nEMAIL_BACKEND = os.getenv('EMAIL_BACKEND')\nEMAIL_HOST = os.getenv('EMAIL_HOST')\nEMAIL_PORT = os.getenv('EMAIL_PORT')\nEMAIL_HOST_USER = os.getenv('EMAIL_HOST_USER')\nEMAIL_HOST_PASSWORD = os.getenv('EMAIL_HOST_PASSWORD')\nEMAIL_USE_TLS = os.getenv('EMAIL_USE_TLS', \"True\")\nDEFAULT_FROM_EMAIL = os.getenv('DEFAULT_FROM_EMAIL')\nEMAIL_FILE_PATH = os.getenv('EMAIL_FILE_PATH', os.path.join(BASE_DIR, 'test-emails'))\nPOST_TYPE_CHOICES = os.getenv('POST_TYPE_CHOICES')\nSHOW_EMTPY_CATEGORIES = os.getenv(\"SHOW_EMTPY_CATEGORIES\", \"False\") == \"True\"\nLOAD_GOOGLE_TAG_MANAGER = os.getenv(\"LOAD_GOOGLE_TAG_MANAGER\", \"False\") == \"True\"\nLOAD_GOOGLE_ADS = os.getenv(\"LOAD_GOOGLE_ADS\", \"False\") == \"True\"\nMY_ADS_TXT_CONTENT = os.getenv('MY_ADS_TXT_CONTENT')\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n },\n },\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": \"DEBUG\",\n },\n \"loggers\": {\n \"django\": {\n \"handlers\": [\"console\"],\n \"level\": os.getenv(\"DJANGO_LOG_LEVEL\", \"INFO\"),\n \"propagate\": False,\n },\n },\n}\ndef get_post_types():" }, { "identifier": "EditProfileView", "path": "bloggy/views/edit_profile_view.py", "snippet": "class EditProfileView(FormView):\n template_name = \"profile/edit_profile.html\"\n model = User\n form_class = EditProfileForm\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['meta_title'] = \"Update Profile\"\n context[\n 'meta_description'] = f\"Update my profile. 
You need a {settings.SITE_TITLE} account to sign in and view your profile.\"\n context['meta_image'] = static('static/media/logo.png')\n return context\n\n def get_initial(self):\n initial = super().get_initial()\n username = self.request.user.username\n user = get_object_or_404(User, username=username)\n\n # update initial field defaults with custom set default values:\n initial.update({\n 'profile_photo': user.profile_photo,\n 'username': user.username,\n 'name': user.name,\n 'bio': user.bio,\n 'website': user.website,\n 'linkedin': user.linkedin,\n 'twitter': user.twitter,\n 'youtube': user.youtube,\n 'github': user.github,\n })\n\n return initial\n\n def get_success_url(self):\n return self.request.get_full_path()\n\n def form_valid(self, form):\n # This method is called when valid form data has been POSTed.\n # It should return an HttpResponse.\n\n if self.request.FILES.get(\"profile_photo\", None) is not None:\n # file_path = self.save_media_file(self.request.FILES[\"profile_photo\"])\n User.objects.filter(username=self.request.user.username).update(\n profile_photo=self.request.FILES[\"profile_photo\"],\n name=form.cleaned_data[\"name\"],\n bio=form.cleaned_data[\"bio\"],\n website=sanitize_url(form.cleaned_data[\"website\"]),\n twitter=sanitize_url(form.cleaned_data[\"twitter\"]),\n youtube=sanitize_url(form.cleaned_data[\"youtube\"]),\n linkedin=sanitize_url(form.cleaned_data[\"linkedin\"]),\n github=sanitize_url(form.cleaned_data[\"github\"])\n )\n else:\n User.objects.filter(username=self.request.user.username).update(\n name=form.cleaned_data[\"name\"],\n bio=form.cleaned_data[\"bio\"],\n website=sanitize_url(form.cleaned_data[\"website\"]),\n twitter=sanitize_url(form.cleaned_data[\"twitter\"]),\n youtube=sanitize_url(form.cleaned_data[\"youtube\"]),\n linkedin=sanitize_url(form.cleaned_data[\"linkedin\"]),\n github=sanitize_url(form.cleaned_data[\"github\"])\n )\n\n return super().form_valid(form)\n\n def save_media_file(self, image):\n # This will generate random folder for saving your image using UUID\n media_path = f'uploads/user/{self.request.user.username}/{image.name}'\n file_path = f'media/{media_path}'\n\n if not os.path.exists(file_path):\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n\n # Create image save path with title\n with open(file_path, \"wb+\") as f:\n for chunk in image.chunks():\n f.write(chunk)\n\n return media_path" }, { "identifier": "CoursesListView", "path": "bloggy/views/courses_view.py", "snippet": "class CoursesListView(TemplateView):\n model = Course\n template_name = \"pages/archive/courses.html\"\n paginate_by = DEFAULT_PAGE_SIZE\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n courses = Course.objects.filter(publish_status=\"LIVE\").order_by(\"-display_order\")\n paginator = Paginator(courses, self.paginate_by)\n page = self.request.GET.get('page')\n\n try:\n courses = paginator.page(page)\n except PageNotAnInteger:\n courses = paginator.page(1)\n except EmptyPage:\n courses = paginator.page(paginator.num_pages)\n\n context['courses'] = courses\n return context" }, { "identifier": "CourseDetailsView", "path": "bloggy/views/courses_view.py", "snippet": "class CourseDetailsView(HitCountDetailView):\n model = Course\n template_name = \"pages/single/course.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n set_seo_settings(post=self.object, context=context)\n return context" }, { "identifier": "LessonDetailsView", "path": 
"bloggy/views/courses_view.py", "snippet": "class LessonDetailsView(TemplateView):\n template_name = \"pages/single/lesson.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n guide_slug = context[\"course\"]\n post = Post.objects.filter(course__slug=guide_slug).filter(slug=context[\"slug\"]).order_by(\n \"display_order\").first()\n if not post:\n raise Http404\n\n context[\"post\"] = post\n course = post.course\n context[\"course\"] = course\n set_seo_settings(post=course, context=context)\n return context" }, { "identifier": "IndexView", "path": "bloggy/views/pages.py", "snippet": "class IndexView(TemplateView):\n template_name = \"pages/home.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['posts'] = Post.objects.prefetch_related(\"category\").filter(publish_status=\"LIVE\").order_by(\n \"-published_date\")[:12]\n context['courses'] = Course.objects.filter(publish_status=\"LIVE\").all()[:6]\n return context" }, { "identifier": "CategoriesView", "path": "bloggy/views/category_view.py", "snippet": "class CategoriesView(TemplateView):\n template_name = \"pages/archive/categories.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n if settings.SHOW_EMTPY_CATEGORIES:\n categories = Category.objects.order_by(\"-article_count\").all()\n else:\n categories = Category.objects.filter(article_count__gt=0).order_by(\"-article_count\").all()\n\n logger.debug('Loading categories: %s', categories)\n context['categories'] = categories\n\n return context" }, { "identifier": "CategoryDetailsView", "path": "bloggy/views/category_view.py", "snippet": "class CategoryDetailsView(ListView):\n model = Post\n template_name = \"pages/archive/posts.html\"\n paginate_by = 20\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n category_param = self.kwargs['slug']\n\n try:\n category = Category.objects.get(slug=category_param)\n context['selected_category'] = category\n except Category.DoesNotExist:\n raise Http404\n\n posts = Post.objects.filter(category__slug__in=[category_param], publish_status=\"LIVE\").order_by(\n \"-published_date\")\n paginator = Paginator(posts, self.paginate_by)\n page = self.request.GET.get('page')\n\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n posts = paginator.page(1)\n except EmptyPage:\n posts = paginator.page(paginator.num_pages)\n\n context['posts'] = posts\n context['categories'] = Category.objects.filter(article_count__gt=0).order_by(\"-article_count\").all()\n\n set_seo_settings(post=category, context=context)\n return context" }, { "identifier": "sitemaps_list", "path": "bloggy/services/sitemaps.py", "snippet": "class StaticPagesSitemap(sitemaps.Sitemap):\n def items(self):\n def location(self, item):" }, { "identifier": "RegisterView", "path": "bloggy/views/register.py", "snippet": "class RegisterView(View):\n\n def get(self, request):\n return render(request, 'auth/register.html', {'form': SignUpForm()})\n\n def post(self, request):\n form = SignUpForm(request.POST)\n if form.is_valid():\n user = form.save()\n\n verification_token = create_token(user, token_type=\"signup\")\n email_service.email_registration_token(request, user, verification_token)\n return redirect(reverse('login'))\n\n return render(request, 'auth/register.html', {'form': form})" }, { "identifier": "AccountActivationView", "path": "bloggy/views/account.py", "snippet": "class AccountActivationView(View):\n 
def get(self, request, uuid, token):\n\n verification_token = get_token(uuid, token, token_type=\"signup\")\n if is_token_expired(verification_token):\n messages.error(request, \"The verification link is expired or malformed.\")\n return redirect('index')\n\n # activate user\n user = User.objects.get(email=verification_token.user.email)\n user.is_active = True\n user.is_staff = False\n group = Group.objects.get_or_create(name=settings.AUTH_USER_DEFAULT_GROUP)\n user.groups.add(group[0].id)\n user.save()\n\n # delete token as it\n verification_token.delete()\n\n messages.success(request, \"You're all set! Your account is now active and ready to use.\")\n return redirect('login')" }, { "identifier": "PostListView", "path": "bloggy/views/posts.py", "snippet": "class PostListView(ListView):\n model = Post\n template_name = \"pages/archive/posts.html\"\n paginate_by = 20\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['posts'] = get_recent_feed(page=self.request.GET.get('page'))\n context['courses'] = Course.objects.filter(publish_status=\"LIVE\").all()[:2]\n context['categories'] = (Category.objects.filter(article_count__gt=0)\n .order_by(\"-article_count\").all())\n return context" }, { "identifier": "PostDetailsView", "path": "bloggy/views/posts.py", "snippet": "class PostDetailsView(HitCountDetailView):\n model = Post\n count_hit = True\n\n def get_template_names(self):\n if self.template_name:\n return f\"pages/single/{self.object.post_type}-{self.template_name}.html\"\n\n return f\"pages/single/{self.object.post_type}.html\"\n\n def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n\n def get_client_ip(self):\n x_forwarded_for = self.request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = self.request.META.get('REMOTE_ADDR')\n return ip\n\n def get_context_data(self, **kwargs):\n\n # check if article is published? 
if live no issues.\n if self.object.publish_status == \"DRAFT\":\n logged_user = self.request.user\n\n # If not live, check for the context parameter and the user login status\n # If user is the owner of the post or user is an admin, can preview the post\n if not logged_user:\n raise HttpResponse('Unauthorized', status=401)\n if not (logged_user.username.__eq__(self.object.author.username) or logged_user.is_superuser):\n raise HttpResponse('Unauthorized', status=401)\n\n context = super().get_context_data(**kwargs)\n set_seo_settings(post=self.object, context=context)\n return context" }, { "identifier": "MyLoginView", "path": "bloggy/views/login.py", "snippet": "class MyLoginView(LoginView):\n\n def get_success_url(self):\n redirect_url = self.request.GET.get('next')\n if redirect_url:\n return redirect_url\n\n return reverse('index')" }, { "identifier": "AdsTextView", "path": "bloggy/views/pages.py", "snippet": "class AdsTextView(View):\n def get(self, request, *args, **kwargs):\n return HttpResponse(settings.MY_ADS_TXT_CONTENT, content_type='text/plain')" }, { "identifier": "robots", "path": "bloggy/views/pages.py", "snippet": "@cache_page(60 * 60 * 24)\ndef robots(request):\n \"\"\"\n generates robots.txt, which pretty much does not change\n \"\"\"\n domain = settings.SITE_URL\n\n data = f\"\"\"User-agent: *\nDisallow: /admin/\nDisallow: /media/\nDisallow: /static/\nDisallow: /api/\n\nSitemap: {domain}/sitemap.xml\n\"\"\"\n\n return HttpResponse(data, content_type='text/plain')" }, { "identifier": "PageDetailsView", "path": "bloggy/views/pages.py", "snippet": "class PageDetailsView(TemplateView):\n template_name = \"pages/page.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n url = context[\"url\"]\n page = Page.objects.filter(url=url).filter(publish_status=\"LIVE\").first()\n if page:\n context[\"page\"] = page\n set_seo_settings(post=page, context=context)\n return context\n raise Http404" }, { "identifier": "QuizListView", "path": "bloggy/views/quizzes_view.py", "snippet": "class QuizListView(ListView):\n model = Quiz\n template_name = \"pages/archive/quizzes.html\"\n paginate_by = DEFAULT_PAGE_SIZE\n\n def get_context_data(self, **kwargs):\n context = super(QuizListView, self).get_context_data(**kwargs)\n context['quizzes'] = get_recent_quizzes()\n return context" }, { "identifier": "QuizDetailView", "path": "bloggy/views/quizzes_view.py", "snippet": "class QuizDetailView(HitCountDetailView):\n model = Quiz\n template_name = \"pages/single/quiz.html\"\n\n def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n set_seo_settings(post=self.object, context=context)\n return context" }, { "identifier": "PostsRssFeed", "path": "bloggy/views/rss.py", "snippet": "class PostsRssFeed(BaseRssFeedView):\n title = f'Posts from {settings.SITE_TITLE}'\n link = \"/articles\"\n\n def item_enclosure_url(self, item):\n thumbnail = static('static/media/default-banner.png')\n if item.thumbnail:\n thumbnail = settings.ASSETS_DOMAIN + item.thumbnail.url\n return thumbnail\n\n def items(self):\n return Post.objects.filter(publish_status=\"LIVE\").order_by('-published_date')[:30]\n\n def item_description(self, item):\n content = (f\"{item.content}\\n<small>Originally published at \"\n f\"<a href='{settings.SITE_URL + item.get_absolute_url()}' \"\n f\"target='_blank'>{settings.SITE_URL}</a></small>\")\n\n thumbnail = 
static('static/media/default-banner.png')\n if item.thumbnail:\n thumbnail = settings.ASSETS_DOMAIN + item.thumbnail.url\n return f'{content}<img src=\"{thumbnail}\" alt=\"{item.title}\" style=\"display:none;\">'\n\n def item_author_name(self, item):\n author = item.author\n return author.username if author else None\n\n def item_author_link(self, item):\n author = item.author\n return settings.SITE_URL + item.get_absolute_url() if author else \"/\"" }, { "identifier": "CoursesRssFeed", "path": "bloggy/views/rss.py", "snippet": "class CoursesRssFeed(BaseRssFeedView):\n title = \"Courses\"\n link = \"/courses\"\n\n def items(self):\n return Course.objects.filter(publish_status=\"LIVE\").order_by('-published_date')[:30]\n\n def item_description(self, item):\n content = f\"{item.excerpt}\\n<small>Take the free course from <a href='{settings.SITE_URL + item.get_absolute_url()}' target='_blank'>{settings.SITE_URL}</a></small>\"\n return content\n\n def item_categories(self, obj):\n return []" }, { "identifier": "SearchListView", "path": "bloggy/views/search.py", "snippet": "class SearchListView(ListView):\n model = Post\n template_name = \"pages/search_result.html\"\n paginate_by = DEFAULT_PAGE_SIZE\n\n def get_context_data(self, **kwargs):\n search_query = self.request.GET.get(\"q\")\n context = super().get_context_data(**kwargs)\n\n if StringUtils.is_not_blank(search_query):\n categories = Category.objects.filter(slug__icontains=search_query)[:5]\n results = chain(\n Post.objects.filter(title__icontains=search_query, excerpt__icontains=search_query, publish_status=\"LIVE\"),\n )\n\n context['posts'] = results\n context['categories'] = categories\n context['search_query'] = search_query\n context['meta_title'] = f\"Search result for {search_query}\"\n context['meta_description'] = \"Search articles\"\n\n return context" }, { "identifier": "MyProfileView", "path": "bloggy/views/user.py", "snippet": "class MyProfileView(DetailView):\n # template_name = \"pages/user.html\"\n template_name = \"profile/user_dashboard.html\"\n\n def get_object(self, **kwargs):\n username = self.request.user # self.kwargs.get(\"username\")\n return get_object_or_404(User, username=username)\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n user = self.get_object()\n\n posts = user.posts.order_by(\"-published_date\").filter(publish_status=\"LIVE\")\n paginator = Paginator(posts, DEFAULT_PAGE_SIZE)\n page = self.request.GET.get('page')\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n posts = paginator.page(1)\n except EmptyPage:\n posts = paginator.page(paginator.num_pages)\n\n context.update({\n 'posts': posts,\n 'userProfile': user,\n 'userType': \"self\",\n })\n\n context['meta_title'] = \"My Profile\"\n context[\n 'meta_description'] = f'My profile. 
Access your {settings.SITE_TITLE} profile, account settings My Profile.'\n if user.profile_photo:\n context['meta_image'] = settings.SITE_LOGO\n\n return context" }, { "identifier": "PublicProfileView", "path": "bloggy/views/user.py", "snippet": "class PublicProfileView(SingleObjectMixin, View):\n template_name = \"pages/user.html\"\n\n def get_object(self, **kwargs):\n username = self.kwargs.get(\"username\")\n return get_object_or_404(User, username=username)\n\n def get(self, request, *args, **kwargs):\n username = kwargs.get(\"username\")\n if username == 'siteadmin' or username == 'admin' or username == 'superadmin' or username == 'wp-admin':\n raise Http404\n\n self.object = self.get_object()\n context = self.get_context_data(object=self.object)\n posts = self.object.posts.filter(publish_status=\"LIVE\").order_by(\"-published_date\")\n\n paginator = Paginator(posts, DEFAULT_PAGE_SIZE)\n page = self.request.GET.get('page')\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n posts = paginator.page(1)\n except EmptyPage:\n posts = paginator.page(paginator.num_pages)\n\n context['meta_title'] = self.object.get_full_name()\n description = f\"{settings.SITE_TITLE} Author. {self.object.get_full_name()}. {self.object.bio}\"\n context['meta_description'] = strip_tags(description)\n context['meta_image'] = self.object.get_avatar()\n\n context.update({\n 'posts': posts,\n 'user': self.object\n })\n\n return render(request, self.template_name, context)" }, { "identifier": "AuthorsListView", "path": "bloggy/views/user.py", "snippet": "class AuthorsListView(TemplateView):\n template_name = \"pages/authors.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n authors = User.objects.filter(is_active=True).filter(is_staff=True).exclude(\n username__in=[\"siteadmin\", \"superadmin\", \"admin\"]).all()\n context.update({\n \"authors\": authors\n })\n return context" }, { "identifier": "UserBookmarksView", "path": "bloggy/views/user_collections.py", "snippet": "class UserBookmarksView(TemplateView):\n template_name = \"profile/user_bookmarks.html\"\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n username = self.request.user.username\n user = get_object_or_404(User, username=username)\n\n articles = Post.objects.raw('''\n select a.id as id, a.title as title, a.slug as slug, a.publish_status as publish_status, a.thumbnail as thumbnail, b.updated_date as bookmark_date from bloggy_article a JOIN bloggy_bookmarks b on a.id=b.post_id where b.user_id=%s and b.post_type=%s\n ''', ([user.id], \"article\"))\n\n context.update({\n 'articles': articles,\n 'userProfile': user,\n 'userType': \"self\",\n })\n return context" } ]
from django.conf.urls.static import static from django.contrib import admin from django.contrib.auth.decorators import login_required from django.contrib.auth.views import LogoutView from django.contrib.auth.views import PasswordChangeView from django.contrib.sitemaps.views import sitemap, index from django.contrib.staticfiles.urls import staticfiles_urlpatterns from django.urls import path, include from django.views.generic.base import TemplateView from bloggy import settings from bloggy.views import EditProfileView from bloggy.views.courses_view import CoursesListView, CourseDetailsView, LessonDetailsView from bloggy.views.pages import IndexView from bloggy.views.category_view import CategoriesView, CategoryDetailsView from .services.sitemaps import sitemaps_list from .views import RegisterView from .views.account import AccountActivationView from .views.posts import PostListView, PostDetailsView from .views.login import MyLoginView from .views.pages import AdsTextView, robots from .views.pages import PageDetailsView from .views.quizzes_view import QuizListView, QuizDetailView from .views.rss import PostsRssFeed, CoursesRssFeed from .views.search import SearchListView from .views.user import MyProfileView, PublicProfileView, AuthorsListView from .views.user_collections import UserBookmarksView
8,311
"""bloggy URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ urlpatterns = [ path('admin/', admin.site.urls), path('admin/password_change/', PasswordChangeView.as_view(), name='password_change'), path('', IndexView.as_view(), name='index'), path('articles', PostListView.as_view(), name='posts'), path('articles/<slug:slug>', PostDetailsView.as_view(), name='post_single'), path('topics', CategoriesView.as_view(), name='categories'), path('topics/<str:slug>', CategoryDetailsView.as_view(), name='categories_single'), path('search', SearchListView.as_view(), name='search'), path('courses', CoursesListView.as_view(), name='courses'), path('courses/<slug:slug>', CourseDetailsView.as_view(), name='courses_single'), path('courses/<str:course>/<slug:slug>', LessonDetailsView.as_view(), name='lesson_single'), path('quizzes', QuizListView.as_view(), name='quizzes'), path('quizzes/<slug:slug>', QuizDetailView.as_view(), name='quiz_single'), path('login', MyLoginView.as_view(template_name="auth/login.html"), name='login'), path('logout', LogoutView.as_view(), name='logout'), path('register', RegisterView.as_view(), name='register'), path('activate/<str:uuid>/<str:token>', AccountActivationView.as_view(), name='activate_account'), path('authors', AuthorsListView.as_view(), name="authors"), path('user/<str:username>', PublicProfileView.as_view(), name="user_profile"), path('edit-profile', login_required(EditProfileView.as_view()), name="profile.edit_profile"), # path('dashboard', login_required(MyProfileView.as_view()), name="profile.dashboard"), path('bookmarks', login_required(UserBookmarksView.as_view()), name="profile.bookmarks"), path('contact', TemplateView.as_view(template_name="pages/contact.html"), name='pages.contact'), path("rss/articles", PostsRssFeed(), name="articles_feed"), path("rss/courses", CoursesRssFeed(), name="courses_feed"), path('sitemap.xml', index, {'sitemaps': sitemaps_list}, name='django.contrib.sitemaps.views.index'), path('sitemap/<str:section>.xml', sitemap, {'sitemaps': sitemaps_list}, name='django.contrib.sitemaps.views.sitemap'), # static files for SEO or other reasons path('robots.txt', robots, name='robots'),
"""bloggy URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ urlpatterns = [ path('admin/', admin.site.urls), path('admin/password_change/', PasswordChangeView.as_view(), name='password_change'), path('', IndexView.as_view(), name='index'), path('articles', PostListView.as_view(), name='posts'), path('articles/<slug:slug>', PostDetailsView.as_view(), name='post_single'), path('topics', CategoriesView.as_view(), name='categories'), path('topics/<str:slug>', CategoryDetailsView.as_view(), name='categories_single'), path('search', SearchListView.as_view(), name='search'), path('courses', CoursesListView.as_view(), name='courses'), path('courses/<slug:slug>', CourseDetailsView.as_view(), name='courses_single'), path('courses/<str:course>/<slug:slug>', LessonDetailsView.as_view(), name='lesson_single'), path('quizzes', QuizListView.as_view(), name='quizzes'), path('quizzes/<slug:slug>', QuizDetailView.as_view(), name='quiz_single'), path('login', MyLoginView.as_view(template_name="auth/login.html"), name='login'), path('logout', LogoutView.as_view(), name='logout'), path('register', RegisterView.as_view(), name='register'), path('activate/<str:uuid>/<str:token>', AccountActivationView.as_view(), name='activate_account'), path('authors', AuthorsListView.as_view(), name="authors"), path('user/<str:username>', PublicProfileView.as_view(), name="user_profile"), path('edit-profile', login_required(EditProfileView.as_view()), name="profile.edit_profile"), # path('dashboard', login_required(MyProfileView.as_view()), name="profile.dashboard"), path('bookmarks', login_required(UserBookmarksView.as_view()), name="profile.bookmarks"), path('contact', TemplateView.as_view(template_name="pages/contact.html"), name='pages.contact'), path("rss/articles", PostsRssFeed(), name="articles_feed"), path("rss/courses", CoursesRssFeed(), name="courses_feed"), path('sitemap.xml', index, {'sitemaps': sitemaps_list}, name='django.contrib.sitemaps.views.index'), path('sitemap/<str:section>.xml', sitemap, {'sitemaps': sitemaps_list}, name='django.contrib.sitemaps.views.sitemap'), # static files for SEO or other reasons path('robots.txt', robots, name='robots'),
path('ads.txt', AdsTextView.as_view(), name='ads_txt'),
14
2023-10-17 14:50:39+00:00
12k
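The urlpatterns in the record above map 'activate/<str:uuid>/<str:token>' to AccountActivationView, whose get() method appears at the top of the record's context and activates the user before deleting the verification token. A minimal illustrative sketch of building such an activation link for that route; send_activation_email() and its arguments are hypothetical and not code from the bloggy repository:

# Illustrative sketch only: construct the link handled by AccountActivationView.get()
# via the route named 'activate_account' ('activate/<str:uuid>/<str:token>').
# send_activation_email() is a hypothetical helper, not part of the repository.
from django.urls import reverse

def send_activation_email(request, user, uuid, token):
    path = reverse('activate_account', kwargs={'uuid': uuid, 'token': token})
    activation_url = request.build_absolute_uri(path)  # e.g. https://example.com/activate/<uuid>/<token>
    # ... email activation_url to the user; visiting it activates the account
    # and removes the verification token, as in the quoted view ...
    return activation_url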
zabbix/python-zabbix-utils
.github/scripts/compatibility_api_test_6.py
[ { "identifier": "Getter", "path": "zabbix_utils/getter.py", "snippet": "class Getter():\n \"\"\"Zabbix get implementation.\n\n Args:\n host (str, optional): Zabbix agent address. Defaults to `'127.0.0.1'`.\n\n port (int, optional): Zabbix agent port. Defaults to `10050`.\n\n timeout (int, optional): Connection timeout value. Defaults to `10`.\n\n use_ipv6 (bool, optional): Specifying IPv6 use instead of IPv4. Defaults to `False`.\n\n source_ip (str, optional): IP from which to establish connection. Defaults to `None`.\n\n socket_wrapper (Callable, optional): Func(`conn`) to wrap socket. Defaults to `None`.\n \"\"\"\n\n def __init__(self, host: str = '127.0.0.1', port: int = 10050, timeout: int = 10,\n use_ipv6: bool = False, source_ip: Union[str, None] = None,\n socket_wrapper: Union[Callable, None] = None):\n self.host = host\n self.port = port\n self.timeout = timeout\n self.use_ipv6 = use_ipv6\n self.source_ip = source_ip\n\n self.socket_wrapper = socket_wrapper\n if self.socket_wrapper:\n if not isinstance(self.socket_wrapper, Callable):\n raise TypeError('Value \"socket_wrapper\" should be a function.')\n\n def __get_response(self, conn: socket) -> Union[str, None]:\n result = ZabbixProtocol.parse_packet(conn, log, ProcessingError)\n\n log.debug('Received data: %s', result)\n\n return result\n\n def get(self, key: str) -> Union[str, None]:\n \"\"\"Gets item value from Zabbix agent by specified key.\n\n Args:\n key (str): Zabbix item key.\n\n Returns:\n str: Value from Zabbix agent for specified key.\n \"\"\"\n\n packet = ZabbixProtocol.create_packet(key, log)\n\n try:\n if self.use_ipv6:\n connection = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)\n else:\n connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n except socket.error:\n raise ProcessingError(\n f\"Error creating socket for {self.host}:{self.port}\") from None\n\n connection.settimeout(self.timeout)\n\n if self.source_ip:\n connection.bind((self.source_ip, 0,))\n\n try:\n connection.connect((self.host, self.port))\n if self.socket_wrapper is not None:\n connection = self.socket_wrapper(connection)\n connection.sendall(packet)\n except (TimeoutError, socket.timeout) as err:\n log.error(\n 'The connection to %s timed out after %d seconds',\n f\"{self.host}:{self.port}\",\n self.timeout\n )\n connection.close()\n raise err\n except (ConnectionRefusedError, socket.gaierror) as err:\n log.error(\n 'An error occurred while trying to connect to %s: %s',\n f\"{self.host}:{self.port}\",\n getattr(err, 'msg', str(err))\n )\n connection.close()\n raise err\n except (OSError, socket.error) as err:\n log.warning(\n 'An error occurred while trying to send to %s: %s',\n f\"{self.host}:{self.port}\",\n getattr(err, 'msg', str(err))\n )\n connection.close()\n raise err\n\n try:\n response = self.__get_response(connection)\n except ConnectionResetError as err:\n log.debug('Get value error: %s', err)\n log.warning('Check access restrictions in Zabbix agent configuration.')\n raise err\n log.debug('Response from [%s:%s]: %s', self.host, self.port, response)\n\n try:\n connection.close()\n except socket.error:\n pass\n\n return response" }, { "identifier": "APIRequestError", "path": "zabbix_utils/exceptions.py", "snippet": "class APIRequestError(ModuleBaseException):\n \"\"\"Exception class when Zabbix API returns error by request.\n\n Args:\n api_error (Union[str, dict]): Raw error message from Zabbix API.\n \"\"\"\n def __init__(self, api_error: Union[str, dict]):\n if isinstance(api_error, dict):\n api_error['body'] = 
ModuleUtils.hide_private(api_error['body'])\n super().__init__(\"{message} {data}\".format(**api_error))\n for key, value in api_error.items():\n setattr(self, key, value)\n else:\n super().__init__(api_error)" }, { "identifier": "ZabbixAPI", "path": "zabbix_utils/api.py", "snippet": "class ZabbixAPI():\n \"\"\"Provide interface for working with Zabbix API.\n\n Args:\n url (str, optional): Zabbix API URL. Defaults to `http://localhost/zabbix/api_jsonrpc.php`.\n token (str, optional): Zabbix API token. Defaults to `None`.\n user (str, optional): Zabbix API username. Defaults to `None`.\n password (str, optional): Zabbix API user's password. Defaults to `None`.\n http_user (str, optional): Basic Authentication username. Defaults to `None`.\n http_password (str, optional): Basic Authentication password. Defaults to `None`.\n skip_version_check (bool, optional): Skip version compatibility check. Defaults to `False`.\n validate_certs (bool, optional): Specifying certificate validation. Defaults to `True`.\n timeout (int, optional): Connection timeout to Zabbix API. Defaults to `30`.\n \"\"\"\n\n __version = None\n __use_token = False\n __session_id = None\n __basic_cred = None\n\n def __init__(self, url: Union[str, None] = None, token: Union[str, None] = None,\n user: Union[str, None] = None, password: Union[str, None] = None,\n http_user: Union[str, None] = None, http_password: Union[str, None] = None,\n skip_version_check: bool = False, validate_certs: bool = True, timeout: int = 30):\n\n url = url or env.get('ZABBIX_URL') or 'http://localhost/zabbix/api_jsonrpc.php'\n user = user or env.get('ZABBIX_USER') or None\n password = password or env.get('ZABBIX_PASSWORD') or None\n token = token or env.get('ZABBIX_TOKEN') or None\n\n self.url = ModuleUtils.check_url(url)\n self.validate_certs = validate_certs\n self.timeout = timeout\n\n if http_user and http_password:\n self.__basic_auth(http_user, http_password)\n\n self.__check_version(skip_version_check)\n\n if token or user or password:\n self.login(token, user, password)\n\n def __getattr__(self, name: str) -> Callable:\n \"\"\"Dynamic creation of an API object.\n\n Args:\n name (str): Zabbix API method name.\n\n Returns:\n APIObject: Zabbix API object instance.\n \"\"\"\n\n return APIObject(name, self)\n\n def __enter__(self) -> Self:\n return self\n\n def __exit__(self, *args) -> None:\n self.logout()\n\n def __basic_auth(self, user: str, password: str) -> Self:\n \"\"\"Enable Basic Authentication using.\n\n Args:\n user (str): Basic Authentication username.\n password (str): Basic Authentication password.\n \"\"\"\n\n log.debug(\n \"Enable Basic Authentication with username:%s password:%s\",\n user,\n ModuleUtils.HIDING_MASK\n )\n\n self.__basic_cred = base64.b64encode(\n f\"{user}:{password}\".encode()\n ).decode()\n\n def api_version(self) -> APIVersion:\n \"\"\"Return object of Zabbix API version.\n\n Returns:\n APIVersion: Object of Zabbix API version\n \"\"\"\n\n if self.__version is None:\n self.__version = APIVersion(self.apiinfo.version())\n return self.__version\n\n @property\n def version(self) -> APIVersion:\n \"\"\"Return object of Zabbix API version.\n\n Returns:\n APIVersion: Object of Zabbix API version.\n \"\"\"\n\n return self.api_version()\n\n def login(self, token: Union[str, None] = None, user: Union[str, None] = None,\n password: Union[str, None] = None) -> Self:\n \"\"\"Login to Zabbix API.\n\n Args:\n token (str, optional): Zabbix API token. Defaults to `None`.\n user (str, optional): Zabbix API username. 
Defaults to `None`.\n password (str, optional): Zabbix API user's password. Defaults to `None`.\n \"\"\"\n\n if token:\n if self.version < 5.4:\n raise APINotSupported(\n message=\"Token usage\",\n version=self.version\n )\n if user or password:\n raise ProcessingError(\n \"Token cannot be used with username and password\")\n self.__use_token = True\n self.__session_id = token\n return\n\n if not user:\n raise ProcessingError(\"Username is missing\")\n if not password:\n raise ProcessingError(\"User password is missing\")\n\n if self.version < 5.4:\n user_cred = {\n \"user\": user,\n \"password\": password\n }\n else:\n user_cred = {\n \"username\": user,\n \"password\": password\n }\n\n log.debug(\n \"Login to Zabbix API using username:%s password:%s\", user, ModuleUtils.HIDING_MASK\n )\n self.__use_token = False\n self.__session_id = self.user.login(**user_cred)\n\n log.debug(\"Connected to Zabbix API version %s: %s\", self.version, self.url)\n\n def logout(self) -> None:\n \"\"\"Logout from Zabbix API.\"\"\"\n\n if self.__session_id:\n if self.__use_token:\n self.__session_id = None\n self.__use_token = False\n return\n\n log.debug(\"Logout from Zabbix API\")\n self.user.logout()\n self.__session_id = None\n else:\n log.debug(\"You're not logged in Zabbix API\")\n\n def check_auth(self) -> bool:\n \"\"\"Check authentication status in Zabbix API.\n\n Returns:\n bool: User authentication status (`True`, `False`)\n \"\"\"\n\n if not self.__session_id:\n log.debug(\"You're not logged in Zabbix API\")\n return False\n\n if self.__use_token:\n log.debug(\"Check auth session using token in Zabbix API\")\n refresh_resp = self.user.checkAuthentication(token=self.__session_id)\n else:\n log.debug(\"Check auth session using sessionid in Zabbix API\")\n refresh_resp = self.user.checkAuthentication(sessionid=self.__session_id)\n\n return bool(refresh_resp.get('userid'))\n\n def send_api_request(self, method: str, params: Union[dict, None] = None,\n need_auth=True) -> dict:\n \"\"\"Function for sending request to Zabbix API.\n\n Args:\n method (str): Zabbix API method name.\n params (dict, optional): Params for request body. Defaults to `None`.\n need_auth (bool, optional): Authorization using flag. 
Defaults to `False`.\n\n Raises:\n ProcessingError: Wrapping built-in exceptions during request processing.\n APIRequestError: Wrapping errors from Zabbix API.\n\n Returns:\n dict: Dictionary with Zabbix API response.\n \"\"\"\n\n request_json = {\n 'jsonrpc': '2.0',\n 'method': method,\n 'params': params or {},\n 'id': str(uuid4()),\n }\n\n headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json-rpc',\n 'User-Agent': f\"{__name__}/{__version__}\"\n }\n\n if need_auth:\n if not self.__session_id:\n raise ProcessingError(\"You're not logged in Zabbix API\")\n if self.version < 6.4 or self.__basic_cred is not None:\n request_json['auth'] = self.__session_id\n else:\n headers[\"Authorization\"] = f\"Bearer {self.__session_id}\"\n\n if self.__basic_cred is not None:\n headers[\"Authorization\"] = f\"Basic {self.__basic_cred}\"\n\n log.debug(\n \"Sending request to %s with body: %s\",\n self.url,\n request_json\n )\n\n req = ul.Request(\n self.url,\n data=json.dumps(request_json).encode(\"utf-8\"),\n headers=headers,\n method='POST'\n )\n req.timeout = self.timeout\n\n # Disable SSL certificate validation if needed.\n if not self.validate_certs:\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n else:\n ctx = None\n\n try:\n resp = ul.urlopen(req, context=ctx)\n resp_json = json.loads(resp.read().decode('utf-8'))\n except URLError as err:\n raise ProcessingError(f\"Unable to connect to {self.url}:\", err) from None\n except ValueError as err:\n raise ProcessingError(\"Unable to parse json:\", err) from None\n\n if method not in ModuleUtils.FILES_METHODS:\n log.debug(\n \"Received response body: %s\",\n resp_json\n )\n else:\n debug_json = resp_json.copy()\n if debug_json.get('result'):\n debug_json['result'] = shorten(debug_json['result'], 200, placeholder='...')\n log.debug(\n \"Received response body (clipped): %s\",\n json.dumps(debug_json, indent=4, separators=(',', ': '))\n )\n\n if 'error' in resp_json:\n err = resp_json['error'].copy()\n err['body'] = request_json.copy()\n raise APIRequestError(err)\n\n return resp_json\n\n def __check_version(self, skip_check: bool) -> None:\n\n skip_check_help = \"If you're sure zabbix_utils will work properly with your current \\\nZabbix version you can skip this check by \\\nspecifying skip_version_check=True when create ZabbixAPI object.\"\n if self.version < __min_supported__:\n if skip_check:\n log.debug(\n \"Version of Zabbix API [%s] is less than the library supports. %s\",\n self.version,\n \"Further library use at your own risk!\"\n )\n else:\n raise APINotSupported(\n f\"Version of Zabbix API [{self.version}] is not supported by the library. \" +\n f\"The oldest supported version is {__min_supported__}.0. \" + skip_check_help\n )\n\n if self.version > __max_supported__:\n if skip_check:\n log.debug(\n \"Version of Zabbix API [%s] is more than the library was tested on. %s\",\n self.version,\n \"Recommended to update the library. Further library use at your own risk!\"\n )\n else:\n raise APINotSupported(\n f\"Version of Zabbix API [{self.version}] was not tested with the library. \" +\n f\"The latest tested version is {__max_supported__}.0. 
\" + skip_check_help\n )" }, { "identifier": "APIVersion", "path": "zabbix_utils/api.py", "snippet": "class APIVersion():\n \"\"\"Zabbix API version object.\n\n Args:\n apiver (str): Raw version in string format.\n \"\"\"\n\n def __init__(self, apiver: str):\n self.__raw = apiver\n self.__first, self.__second, self.__third = self.__parse_version(self.__raw)\n\n def __getitem__(self, index: int) -> Any:\n # Get a symbol from the raw version string by index\n # For compatibility with using Zabbix version as a string\n return self.__raw[index]\n\n def is_lts(self) -> bool:\n \"\"\"Check if the current version is LTS.\n\n Returns:\n bool: `True` if the current version is LTS.\n \"\"\"\n\n return self.__second == 0\n\n @property\n def major(self) -> float:\n \"\"\"Get major version number.\n\n Returns:\n float: A major version number.\n \"\"\"\n\n return float(f\"{self.__first}.{self.__second}\")\n\n @property\n def minor(self) -> int:\n \"\"\"Get minor version number.\n\n Returns:\n int: A minor version number.\n \"\"\"\n\n return self.__third\n\n def __parse_version(self, ver: str) -> List[Any]:\n # Parse the version string into a list of integers.\n match = re.fullmatch(r'(\\d+)\\.(\\d+)\\.(\\d+)', ver)\n if match is None:\n raise ValueError(\n f\"Unable to parse version of Zabbix API: {ver}. \" +\n f\"Default '{__max_supported__}.0' format is expected.\"\n ) from None\n return list(map(int, match.groups()))\n\n def __str__(self) -> str:\n return self.__raw\n\n def __repr__(self) -> str:\n return self.__raw\n\n def __eq__(self, other: Union[float, str]) -> bool:\n if isinstance(other, float):\n return self.major == other\n if isinstance(other, str):\n return [self.__first, self.__second, self.__third] == self.__parse_version(other)\n raise TypeError(\n f\"'==' not supported between instances of '{type(self).__name__}' and \\\n'{type(other).__name__}', only 'float' or 'str' is expected\"\n )\n\n def __gt__(self, other: Union[float, str]) -> bool:\n if isinstance(other, float):\n return self.major > other\n if isinstance(other, str):\n return [self.__first, self.__second, self.__third] > self.__parse_version(other)\n raise TypeError(\n f\"'>' not supported between instances of '{type(self).__name__}' and \\\n'{type(other).__name__}', only 'float' or 'str' is expected\"\n )\n\n def __lt__(self, other: Union[float, str]) -> bool:\n if isinstance(other, float):\n return self.major < other\n if isinstance(other, str):\n return [self.__first, self.__second, self.__third] < self.__parse_version(other)\n raise TypeError(\n f\"'<' not supported between instances of '{type(self).__name__}' and \\\n'{type(other).__name__}', only 'float' or 'str' is expected\"\n )\n\n def __ne__(self, other: Any) -> bool:\n return not self.__eq__(other)\n\n def __ge__(self, other: Any) -> bool:\n return not self.__lt__(other)\n\n def __le__(self, other: Any) -> bool:\n return not self.__gt__(other)" }, { "identifier": "ItemValue", "path": "zabbix_utils/sender.py", "snippet": "class ItemValue():\n \"\"\"Contains data of a single item value.\n\n Args:\n host (str): Specify host name the item belongs to (as registered in Zabbix frontend).\n key (str): Specify item key to send value to.\n value (str): Specify item value.\n clock (int, optional): Specify time in Unix timestamp format. Defaults to `None`.\n ns (int, optional): Specify time expressed in nanoseconds. 
Defaults to `None`.\n \"\"\"\n\n def __init__(self, host: str, key: str, value: str,\n clock: Union[int, None] = None, ns: Union[int, None] = None):\n self.host = str(host)\n self.key = str(key)\n self.value = str(value)\n self.clock = None\n self.ns = None\n\n if clock is not None:\n try:\n self.clock = int(clock)\n except ValueError:\n raise ValueError(\n 'The clock value must be expressed in the Unix Timestamp format') from None\n\n if ns is not None:\n try:\n self.ns = int(ns)\n except ValueError:\n raise ValueError(\n 'The ns value must be expressed in the integer value of nanoseconds') from None\n\n def __str__(self) -> str:\n return json.dumps(self.to_json(), ensure_ascii=False)\n\n def __repr__(self) -> str:\n return self.__str__()\n\n def to_json(self) -> dict:\n \"\"\"Represents ItemValue object in dictionary for json.\n\n Returns:\n dict: Object attributes in dictionary.\n \"\"\"\n\n return {k: v for k, v in self.__dict__.items() if v is not None}" }, { "identifier": "Sender", "path": "zabbix_utils/sender.py", "snippet": "class Sender():\n \"\"\"Zabbix sender implementation.\n\n Args:\n server (str, optional): Zabbix server address. Defaults to `'127.0.0.1'`.\n port (int, optional): Zabbix server port. Defaults to `10051`.\n use_config (bool, optional): Specifying configuration use. Defaults to `False`.\n timeout (int, optional): Connection timeout value. Defaults to `10`.\n use_ipv6 (bool, optional): Specifying IPv6 use instead of IPv4. Defaults to `False`.\n source_ip (str, optional): IP from which to establish connection. Defaults to `None`.\n chunk_size (int, optional): Number of packets in one chunk. Defaults to `250`.\n socket_wrapper (Callable, optional): Func(`conn`,`tls`) to wrap socket. Defaults to `None`.\n compression (bool, optional): Specifying compression use. Defaults to `False`.\n config_path (str, optional): Path to Zabbix agent configuration file. 
Defaults to \\\n`/etc/zabbix/zabbix_agentd.conf`.\n \"\"\"\n\n def __init__(self, server: str = '127.0.0.1', port: int = 10051,\n use_config: bool = False, timeout: int = 10, use_ipv6: bool = False,\n source_ip: Union[str, None] = None, chunk_size: int = 250,\n socket_wrapper: Union[Callable, None] = None, compression: bool = False,\n config_path: Union[str, None] = '/etc/zabbix/zabbix_agentd.conf'):\n self.timeout = timeout\n self.use_ipv6 = use_ipv6\n self.tls = {}\n\n self.source_ip = None\n self.chunk_size = chunk_size\n self.compression = compression\n\n if socket_wrapper is not None:\n if not isinstance(socket_wrapper, Callable):\n raise TypeError('Value \"socket_wrapper\" should be a function.')\n self.socket_wrapper = socket_wrapper\n\n if use_config:\n self.clusters = []\n self.__load_config(config_path)\n else:\n self.clusters = [Cluster(f\"{server}:{port}\")]\n\n if source_ip is not None:\n self.source_ip = source_ip\n\n def __read_config(self, config: configparser.SectionProxy) -> None:\n server_row = config.get('ServerActive') or config.get('Server') or '127.0.0.1:10051'\n\n for cluster in server_row.split(','):\n self.clusters.append(Cluster(cluster.strip()))\n\n if 'SourceIP' in config:\n self.source_ip = config.get('SourceIP')\n\n for key in config:\n if key.startswith('tls'):\n self.tls[key] = config.get(key)\n\n def __load_config(self, filepath: str) -> None:\n config = configparser.ConfigParser(strict=False)\n\n with open(filepath, 'r', encoding='utf-8') as cfg:\n config.read_string('[root]\\n' + cfg.read())\n self.__read_config(config['root'])\n\n def __get_response(self, conn: socket) -> Union[str, None]:\n try:\n result = json.loads(\n ZabbixProtocol.parse_packet(conn, log, ProcessingError)\n )\n except json.decoder.JSONDecodeError as err:\n log.debug('Unexpected response was received from Zabbix.')\n raise err\n\n log.debug('Received data: %s', result)\n\n return result\n\n def __create_request(self, items: list) -> dict:\n return {\n \"request\": \"sender data\",\n \"data\": [i.to_json() for i in items]\n }\n\n def __chunk_send(self, items: list) -> dict:\n responses = {}\n\n packet = ZabbixProtocol.create_packet(self.__create_request(items), log, self.compression)\n\n for cluster in self.clusters:\n active_node = None\n\n for i, node in enumerate(cluster.nodes):\n\n log.debug('Trying to send data to %s', node)\n\n try:\n if self.use_ipv6:\n connection = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)\n else:\n connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n except socket.error:\n raise ProcessingError(f\"Error creating socket for {node}\") from None\n\n connection.settimeout(self.timeout)\n\n if self.source_ip:\n connection.bind((self.source_ip, 0,))\n\n try:\n connection.connect((node.address, node.port))\n except (TimeoutError, socket.timeout):\n log.debug(\n 'The connection to %s timed out after %d seconds',\n node,\n self.timeout\n )\n except (ConnectionRefusedError, socket.gaierror) as err:\n log.debug(\n 'An error occurred while trying to connect to %s: %s',\n node,\n getattr(err, 'msg', str(err))\n )\n else:\n if i > 0:\n cluster.nodes[0], cluster.nodes[i] = cluster.nodes[i], cluster.nodes[0]\n active_node = node\n break\n\n if active_node is None:\n log.error(\n 'Couldn\\'t connect to all of cluster nodes: %s',\n str(list(cluster.nodes))\n )\n connection.close()\n raise ProcessingError(\n f\"Couldn't connect to all of cluster nodes: {list(cluster.nodes)}\"\n )\n\n if self.socket_wrapper is not None:\n connection = 
self.socket_wrapper(connection, self.tls)\n\n try:\n connection.sendall(packet)\n except (TimeoutError, socket.timeout) as err:\n log.error(\n 'The connection to %s timed out after %d seconds while trying to send',\n active_node,\n self.timeout\n )\n connection.close()\n raise err\n except (OSError, socket.error) as err:\n log.warning(\n 'An error occurred while trying to send to %s: %s',\n active_node,\n getattr(err, 'msg', str(err))\n )\n connection.close()\n raise err\n\n try:\n response = self.__get_response(connection)\n except ConnectionResetError as err:\n log.debug('Get value error: %s', err)\n raise err\n log.debug('Response from %s: %s', active_node, response)\n\n if response and response.get('response') != 'success':\n raise socket.error(response)\n\n responses[active_node] = response\n\n try:\n connection.close()\n except socket.error:\n pass\n\n return responses\n\n def send(self, items: list, merge_responses: bool = True) -> dict:\n \"\"\"Sends packets and receives an answer from Zabbix.\n\n Args:\n items (list): List of ItemValue objects.\n merge_responses (bool, optional): Whether to merge all responses data \\\nto a single one. Defaults to `True`.\n\n Returns:\n dict: Dictionary of TrapperResponse objects for each Node object.\n \"\"\"\n\n result = {}\n\n if not all(isinstance(item, ItemValue) for item in items):\n log.debug('Received unexpected item list. It must be a list of ItemValue objects: %s',\n json.dumps(items))\n raise ProcessingError(f\"Received unexpected item list. \\\nIt must be a list of ItemValue objects: {json.dumps(items)}\")\n\n chunks = [items[i:i + self.chunk_size] for i in range(0, len(items), self.chunk_size)]\n for i, chunk in enumerate(chunks):\n\n resp_by_node = self.__chunk_send(chunk)\n\n for node, resp in resp_by_node.items():\n if merge_responses:\n if node not in result:\n result[node] = TrapperResponse()\n result[node].add(resp, i + 1)\n else:\n if node not in result:\n result[node] = []\n result[node].append(TrapperResponse(i+1).add(resp))\n\n return result\n\n def send_value(self, host: str, key: str,\n value: str, clock: Union[int, None] = None,\n ns: Union[int, None] = None, merge_responses: bool = True) -> dict:\n \"\"\"Sends one value and receives an answer from Zabbix.\n\n Args:\n host (str): Specify host name the item belongs to (as registered in Zabbix frontend).\n key (str): Specify item key to send value to.\n value (str): Specify item value.\n clock (int, optional): Specify time in Unix timestamp format. Defaults to `None`.\n ns (int, optional): Specify time expressed in nanoseconds. Defaults to `None`.\n merge_responses (bool, optional): Whether to merge all responses data \\\nto a single one. Defaults to `True`.\n\n Returns:\n dict: Dictionary of TrapperResponse object for each Node object.\n \"\"\"\n\n return self.send([ItemValue(host, key, value, clock, ns)], merge_responses)" }, { "identifier": "TrapperResponse", "path": "zabbix_utils/sender.py", "snippet": "class TrapperResponse():\n \"\"\"Contains response from Zabbix server/proxy.\n\n Args:\n chunk (int, optional): Current chunk number. 
Defaults to `1`.\n \"\"\"\n\n def __init__(self, chunk: int = 1):\n self.__processed = 0\n self.__failed = 0\n self.__total = 0\n self.__time = 0\n self.__chunk = chunk\n\n def __repr__(self) -> str:\n result = {}\n for key, value in self.__dict__.items():\n result[\n key[len(f\"_{self.__class__.__name__}__\"):]\n ] = str(value) if isinstance(value, Decimal) else value\n\n return json.dumps(result)\n\n def parse(self, response: dict) -> dict:\n \"\"\"Parse response from Zabbix.\n\n Args:\n response (dict): Raw response from Zabbix.\n\n Raises:\n ProcessingError: Raises if unexpected response received\n \"\"\"\n\n fields = {\n \"processed\": ('[Pp]rocessed', r'\\d+'),\n \"failed\": ('[Ff]ailed', r'\\d+'),\n \"total\": ('[Tt]otal', r'\\d+'),\n \"time\": ('[Ss]econds spent', r'\\d+\\.\\d+')\n }\n\n pattern = re.compile(\n r\";\\s+?\".join([rf\"{r[0]}:\\s+?(?P<{k}>{r[1]})\" for k, r in fields.items()])\n )\n\n info = response.get('info')\n if not info:\n log.debug('Received unexpected response: %s', response)\n raise ProcessingError(f\"Received unexpected response: {response}\")\n\n res = pattern.search(info).groupdict()\n\n return res\n\n def add(self, response: dict, chunk: Union[int, None] = None) -> Self:\n \"\"\"Add and merge response data from Zabbix.\n\n Args:\n response (dict): Raw response from Zabbix.\n chunk (Union[int, None], optional): Chunk number. Defaults to `None`.\n \"\"\"\n\n resp = self.parse(response)\n\n def add_value(cls, key, value):\n setattr(\n cls,\n key,\n getattr(cls, key) + value\n )\n\n for k, v in resp.items():\n add_value(\n self,\n f\"_{self.__class__.__name__}__{k}\",\n Decimal(v) if '.' in v else int(v)\n )\n if chunk is not None:\n self.__chunk = chunk\n\n return self\n\n @property\n def processed(self) -> int:\n \"\"\"Returns number of processed packets.\n\n Returns:\n int: Number of processed packets.\n \"\"\"\n\n return self.__processed\n\n @property\n def failed(self) -> int:\n \"\"\"Returns number of failed packets.\n\n Returns:\n int: Number of failed packets.\n \"\"\"\n\n return self.__failed\n\n @property\n def total(self) -> int:\n \"\"\"Returns total number of packets.\n\n Returns:\n int: Total number of packets.\n \"\"\"\n\n return self.__total\n\n @property\n def time(self) -> int:\n \"\"\"Returns value of spent time.\n\n Returns:\n int: Spent time for the packets sending.\n \"\"\"\n\n return self.__time\n\n @property\n def chunk(self) -> int:\n \"\"\"Returns current chunk number.\n\n Returns:\n int: Number of the current chunk.\n \"\"\"\n\n return self.__chunk" } ]
import sys import time import unittest from zabbix_utils.getter import Getter from zabbix_utils.exceptions import APIRequestError from zabbix_utils.api import ZabbixAPI, APIVersion from zabbix_utils.sender import ItemValue, Sender, TrapperResponse
8,658
#!/usr/bin/env python # Copyright (C) 2001-2023 Zabbix SIA # # Zabbix SIA licenses this file under the MIT License. # See the LICENSE file in the project root for more information. sys.path.append('.') ZABBIX_URL = 'localhost' ZABBIX_USER = 'Admin' ZABBIX_PASSWORD = 'zabbix' class CompatibilityAPITest(unittest.TestCase): """Compatibility test with Zabbix API version 6.0""" def setUp(self): self.url = 'localhost' self.user = 'Admin' self.password = 'zabbix' self.token_id = None self.token = None self.zapi = ZabbixAPI( url=self.url ) self._create_token() def _create_token(self): """Tests auth using username and password""" self.assertEqual( type(self.zapi), ZabbixAPI, "Creating ZabbixAPI object was going wrong") self.assertEqual( type(self.zapi.api_version()), APIVersion, "Version getting was going wrong") self.zapi.login( user=self.user, password=self.password ) self.assertIsNotNone(self.zapi._ZabbixAPI__session_id, "Login was going wrong") resp = self.zapi.user.checkAuthentication(sessionid=self.zapi._ZabbixAPI__session_id) self.assertEqual( type(resp), dict, "Request user.checkAuthentication was going wrong") tokens = self.zapi.token.get( filter={'name': f"{self.user} [{self.__class__.__name__}]"}, output=['tokenid'] ) if tokens: self.token_id = int(tokens[0]['tokenid']) self.assertEqual( type(self.token_id), int, "Request token.get was going wrong") else: self.token_id = int(self.zapi.token.create( name=f"{self.user} [{self.__class__.__name__}]" )['tokenids'][0]) self.assertEqual( type(self.token_id), int, "Request token.create was going wrong") self.token = self.zapi.token.generate(*[self.token_id])[0]['token'] self.assertEqual(type(self.token), str, "Request token.generate was going wrong") self.zapi.logout() self.assertIsNone(self.zapi._ZabbixAPI__session_id, "Logout was going wrong")
#!/usr/bin/env python # Copyright (C) 2001-2023 Zabbix SIA # # Zabbix SIA licenses this file under the MIT License. # See the LICENSE file in the project root for more information. sys.path.append('.') ZABBIX_URL = 'localhost' ZABBIX_USER = 'Admin' ZABBIX_PASSWORD = 'zabbix' class CompatibilityAPITest(unittest.TestCase): """Compatibility test with Zabbix API version 6.0""" def setUp(self): self.url = 'localhost' self.user = 'Admin' self.password = 'zabbix' self.token_id = None self.token = None self.zapi = ZabbixAPI( url=self.url ) self._create_token() def _create_token(self): """Tests auth using username and password""" self.assertEqual( type(self.zapi), ZabbixAPI, "Creating ZabbixAPI object was going wrong") self.assertEqual( type(self.zapi.api_version()), APIVersion, "Version getting was going wrong") self.zapi.login( user=self.user, password=self.password ) self.assertIsNotNone(self.zapi._ZabbixAPI__session_id, "Login was going wrong") resp = self.zapi.user.checkAuthentication(sessionid=self.zapi._ZabbixAPI__session_id) self.assertEqual( type(resp), dict, "Request user.checkAuthentication was going wrong") tokens = self.zapi.token.get( filter={'name': f"{self.user} [{self.__class__.__name__}]"}, output=['tokenid'] ) if tokens: self.token_id = int(tokens[0]['tokenid']) self.assertEqual( type(self.token_id), int, "Request token.get was going wrong") else: self.token_id = int(self.zapi.token.create( name=f"{self.user} [{self.__class__.__name__}]" )['tokenids'][0]) self.assertEqual( type(self.token_id), int, "Request token.create was going wrong") self.token = self.zapi.token.generate(*[self.token_id])[0]['token'] self.assertEqual(type(self.token), str, "Request token.generate was going wrong") self.zapi.logout() self.assertIsNone(self.zapi._ZabbixAPI__session_id, "Logout was going wrong")
with self.assertRaises(APIRequestError,
1
2023-10-16 12:49:35+00:00
12k
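The context snippets in the record above document the public API of zabbix_utils (ZabbixAPI, Sender/ItemValue/TrapperResponse, Getter). A minimal usage sketch assembled from those docstrings follows; it is illustrative only, and the host names, credentials and item keys are assumptions mirroring the test constants rather than additional repository code:

# Illustrative sketch based on the docstrings quoted in this record's context.
from zabbix_utils.api import ZabbixAPI
from zabbix_utils.sender import ItemValue, Sender
from zabbix_utils.getter import Getter

api = ZabbixAPI(url='localhost')            # URL default shown in the ZabbixAPI docstring
api.login(user='Admin', password='zabbix')  # username/password auth, as in the compatibility test
print(api.version)                          # APIVersion object, e.g. 6.0.x
api.logout()

sender = Sender(server='127.0.0.1', port=10051)
responses = sender.send([ItemValue('test_host', 'test.key', '42')])
for node, resp in responses.items():        # one TrapperResponse per cluster node
    print(node, resp.processed, resp.failed, resp.total, resp.time)

agent = Getter(host='127.0.0.1', port=10050)
print(agent.get('system.uname'))            # query a Zabbix agent item by key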
YefanZhou/TempBalance
main_tb.py
[ { "identifier": "Tempbalance", "path": "tempbalance.py", "snippet": "class Tempbalance(object):\n def __init__(self, \n net, \n EVALS_THRESH=0.00001,\n bins=100, \n conv_norm=0.5,\n pl_fitting='median',\n xmin_pos=2,\n filter_zeros=False,\n remove_first_layer=True,\n remove_last_layer=True,\n eigs_thresh=50,\n esd_metric_for_tb='alpha',\n assign_func='tb_linear_map',\n lr_min_ratio=0.5,\n lr_max_ratio=1.5,\n batchnorm=True,\n batchnorm_type='name',\n layernorm=False\n ):\n \"\"\"init function\n Args:\n net (nn.module): net to train\n EVALS_THRESH (float, ): threshold to filter small eigenvalue. Defaults to 0.00001.\n bins (int, int): ESD bins. Defaults to 100.\n conv_norm (float, ): conv norm. Defaults to 0.5.\n pl_fitting (str, ): powerlaw fitting method. Defaults to median, ['median', 'goodness-of-fit', 'fix-finger']\n xmin_pos (int, ): set the position of minimum eigenvalue in the tail. Defaults to 2.\n filter_zeros (bool, ): filter small eigenvalues or not. Defaults to False.\n remove_first_layer (bool, ): whether exclude first layer in TB. Defaults to True.\n remove_last_layer (bool, ): whether exclude last layer in TB. Defaults to True.\n esd_metric_for_tb (str, ): metric for TB scheduling. Defaults to 'alpha'.\n assign_func (str, ): learning rate assignment function. Defaults to 'tb_linear_map'.\n lr_min_ratio (float, ): learning rate lower bound. Defaults to 0.5.\n lr_max_ratio (float, ): learning rate upper bound. Defaults to 1.5.\n batchnorm (bool, ): whether adjust batch norm learning rate using TB. Defaults to True.\n batchnorm_type (str, ): how to set learning rate for batchnorm layers\n layernorm (bool, ): whether adjust layer norm learning rate using TB. Defaults to True.\n \"\"\"\n self.net = net\n self.EVALS_THRESH = EVALS_THRESH\n self.bins = bins\n self.conv_norm = conv_norm\n self.pl_fitting = pl_fitting\n self.xmin_pos = xmin_pos\n self.filter_zeros = filter_zeros\n self.remove_first_layer = remove_first_layer\n self.remove_last_layer = remove_last_layer\n self.eigs_thresh = eigs_thresh\n self.esd_metric_for_tb = esd_metric_for_tb\n self.assign_func = assign_func\n self.lr_min_ratio = lr_min_ratio\n self.lr_max_ratio = lr_max_ratio\n self.batchnorm = batchnorm\n self.layernorm = layernorm\n self.bn_to_conv = {}\n self.ln_to_linear = {}\n # print('EVALS_THRESH', self.EVALS_THRESH, type(self.EVALS_THRESH) )\n # print('bins', self.bins, type(self.bins) )\n # print('conv_norm', self.conv_norm, type(self.conv_norm) )\n # print('pl_fitting', self.pl_fitting, type(self.pl_fitting) )\n # print('xmin_pos', self.xmin_pos, type(self.xmin_pos) )\n # print('filter_zeros', self.filter_zeros, type(self.filter_zeros) )\n # print('remove_first_layer', self.remove_first_layer, type(self.remove_first_layer) )\n # print('remove_last_layer', self.remove_last_layer, type(self.remove_last_layer) )\n # print('esd_metric_for_tb', self.esd_metric_for_tb, type(self.esd_metric_for_tb) )\n # print('assign_func', self.assign_func, type(self.assign_func) )\n # print('lr_min_ratio', self.lr_min_ratio, type(self.lr_min_ratio) )\n # print('lr_max_ratio', self.lr_max_ratio, type(self.lr_max_ratio) )\n # print('batchnorm', self.batchnorm, type(self.batchnorm) )\n \n if batchnorm and batchnorm_type == 'name':\n # let the batch norm layer change lr corresponding to the layer\n # with the same layer name \n longname_lst = []\n for name, m in self.net.named_modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n longname_lst.append(name)\n for name, module in self.net.named_modules():\n if 
isinstance(module, nn.BatchNorm2d) \\\n and name.replace('bn', 'conv') in longname_lst:\n self.bn_to_conv[name] = name.replace('bn', 'conv')\n \n elif batchnorm and batchnorm_type == 'order':\n # let the batch norm layer change lr corresponding to the \n # conv layer before current layer\n longname_lst = []\n type_lst = []\n for name, module in self.net.named_modules():\n if isinstance(module, nn.Conv2d):\n longname_lst.append(name)\n type_lst.append('nn.Conv2d')\n if isinstance(module, nn.BatchNorm2d):\n if type_lst[-1] == 'nn.Conv2d':\n self.bn_to_conv[name] = longname_lst[-1]\n longname_lst.append(name)\n type_lst.append('nn.BatchNorm2d')\n \n if self.layernorm:\n longname_lst = []\n type_lst = []\n for name, module in self.net.named_modules():\n if isinstance(module, nn.Linear):\n longname_lst.append(name)\n type_lst.append('nn.Linear')\n if isinstance(module, nn.LayerNorm):\n if type_lst[-1] == 'nn.Linear':\n self.ln_to_linear[name] = longname_lst[-1]\n longname_lst.append(name)\n type_lst.append('nn.LayerNorm')\n \n \n def build_optimizer_param_group(self, untuned_lr=0.1, initialize=True):\n \"\"\"build the parameter group for optimizer\n\n Args:\n untuned_lr (float, ): global learning rate that is not tuned. Defaults to 0.1.\n initialize (bool, ): if True, build a list of dictionary, if False, build a list of learning rate . Defaults to True.\n\n Returns:\n _type_: _description_\n \"\"\"\n metrics = self.net_esd_estimator()\n layer_stats = pd.DataFrame({key:metrics[key] for key in metrics if key!='eigs'})\n \n if self.remove_first_layer:\n layer_stats = layer_stats.drop(labels=0, axis=0)\n # index must be reset otherwise may delete the wrong row \n layer_stats.index = list(range(len(layer_stats[self.esd_metric_for_tb])))\n if self.remove_last_layer:\n layer_stats = layer_stats.drop(labels=len(layer_stats) - 1, axis=0)\n # index must be reset otherwise may delete the wrong row \n layer_stats.index = list(range(len(layer_stats[self.esd_metric_for_tb])))\n \n # remove layers with number of eigs less than a threshold\n layer_stats = layer_stats[layer_stats['eigs_num'] >= self.eigs_thresh]\n layer_stats.index = list(range(len(layer_stats[self.esd_metric_for_tb])))\n \n metric_scores = np.array(layer_stats[self.esd_metric_for_tb])\n scheduled_lr = self.get_layer_temps(assign_func=self.assign_func, \n metric_scores=metric_scores, \n untuned_lr=untuned_lr)\n \n layer_stats['scheduled_lr'] = scheduled_lr\n layer_name_to_tune = list(layer_stats['longname'])\n opt_params_groups = []\n params_to_tune_ids = []\n layer_count = 0\n # these params should be tuned\n for name, module in self.net.named_modules():\n \n # these are the conv layers analyzed by ESD\n if name in layer_name_to_tune:\n params_to_tune_ids += list(map(id, module.parameters()))\n scheduled_lr = layer_stats[layer_stats['longname'] == name]['scheduled_lr'].item()\n if initialize:\n # append a dictionary for initialize optimizer\n opt_params_groups.append({'params': module.parameters(), 'lr': scheduled_lr})\n else:\n # append tuned learning rate \n opt_params_groups.append(scheduled_lr)\n layer_count += 1\n # decide should we tune the batch norm accordingly\n elif self.batchnorm \\\n and isinstance(module, nn.BatchNorm2d) \\\n and name in self.bn_to_conv \\\n and self.bn_to_conv[name] in layer_name_to_tune:\n \n params_to_tune_ids += list(map(id, module.parameters()))\n scheduled_lr = layer_stats[layer_stats['longname'] == self.bn_to_conv[name]]['scheduled_lr'].item()\n if initialize:\n # append a dictionary for initialize 
optimizer\n opt_params_groups.append({'params': module.parameters(), 'lr': scheduled_lr})\n else:\n # append tuned learning rate \n opt_params_groups.append(scheduled_lr)\n layer_count += 1\n \n elif self.layernorm \\\n and isinstance(module, nn.LayerNorm) \\\n and name in self.ln_to_linear \\\n and self.ln_to_linear[name] in layer_name_to_tune:\n \n params_to_tune_ids += list(map(id, module.parameters()))\n scheduled_lr = layer_stats[layer_stats['longname'] == self.ln_to_linear[name]]['scheduled_lr'].item()\n if initialize:\n opt_params_groups.append({'params': module.parameters(), 'lr': scheduled_lr})\n else:\n opt_params_groups.append(scheduled_lr)\n layer_count += 1\n \n if initialize:\n # those params are untuned\n untuned_params = \\\n filter(lambda p: id(p) not in params_to_tune_ids, self.net.parameters())\n opt_params_groups.append({'params': untuned_params, 'lr': untuned_lr}) \n return opt_params_groups, layer_count\n else:\n return opt_params_groups, layer_count\n \n def step(self, optimizer, untuned_lr):\n opt_params_groups, layer_count = \\\n self.build_optimizer_param_group(untuned_lr=untuned_lr, initialize=False)\n for index, param_group in enumerate(optimizer.param_groups):\n if index <= layer_count - 1:\n param_group['lr'] = opt_params_groups[index]\n else:\n param_group['lr'] = untuned_lr\n \n def net_esd_estimator(\n self,\n verbose=False):\n \"\"\"evaluate the ESD of the conv nets\n Args:\n verbose: \n Returns:\n _type_: _description_\n \"\"\"\n results = {\n 'alphahat':[],\n 'alpha':[],\n 'spectral_norm': [],\n 'D': [],\n 'longname':[],\n 'eigs':[],\n 'norm':[],\n 'eigs_num':[]\n }\n if verbose:\n print(\"=================================\")\n print(f\"pl_fitting: {self.pl_fitting}, xmin_pos: {self.xmin_pos}, conv_norm: {self.conv_norm}, filter_zeros: {self.filter_zeros}\")\n print(\"=================================\")\n # iterate through layers\n for name, m in self.net.named_modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n matrix = m.weight.data.clone()\n # normalization and tranpose Conv2d\n if isinstance(m, nn.Conv2d):\n matrix = torch.flatten(matrix, start_dim=2) * math.sqrt(self.conv_norm)\n matrix = matrix.transpose(1, 2).transpose(0, 1)\n eigs = torch.square(torch.linalg.svdvals(matrix).flatten())\n # ascending order \n eigs, _ = torch.sort(eigs, descending=False)\n spectral_norm = eigs[-1].item()\n fnorm = torch.sum(eigs).item()\n \n if self.filter_zeros:\n nz_eigs = eigs[eigs > self.EVALS_THRESH]\n N = len(nz_eigs)\n # somethines N may equal 0, if that happens, we don't filter eigs\n if N == 0:\n nz_eigs = eigs\n N = len(nz_eigs)\n else:\n nz_eigs = eigs\n N = len(nz_eigs)\n\n log_nz_eigs = torch.log(nz_eigs)\n\n if self.pl_fitting == 'median':\n i = int(len(nz_eigs) / self.xmin_pos) \n xmin = nz_eigs[i]\n n = float(N - i)\n seq = torch.arange(n).cuda()\n final_alpha = 1 + n / (torch.sum(log_nz_eigs[i:]) - n * log_nz_eigs[i])\n final_D = torch.max(torch.abs(\n 1 - (nz_eigs[i:] / xmin) ** (-final_alpha + 1) - seq / n \n ))\n else:\n alphas = torch.zeros(N-1)\n Ds = torch.ones(N-1)\n if self.pl_fitting == 'fix-finger':\n hist_nz_eigs = torch.log10(nz_eigs)\n min_e, max_e = hist_nz_eigs.min(), hist_nz_eigs.max()\n counts = torch.histc(hist_nz_eigs, self.bins, min=min_e, max=max_e)\n boundaries = torch.linspace(min_e, max_e, self.bins + 1)\n h = counts, boundaries\n ih = torch.argmax(h[0]) # \n xmin2 = 10 ** h[1][ih]\n xmin_min = torch.log10(0.95 * xmin2)\n xmin_max = 1.5 * xmin2\n \n for i, xmin in enumerate(nz_eigs[:-1]):\n if 
self.pl_fitting == 'fix-finger':\n if xmin < xmin_min:\n continue\n if xmin > xmin_max:\n break\n\n n = float(N - i)\n seq = torch.arange(n).cuda()\n alpha = 1 + n / (torch.sum(log_nz_eigs[i:]) - n * log_nz_eigs[i])\n alphas[i] = alpha\n if alpha > 1:\n Ds[i] = torch.max(torch.abs(\n 1 - (nz_eigs[i:] / xmin) ** (-alpha + 1) - seq / n \n ))\n\n min_D_index = torch.argmin(Ds)\n final_alpha = alphas[min_D_index]\n final_D = Ds[min_D_index]\n \n final_alpha = final_alpha.item()\n final_D = final_D.item()\n final_alphahat=final_alpha*math.log10(spectral_norm)\n\n results['spectral_norm'].append(spectral_norm)\n results['alphahat'].append(final_alphahat)\n results['norm'].append(fnorm)\n results['alpha'].append(final_alpha)\n results['D'].append(final_D)\n results['longname'].append(name)\n results['eigs'].append(eigs.detach().cpu().numpy())\n results['eigs_num'].append(len(eigs))\n \n return results\n \n \n def get_layer_temps(self, assign_func, metric_scores, untuned_lr):\n n = len(metric_scores)\n idx = [i for i in range(n)]\n temps = np.array([untuned_lr] * n)\n \n if assign_func == 'tb_linear_map':\n lr_range = [self.lr_min_ratio * untuned_lr, self.lr_max_ratio * untuned_lr]\n score_range = [min(metric_scores), max(metric_scores)]\n temps = np.interp(metric_scores, score_range, lr_range)\n\n elif assign_func == 'tb_sqrt':\n temps = np.sqrt(metric_scores)/np.sum(np.sqrt(metric_scores)) * n * untuned_lr\n\n elif assign_func == 'tb_log2':\n temps = np.log2(metric_scores)/np.sum(np.log2(metric_scores)) * n * untuned_lr\n\n elif assign_func == 'tb_step':\n idxes = np.argsort(metric_scores)\n unsort_temps = [untuned_lr * (self.lr_min_ratio + (self.lr_max_ratio - self.lr_min_ratio) * i / n) for i in range(n)]\n temps = [value for _, value in sorted(list(zip(idxes, unsort_temps)), key=itemgetter(0))]\n \n else:\n raise NotImplementedError\n \n return temps" }, { "identifier": "SGDSNR", "path": "sgdsnr.py", "snippet": "class SGDSNR(Optimizer):\n r\"\"\"Implements stochastic gradient descent (optionally with momentum).\n\n Nesterov momentum is based on the formula from\n `On the importance of initialization and momentum in deep learning`__.\n\n Args:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float): learning rate\n momentum (float, optional): momentum factor (default: 0)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n dampening (float, optional): dampening for momentum (default: 0)\n nesterov (bool, optional): enables Nesterov momentum (default: False)\n\n Example:\n >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)\n >>> optimizer.zero_grad()\n >>> loss_fn(model(input), target).backward()\n >>> optimizer.step()\n\n __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf\n\n .. note::\n The implementation of SGD with Momentum/Nesterov subtly differs from\n Sutskever et. al. and implementations in some other frameworks.\n\n Considering the specific case of Momentum, the update can be written as\n\n .. math::\n \\begin{aligned}\n v_{t+1} & = \\mu * v_{t} + g_{t+1}, \\\\\n p_{t+1} & = p_{t} - \\text{lr} * v_{t+1},\n \\end{aligned}\n\n where :math:`p`, :math:`g`, :math:`v` and :math:`\\mu` denote the\n parameters, gradient, velocity, and momentum respectively.\n\n This is in contrast to Sutskever et. al. and\n other frameworks which employ an update of the form\n\n .. 
math::\n \\begin{aligned}\n v_{t+1} & = \\mu * v_{t} + \\text{lr} * g_{t+1}, \\\\\n p_{t+1} & = p_{t} - v_{t+1}.\n \\end{aligned}\n\n The Nesterov version is analogously modified.\n \"\"\"\n\n def __init__(self, params, lr=required, \n momentum=0, dampening=0,\n weight_decay=0, nesterov=False, \n spectrum_regularization=0, \n differentiable: bool = False):\n if lr is not required and lr < 0.0:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if momentum < 0.0:\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\n if weight_decay < 0.0:\n raise ValueError(\"Invalid weight_decay value: {}\".format(weight_decay))\n\n defaults = dict(lr=lr, momentum=momentum, dampening=dampening,\n weight_decay=weight_decay, nesterov=nesterov, \n spectrum_regularization=spectrum_regularization)\n if nesterov and (momentum <= 0 or dampening != 0):\n raise ValueError(\"Nesterov momentum requires a momentum and zero dampening\")\n super(SGDSNR, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(SGDSNR, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('nesterov', False)\n group.setdefault('spectrum_regularization', 0)\n \n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n spectrum_regularization = group['spectrum_regularization']\n \n\n for p in group['params']:\n if p.grad is None:\n continue\n d_p = p.grad\n\n if weight_decay != 0:\n d_p = d_p.add(p, alpha=weight_decay)\n else:\n if spectrum_regularization != 0:\n #print(\"spectrum_regularization\", spectrum_regularization, p.dim())\n if p.dim() > 1:\n d_p = d_p.add(torch.reshape(compute_weight(p.to(device)), p.shape),\n alpha=spectrum_regularization)\n else:\n pass\n else:\n pass\n\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(d_p, alpha=1 - dampening)\n if nesterov:\n d_p = d_p.add(buf, alpha=momentum)\n else:\n d_p = buf\n\n p.add_(d_p, alpha=-group['lr'])\n\n return loss" }, { "identifier": "LARS", "path": "lars_optim/lars.py", "snippet": "class LARS(Optimizer):\n def __init__(self, params, lr=required, momentum=0, weight_decay=0, eeta=0.001, epsilon=1e-5):\n if lr is not required and lr < 0.0:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if momentum < 0.0:\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\n if weight_decay < 0.0:\n raise ValueError(\n \"Invalid weight_decay value: {}\".format(weight_decay))\n if eeta <= 0 or eeta > 1:\n raise ValueError(\"Invalid eeta value: {}\".format(eeta))\n if epsilon <= 0:\n raise ValueError(\"Invalid epsilon value: {}\".format(epsilon))\n defaults = dict(lr=lr, momentum=momentum,\n weight_decay=weight_decay, eeta=eeta, epsilon=epsilon, lars=True)\n\n super().__init__(params, defaults)\n\n @torch.no_grad()\n def step(self, closure=None):\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n 
momentum = group['momentum']\n eeta = group['eeta']\n lr = group['lr']\n lars = group['lars']\n eps = group['epsilon']\n\n for p in group['params']:\n if p.grad is None:\n continue\n decayed_grad = p.grad\n scaled_lr = lr\n if lars:\n w_norm = torch.norm(p)\n g_norm = torch.norm(p.grad)\n trust_ratio = torch.where(\n w_norm > 0 and g_norm > 0,\n eeta * w_norm / (g_norm + weight_decay * w_norm + eps),\n torch.ones_like(w_norm)\n )\n trust_ratio.clamp_(0.0, 50)\n scaled_lr *= trust_ratio.item()\n if weight_decay != 0:\n decayed_grad = decayed_grad.add(p, alpha=weight_decay)\n decayed_grad = torch.clamp(decayed_grad, -10.0, 10.0)\n\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = torch.clone(\n decayed_grad).detach()\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(decayed_grad)\n decayed_grad = buf\n\n p.add_(decayed_grad, alpha=-scaled_lr)\n\n return loss" }, { "identifier": "LAMB", "path": "lars_optim/lamb.py", "snippet": "class LAMB(Optimizer):\n r\"\"\"Implements Lamb algorithm.\n It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n adam (bool, optional): always use trust ratio = 1, which turns this into\n Adam. Useful for comparison purposes.\n .. 
_Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:\n https://arxiv.org/abs/1904.00962\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,\n weight_decay=0, bias_correction=False):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\n \"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\n \"Invalid beta parameter at index 1: {}\".format(betas[1]))\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay, bias_correction=bias_correction)\n super().__init__(params, defaults)\n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n torch.nn.utils.clip_grad_norm_(\n parameters=[\n p for group in self.param_groups for p in group['params']],\n max_norm=1.0,\n norm_type=2\n )\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError(\n 'Lamb does not support sparse gradients, consider SparseAdam instad.')\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n # Decay the first and second moment running average coefficient\n # m_t\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n # v_t\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n\n # Paper v3 does not use debiasing.\n # bias_correction1 = 1 - beta1 ** state['step']\n # bias_correction2 = 1 - beta2 ** state['step']\n # Apply bias to lr to avoid broadcast.\n # * math.sqrt(bias_correction2) / bias_correction1\n scaled_lr = group['lr']\n if group['bias_correction']:\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n exp_avg.div_(bias_correction1)\n exp_avg_sq.div_(bias_correction2)\n update = exp_avg / exp_avg_sq.sqrt().add(group['eps'])\n if group['weight_decay'] != 0:\n update.add_(p.data, alpha=group['weight_decay'])\n w_norm = torch.norm(p)\n g_norm = torch.norm(update)\n trust_ratio = torch.where(\n w_norm > 0 and g_norm > 0,\n w_norm / g_norm,\n torch.ones_like(w_norm)\n )\n scaled_lr *= trust_ratio.item()\n\n p.data.add_(update, alpha=-scaled_lr)\n\n return loss" }, { "identifier": "train", "path": "utils.py", "snippet": "def train( epoch, \n net, \n num_epochs, \n trainloader, \n criterion, \n optimizer, \n optim_type='SGD', \n tb_update_interval=0, \n untuned_lr=0, \n args=None):\n \n net.train()\n net.training = True\n train_loss = 0\n correct = 0\n total = 0 \n print(f'Training Epoch {epoch}')\n pbar = tqdm.tqdm(total=len(trainloader), desc=\"Training\")\n\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n inputs, targets = inputs.cuda(), targets.cuda() # GPU settings\n optimizer.zero_grad()\n outputs = net(inputs) # Forward Propagation\n loss = criterion(outputs, targets) # Loss\n loss.backward() # Backward 
Propagation\n optimizer.step() # Optimizer update\n\n train_loss += loss.item() * targets.size(0)\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n pbar.update(1)\n \n # if tb_update_interval > 0 and args.total_iters % tb_update_interval == 0:\n # print(f\"--------------------> tb_update_interval: {tb_update_interval}, temp_balance\")\n # temp_balance(args=args, net=net, optimizer=optimizer, epoch=epoch, untuned_lr=untuned_lr, iters=args.total_iters)\n \n # if tb_update_interval > 0:\n # args.total_iters += 1\n \n pbar.close()\n train_loss /= total\n acc = 100.*correct/total\n acc = acc.item()\n\n return acc, train_loss" }, { "identifier": "test", "path": "utils.py", "snippet": "def test(epoch, net, testloader, criterion):\n net.eval()\n net.training = False\n test_loss = 0\n correct = 0\n total = 0\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n inputs, targets = inputs.cuda(), targets.cuda()\n outputs = net(inputs)\n loss = criterion(outputs, targets)\n\n test_loss += loss.item() * targets.size(0)\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n\n # Save checkpoint when best model\n acc = 100.*correct/total\n acc = acc.item()\n test_loss = test_loss/total\n \n return acc, test_loss" }, { "identifier": "getNetwork", "path": "utils.py", "snippet": "def getNetwork(args, num_classes):\n if args.net_type == 'vgg_cifar':\n net = VGG_cifar(args.depth, num_classes, args.widen_factor)\n file_name = 'vgg_cifar'\n elif args.net_type == 'resnet':\n net = ResNet(args.depth, num_classes, args.widen_factor)\n file_name = 'resnet'\n elif args.net_type == 'resnet_tiny_imagenet':\n net = ResNet_tiny_imagenet(args.depth, num_classes=num_classes)\n file_name = 'resnet_tiny_imagenet'\n elif args.net_type == 'wide_resnet':\n net = Wide_ResNet(depth=args.depth, \n widen_factor=args.widen_factor, \n num_classes=num_classes)\n file_name = 'wide_resnet'\n \n return net, file_name" }, { "identifier": "save_args_to_file", "path": "utils.py", "snippet": "def save_args_to_file(args, output_file_path):\n with open(output_file_path, \"w\") as output_file:\n json.dump(vars(args), output_file, indent=4)" } ]
import os
import sys
import time
import argparse
import random
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import numpy as np
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import config as cf
import torch_optimizer
from pathlib import Path
from os.path import join
from tempbalance import Tempbalance
from sgdsnr import SGDSNR
from adamp import SGDP, AdamP
from lars_optim import LARS, LAMB
from utils import train, test, getNetwork, save_args_to_file
10,394
transform=transform_train) testset = torchvision.datasets.SVHN(root=data_path, split='test', download=True, transform=transform_test) num_classes = 10 elif(args.dataset == 'tiny-imagenet-200'): print("| Preparing tiny-imagenet-200 dataset...") sys.stdout.write("| ") trainset = datasets.ImageFolder(os.path.join(data_path, 'train'), transform_train) testset = datasets.ImageFolder(os.path.join(data_path, 'val'), transform_test) num_classes = 200 else: raise NotImplementedError trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=6) testloader = torch.utils.data.DataLoader(testset, batch_size=cf.eval_batchsize[args.dataset], shuffle=False, num_workers=4) Path(args.ckpt_path).mkdir(parents=True, exist_ok=True) if args.print_tofile: # Open files for stdout and stderr redirection stdout_file = open(os.path.join(args.ckpt_path, 'stdout.log'), 'w') stderr_file = open(os.path.join(args.ckpt_path, 'stderr.log'), 'w') # Redirect stdout and stderr to the files sys.stdout = stdout_file sys.stderr = stderr_file # Model print('\n[Phase 2] : Model setup') if args.resume: # Load checkpoint print('| Resuming from checkpoint...') net, file_name = getNetwork(args, num_classes) checkpoint = torch.load(args.resume, map_location='cpu') net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count())) net.load_state_dict(checkpoint['net']) best_acc = checkpoint['test_acc'] start_epoch = checkpoint['epoch'] print(f"Loaded Epoch: {start_epoch} \n Test Acc: {best_acc:.3f} Train Acc: {checkpoint['train_acc']:.3f}") else: print('| Building net type [' + args.net_type + ']...') net, file_name = getNetwork(args, num_classes) net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count())) best_acc = 0 if use_cuda: net.cuda() cudnn.benchmark = True criterion = nn.CrossEntropyLoss() print(net) if args.use_tb: print("##############Enable and init Temp Balancing##################") tb_scheduler = Tempbalance(net=net, pl_fitting=args.pl_fitting, xmin_pos=args.xmin_pos, filter_zeros=args.filter_zeros, remove_first_layer=args.remove_first_layer, remove_last_layer=args.remove_last_layer, esd_metric_for_tb=args.esd_metric_for_tb, assign_func=args.assign_func, lr_min_ratio=args.lr_min_ratio, lr_max_ratio=args.lr_max_ratio, batchnorm=args.batchnorm, batchnorm_type=args.batchnorm_type ) tb_param_group, _ = \ tb_scheduler.build_optimizer_param_group(untuned_lr=args.lr, initialize=True) if args.optim_type == 'SGD': optimizer = optim.SGD(tb_param_group, momentum=0.9, weight_decay=args.weight_decay) elif args.optim_type == 'SGDSNR': optimizer = SGDSNR(tb_param_group, momentum=0.9, weight_decay=args.weight_decay, spectrum_regularization=args.sg) elif args.optim_type == 'SGDP': optimizer = SGDP(tb_param_group, momentum=0.9, weight_decay=args.weight_decay) else: raise NotImplementedError else: print('Disable Temp Balancing') if args.optim_type == 'SGD': optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.weight_decay) elif args.optim_type == 'SGDSNR': optimizer = SGDSNR(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.weight_decay, spectrum_regularization=args.sg) elif args.optim_type == 'SGDP': optimizer = SGDP( net.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.weight_decay) elif args.optim_type == 'AdamP': optimizer = AdamP( net.parameters(), lr=args.lr, betas=(0.9, 0.999), weight_decay=args.weight_decay) elif args.optim_type == 'LARS':
from __future__ import print_function parser = argparse.ArgumentParser(description='PyTorch CIFAR-10 Training') parser.add_argument('--lr', type=float, default=0.01, help='learning_rate') parser.add_argument('--net-type', type=str, default='wide-resnet', help='model') parser.add_argument('--depth', type=int, default=28, help='depth of model') parser.add_argument('--num-epochs', type=int, default=200, help='number of epochs') parser.add_argument('--widen-factor', type=float, default=1, help='width of model') parser.add_argument('--dataset', type=str, default='cifar10', help='dataset = [cifar10/cifar100]') parser.add_argument('--lr-sche', type=str, default='cosine', choices=['cosine']) parser.add_argument('--weight-decay', type=float, default=1e-4) # 5e-4 parser.add_argument('--ckpt-path', type=str, default='', help='path to checkpoints') parser.add_argument('--print-tofile', default=False, type=lambda x: (str(x).lower() == 'true'), help='print to file') parser.add_argument('--batch-size', type=int, default=128) # 5e-4 parser.add_argument('--datadir', type=str, default='', help='directory of dataset') parser.add_argument('--optim-type', type=str, default='SGD', help='type of optimizer') parser.add_argument('--resume', type=str, default='', help='resume from checkpoint') parser.add_argument('--seed', type=int, default=42) parser.add_argument('--ww-interval', type=int, default=1) parser.add_argument('--epochs-to-save', type=int, nargs='+', default=[]) parser.add_argument('--pl-fitting', type=str, default='median', choices=['median', 'goodness-of-fit', 'fix-finger']) # temperature balance related parser.add_argument('--use-tb', default=True, type=lambda x: (str(x).lower() == 'true'), help='use temp balance') parser.add_argument('--remove-last-layer', default=True, type=lambda x: (str(x).lower() == 'true'), help='if remove the last layer') parser.add_argument('--remove-first-layer', default=True, type=lambda x: (str(x).lower() == 'true'), help='if remove the first layer') parser.add_argument('--batchnorm', default=True, type=lambda x: (str(x).lower() == 'true'), help='balancing batch norm layer') parser.add_argument('--filter-zeros', default=False, type=lambda x: (str(x).lower() == 'true') ) parser.add_argument('--esd-metric-for-tb', type=str, default='alpha', help='ww metric') parser.add_argument('--assign-func', type=str, default='', help='assignment function for layerwise lr') parser.add_argument('--lr-min-ratio', type=float, default=0.5) parser.add_argument('--lr-max-ratio', type=float, default=1.5) parser.add_argument('--xmin-pos', type=float, default=2, help='xmin_index = size of eigs // xmin_pos') parser.add_argument('--batchnorm-type', type=str, default='name', help='method to change batchnorm layer learning rate') parser.add_argument('--look-k', type=int, default=5, help='') parser.add_argument('--look-alpha', type=float, default=0.8, help='') parser.add_argument('--T_0', type=int, default=10, help='') parser.add_argument('--T-mult', type=int, default=2, help='') # spectral regularization related parser.add_argument('--sg', type=float, default=0.01, help='spectrum regularization') args = parser.parse_args() print(args) # Save the arguments to a file save_args_to_file(args, join(args.ckpt_path, 'args.json')) def set_seed(seed=42): print(f"=====> Set the random seed as {seed}") np.random.seed(seed) random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) # When running on the CuDNN backend, two further options must be set torch.backends.cudnn.deterministic = True 
torch.backends.cudnn.benchmark = False # Set a fixed value for the hash seed os.environ["PYTHONHASHSEED"] = str(seed) # Hyper Parameter settings use_cuda = torch.cuda.is_available() best_acc = 0 start_epoch = cf.start_epoch set_seed(args.seed) # Data Loader print('\n[Phase 1] : Data Preparation') print(f"prepare preprocessing, {args.dataset}") transform_train = transforms.Compose([ transforms.RandomCrop(cf.crop_size[args.dataset], padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]), ]) # meanstd transformation transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]), ]) data_path = join(args.datadir, args.dataset) if(args.dataset == 'cifar10'): print("| Preparing CIFAR-10 dataset...") sys.stdout.write("| ") trainset = torchvision.datasets.CIFAR10(root=data_path, train=True, download=True, transform=transform_train) testset = torchvision.datasets.CIFAR10(root=data_path, train=False, download=False, transform=transform_test) num_classes = 10 elif(args.dataset == 'cifar100'): print("| Preparing CIFAR-100 dataset...") sys.stdout.write("| ") trainset = torchvision.datasets.CIFAR100(root=data_path, train=True, download=True, transform=transform_train) testset = torchvision.datasets.CIFAR100(root=data_path, train=False, download=False, transform=transform_test) num_classes = 100 elif(args.dataset == 'svhn'): print("| Preparing SVHN dataset...") sys.stdout.write("| ") trainset = torchvision.datasets.SVHN(root=data_path, split='train', download=True, transform=transform_train) testset = torchvision.datasets.SVHN(root=data_path, split='test', download=True, transform=transform_test) num_classes = 10 elif(args.dataset == 'tiny-imagenet-200'): print("| Preparing tiny-imagenet-200 dataset...") sys.stdout.write("| ") trainset = datasets.ImageFolder(os.path.join(data_path, 'train'), transform_train) testset = datasets.ImageFolder(os.path.join(data_path, 'val'), transform_test) num_classes = 200 else: raise NotImplementedError trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=6) testloader = torch.utils.data.DataLoader(testset, batch_size=cf.eval_batchsize[args.dataset], shuffle=False, num_workers=4) Path(args.ckpt_path).mkdir(parents=True, exist_ok=True) if args.print_tofile: # Open files for stdout and stderr redirection stdout_file = open(os.path.join(args.ckpt_path, 'stdout.log'), 'w') stderr_file = open(os.path.join(args.ckpt_path, 'stderr.log'), 'w') # Redirect stdout and stderr to the files sys.stdout = stdout_file sys.stderr = stderr_file # Model print('\n[Phase 2] : Model setup') if args.resume: # Load checkpoint print('| Resuming from checkpoint...') net, file_name = getNetwork(args, num_classes) checkpoint = torch.load(args.resume, map_location='cpu') net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count())) net.load_state_dict(checkpoint['net']) best_acc = checkpoint['test_acc'] start_epoch = checkpoint['epoch'] print(f"Loaded Epoch: {start_epoch} \n Test Acc: {best_acc:.3f} Train Acc: {checkpoint['train_acc']:.3f}") else: print('| Building net type [' + args.net_type + ']...') net, file_name = getNetwork(args, num_classes) net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count())) best_acc = 0 if use_cuda: net.cuda() cudnn.benchmark = True criterion = nn.CrossEntropyLoss() print(net) if args.use_tb: print("##############Enable and init Temp 
Balancing##################") tb_scheduler = Tempbalance(net=net, pl_fitting=args.pl_fitting, xmin_pos=args.xmin_pos, filter_zeros=args.filter_zeros, remove_first_layer=args.remove_first_layer, remove_last_layer=args.remove_last_layer, esd_metric_for_tb=args.esd_metric_for_tb, assign_func=args.assign_func, lr_min_ratio=args.lr_min_ratio, lr_max_ratio=args.lr_max_ratio, batchnorm=args.batchnorm, batchnorm_type=args.batchnorm_type ) tb_param_group, _ = \ tb_scheduler.build_optimizer_param_group(untuned_lr=args.lr, initialize=True) if args.optim_type == 'SGD': optimizer = optim.SGD(tb_param_group, momentum=0.9, weight_decay=args.weight_decay) elif args.optim_type == 'SGDSNR': optimizer = SGDSNR(tb_param_group, momentum=0.9, weight_decay=args.weight_decay, spectrum_regularization=args.sg) elif args.optim_type == 'SGDP': optimizer = SGDP(tb_param_group, momentum=0.9, weight_decay=args.weight_decay) else: raise NotImplementedError else: print('Disable Temp Balancing') if args.optim_type == 'SGD': optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.weight_decay) elif args.optim_type == 'SGDSNR': optimizer = SGDSNR(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.weight_decay, spectrum_regularization=args.sg) elif args.optim_type == 'SGDP': optimizer = SGDP( net.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.weight_decay) elif args.optim_type == 'AdamP': optimizer = AdamP( net.parameters(), lr=args.lr, betas=(0.9, 0.999), weight_decay=args.weight_decay) elif args.optim_type == 'LARS':
optimizer = LARS(net.parameters(),
2
2023-10-24 00:45:55+00:00
12k
zhaojw1998/AccoMontage-3
train_prior.py
[ { "identifier": "Prior", "path": "orchestrator/prior_model.py", "snippet": "class Prior(nn.Module):\n def __init__(self, mixture_encoder=None,\n function_encoder=None,\n context_enc_layer=12, \n function_dec_layer=12, \n d_model=256, \n nhead=8, \n dim_feedforward=1024, \n dropout=.1, \n function_resolution=8,\n inference=False,\n QA_model=None,\n DEVICE='cuda:0'):\n super(Prior, self).__init__()\n\n # embeddings\n self.func_embedding = nn.Embedding(num_embeddings=NUM_TIME_CODE+1, embedding_dim=d_model, padding_idx=NUM_TIME_CODE)\n self.prog_embedding = nn.Embedding(num_embeddings=NUM_INSTR_CLASS+1, embedding_dim=d_model, padding_idx=NUM_INSTR_CLASS)\n self.total_len_embedding = nn.Embedding(num_embeddings=len(TOTAL_LEN_BIN)+1, embedding_dim=d_model, padding_idx=len(TOTAL_LEN_BIN))\n self.abs_pos_embedding = nn.Embedding(num_embeddings=len(ABS_POS_BIN)+1, embedding_dim=d_model, padding_idx=len(ABS_POS_BIN))\n self.rel_pos_embedding = nn.Embedding(num_embeddings=len(REL_POS_BIN)+1, embedding_dim=d_model, padding_idx=len(REL_POS_BIN))\n\n self.start_embedding = nn.Parameter(torch.empty(NUM_INSTR_CLASS+1, d_model))\n nn.init.normal_(self.start_embedding)\n with torch.no_grad():\n self.start_embedding[NUM_INSTR_CLASS].fill_(0)\n\n #pre-trained encoders\n if not inference:\n self.mixture_encoder = mixture_encoder\n for param in self.mixture_encoder.parameters():\n param.requires_grad = False\n self.function_encoder = function_encoder\n for param in self.function_encoder.parameters():\n param.requires_grad = False\n else:\n self.QA_model = QA_model\n self.mixture_encoder = self.QA_model.mixture_enc\n self.function_encoder = self.QA_model.function_enc\n\n \n self.context_enc = nn.TransformerEncoder(\n nn.TransformerEncoderLayer(d_model=d_model, \n nhead=nhead, \n dim_feedforward=dim_feedforward, \n dropout=dropout, \n activation=F.gelu, \n batch_first=True, \n norm_first=True,\n device=DEVICE),\n num_layers=context_enc_layer)\n #multi-track Transformer\n self.mt_trf = nn.ModuleDict({})\n for layer in range(function_dec_layer):\n self.mt_trf[f'track_layer_{layer}'] = TransformerEncoderLayerRPE(d_model=d_model, \n nhead=nhead, \n dim_feedforward=dim_feedforward, \n dropout=dropout, \n norm_first=True,\n max_len=18).to(DEVICE)\n self.mt_trf[f'time_layer_{layer}'] = nn.TransformerDecoderLayer(d_model=d_model, \n nhead=nhead, \n dim_feedforward=dim_feedforward, \n dropout=dropout, \n activation=F.gelu, \n batch_first=True, \n norm_first=True,\n device=DEVICE)\n \n #positional encoding\n self.max_len = 1000\n position = torch.arange(self.max_len).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))\n pe = torch.zeros(1, self.max_len, d_model)\n pe[0, :, 0::2] = torch.sin(position * div_term)\n pe[0, :, 1::2] = torch.cos(position * div_term)\n pe = pe.to(DEVICE)\n self.register_buffer('pe', pe)\n \n #decoder output module \n self.func_out_linear = nn.Linear(d_model, NUM_TIME_CODE)\n\n #constants\n self.d_model = d_model\n self.function_dec_layer = function_dec_layer\n self.func_res = function_resolution\n\n #loss function\n self.criterion = nn.CrossEntropyLoss(reduction='mean')\n\n\n def generate_square_subsequent_mask(self, sz=15):\n return torch.triu(torch.ones(sz, sz), diagonal=1).bool()\n\n\n def func_get_next_token(self, token, gt=None):\n #token: (batch, codebook_size)\n #gt: (bs,)\n if gt is None:\n idx = token.max(-1)[1]\n else:\n idx = gt\n token = torch.zeros_like(token, device=token.device)\n arange = torch.arange(token.shape[0], 
device=token.device).long()\n token[arange, idx] = 1\n return token.unsqueeze(1) #one-hot shaoe (batch, 1, ft_codebook_size)\n\n \n\n\n def run(self, mix, prog, function, tm_mask, tk_mask, total_len, abs_pos, rel_pos, inference=False):\n #mix: (batch, max_time, 256)\n #prog: (batch, max_track)\n #function: (batch, max_time, max_track, 8)\n #tm_mask: (batch, max_time)\n #tk_mask: (batch, max_track)\n #total_len: (batch, max_time)\n #abs_pos: (batch, max_time)\n #rel_pos: (batch, max_time)\n batch, max_time, _ = mix.shape\n _, max_track = prog.shape\n \n mix = mix + self.pe[:, :self.func_res*mix.shape[1], :][:, ::self.func_res]\n mix = mix + self.total_len_embedding(total_len)\n mix = mix + self.abs_pos_embedding(abs_pos)\n mix = mix + self.rel_pos_embedding(rel_pos)\n \n mix = self.context_enc(mix) #(batch, max_time, 256)\n mix = mix.unsqueeze(1) + self.prog_embedding(prog).unsqueeze(2) #(batch, max_track, max_time, 256)\n mix = mix.reshape(-1, max_time, self.d_model)\n\n function = function.permute(0, 1, 3, 2).reshape(batch, -1, max_track)\n func = self.func_embedding(function)#(batch, 8*max_time, max_track, d_model)\n \n func = torch.cat([\n self.start_embedding[prog].unsqueeze(1), #(batch, 1, max_track, d_model)\n func[:, :-1]], \n dim=1) #batch, 8*max_time, max_track, d_model\n\n func = func + self.prog_embedding(prog).unsqueeze(1) \n\n func = func + self.pe[:, :func.shape[1], :].unsqueeze(2)\n func = func + self.total_len_embedding(total_len).repeat_interleave(self.func_res, dim=1).unsqueeze(2)\n func = func + self.abs_pos_embedding(abs_pos).repeat_interleave(self.func_res, dim=1).unsqueeze(2)\n func = func + self.rel_pos_embedding(rel_pos).repeat_interleave(self.func_res, dim=1).unsqueeze(2)\n\n for layer in range(self.function_dec_layer):\n func = func.reshape(-1, max_track, self.d_model)\n func = self.mt_trf[f'track_layer_{layer}'](src=func, \n src_key_padding_mask=tk_mask.unsqueeze(1).repeat(1, self.func_res*max_time, 1).reshape(-1, max_track))\n func = func.reshape(batch, -1, max_track, self.d_model).permute(0, 2, 1, 3).reshape(-1, self.func_res*max_time, self.d_model)\n func = self.mt_trf[f'time_layer_{layer}'](tgt=func,\n tgt_mask=self.generate_square_subsequent_mask(self.func_res*max_time).to(func.device),\n tgt_key_padding_mask=tm_mask.unsqueeze(1).repeat(1, max_track, 1).reshape(-1, max_time).repeat_interleave(self.func_res, dim=-1),\n memory=mix) \n func = func.reshape(batch, max_track, -1, self.d_model).permute(0, 2, 1, 3) #(batch, 8*max_time, max_track, d_model)\n\n function_recon = self.func_out_linear(func)\n\n return function_recon, function\n\n \n\n def loss_function(self, function_recon, function_gt, tm_mask, tk_mask):\n\n mask = torch.logical_or(tm_mask.repeat_interleave(8, dim=-1).unsqueeze(-1), tk_mask.unsqueeze(1)) #(batch, 8*max_time, track) \n unmask = torch.logical_not(mask)\n\n function_loss = self.criterion(function_recon[unmask].reshape(-1, NUM_TIME_CODE), \n function_gt[unmask].reshape(-1))\n return function_loss\n \n\n def loss(self, mix, prog, function, tm_mask, tk_mask, total_len, abs_pos, rel_pos):\n output = self.run(mix, prog, function, tm_mask, tk_mask, total_len, abs_pos, rel_pos, inference=False)\n return self.loss_function(*output, tm_mask, tk_mask)\n \n\n def forward(self, mode, *input, **kwargs):\n if mode in [\"run\", 0]:\n return self.run(*input, **kwargs)\n elif mode in ['loss', 'train', 1]:\n return self.loss(*input, **kwargs)\n elif mode in ['inference', 'eval', 'val', 2]:\n return self.inference(*input, **kwargs)\n else:\n raise 
NotImplementedError\n\n\n def run_autoregressive_greedy(self, mix, prog, function, total_len, abs_pos, rel_pos, blur=.5):\n #mix: (batch, num2bar, bar_resolution, max_simu_note, 6)\n #prog: (batch, max_track)\n #function: (batch, 1, max_track, 32)\n #total_len: (batch, num2bar)\n #abs_pos: (batch, num2bar)\n #rel_pos: (batch, num2bar)\n batch, num_2bar, time, max_simu_note, _ = mix.shape\n _, max_track = prog.shape\n\n mix = mix.reshape(-1, time, max_simu_note, 6)\n mix = self.mixture_encoder(mix)[0].mean.reshape(batch, num_2bar, -1) #(batch, num_2bar, 256)\n mix_ = (1-blur)*mix.clone() + blur*torch.empty(mix.shape, device=mix.device).normal_(mean=0, std=1) \n \n mix_ = mix_ + self.pe[:, :self.func_res*mix.shape[1], :][:, ::self.func_res]\n mix_ = mix_ + self.total_len_embedding(total_len)\n mix_ = mix_ + self.abs_pos_embedding(abs_pos)\n mix_ = mix_ + self.rel_pos_embedding(rel_pos)\n\n mix_ = self.context_enc(mix_) #(batch, num_bar, 256)\n mix_ = mix_.unsqueeze(1) + self.prog_embedding(prog).unsqueeze(2) #(batch, max_track, num_bar, 256)\n mix_ = mix_.reshape(-1, num_2bar, self.d_model)\n \n function = function.reshape(-1, 32)\n function = self.function_encoder.get_code_indices(function).reshape(batch, max_track, self.func_res)\n\n\n for idx in range(self.func_res, self.func_res*num_2bar):\n func = self.func_embedding(function) #*batch, max_track, 8, d_model\n func = func.permute(0, 2, 1, 3).reshape(batch, -1, max_track, self.d_model)\n\n func = func + self.prog_embedding(prog).unsqueeze(1)\n func = func + self.pe[:, :func.shape[1], :].unsqueeze(2)\n\n func = func + self.total_len_embedding(total_len).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n func = func + self.abs_pos_embedding(abs_pos).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n func = func + self.rel_pos_embedding(rel_pos).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n\n for layer in range(self.function_dec_layer):\n \n func = func.reshape(-1, max_track, self.d_model)\n func = self.mt_trf[f'track_layer_{layer}'](src=func)\n func = func.reshape(batch, -1, max_track, self.d_model).permute(0, 2, 1, 3).reshape(-1, idx, self.d_model)\n func = self.mt_trf[f'time_layer_{layer}'](tgt=func,\n tgt_mask=self.generate_square_subsequent_mask(sz=idx).to(func.device),\n memory=mix_) \n func = func.reshape(batch, max_track, -1, self.d_model).permute(0, 2, 1, 3) #(batch, num2bar-1, max_track, d_model)\n\n \n func_pred = self.func_out_linear(func[:, -1,]).max(-1)[1].unsqueeze(-1)\n\n function = torch.cat([function, func_pred], dim=-1)\n if function.shape[1] == self.func_res*num_2bar:\n break\n \n function = function.reshape(batch, max_track, num_2bar, self.func_res).permute(0, 2, 1, 3)\n z_func = self.function_encoder.infer_by_codes(function)\n return self.QA_model.infer_with_function_codes(mix[0], prog[0].repeat(num_2bar, 1), z_func[0])\n \n\n def run_autoregressive_nucleus(self, mix, prog, func_prompt, total_len, abs_pos, rel_pos, blur=.5, p=.1, t=1):\n #mix: (batch, num2bar, bar_resolution, max_simu_note, 6)\n #prog: (batch, max_track)\n #func_prompt: (batch, 1, max_track, 32)\n #total_len: (batch, num2bar)\n #abs_pos: (batch, num2bar)\n #rel_pos: (batch, num2bar)\n\n batch, num_2bar, time, max_simu_note, _ = mix.shape\n _, max_track = prog.shape\n\n mix = mix.reshape(-1, time, max_simu_note, 6)\n mix = self.mixture_encoder(mix)[0].mean.reshape(batch, num_2bar, -1) #(batch, num_2bar, 256)\n mix_ = (1-blur)*mix.clone() + blur*torch.empty(mix.shape, 
device=mix.device).normal_(mean=0, std=1) \n \n mix_ = mix_ + self.pe[:, :self.func_res*mix.shape[1], :][:, ::self.func_res]\n mix_ = mix_ + self.total_len_embedding(total_len)\n mix_ = mix_ + self.abs_pos_embedding(abs_pos)\n mix_ = mix_ + self.rel_pos_embedding(rel_pos)\n\n mix_ = self.context_enc(mix_) #(batch, num_bar, 256)\n mix_ = mix_.unsqueeze(1) + self.prog_embedding(prog).unsqueeze(2) #(batch, max_track, num_bar, 256)\n mix_ = mix_.reshape(-1, num_2bar, self.d_model)\n \n start = self.start_embedding[prog].unsqueeze(1) #(batch, 1, max_track, dmodel)\n\n if func_prompt is not None:\n func_prompt = func_prompt.reshape(-1, 32)\n func_prompt = self.function_encoder.get_code_indices(func_prompt).reshape(batch, max_track, self.func_res).permute(0, 2, 1) #(batch, 8, max_track)\n #else:\n function = torch.empty((batch, 0, max_track)).long().to(mix.device)\n\n for idx in range(self.func_res*num_2bar):\n if (idx < self.func_res) and (func_prompt is not None):\n start = torch.cat([start, self.func_embedding(function[:, idx-1: idx, :])], dim=1)\n function = torch.cat([function, func_prompt[:, idx: idx+1, :]], dim=1) \n continue\n else:\n func = torch.cat([start, self.func_embedding(function[:, idx-1: idx, :])], dim=1)\n\n func = func + self.prog_embedding(prog).unsqueeze(1)\n func = func + self.pe[:, :func.shape[1], :].unsqueeze(2)\n\n func = func + self.total_len_embedding(total_len).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n func = func + self.abs_pos_embedding(abs_pos).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n func = func + self.rel_pos_embedding(rel_pos).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n\n for layer in range(self.function_dec_layer):\n \n func = func.reshape(-1, max_track, self.d_model)\n func = self.mt_trf[f'track_layer_{layer}'](src=func)\n func = func.reshape(batch, -1, max_track, self.d_model).permute(0, 2, 1, 3).reshape(-1, idx+1, self.d_model)\n func = self.mt_trf[f'time_layer_{layer}'](tgt=func,\n tgt_mask=self.generate_square_subsequent_mask(sz=idx+1).to(func.device),\n memory=mix_) \n func = func.reshape(batch, max_track, -1, self.d_model).permute(0, 2, 1, 3)#(batch, num2bar-1, max_track, d_model)\n \n start = torch.cat([start, self.func_embedding(function[:, idx-1: idx, :])], dim=1)\n\n func_logits = self.func_out_linear(func[:, -1,]) / t\n filtered_func_logits = self.nucleus_filter(func_logits, p)\n func_probability = F.softmax(filtered_func_logits, dim=-1)\n func_pred = torch.multinomial(func_probability.reshape(-1, NUM_TIME_CODE), 1).reshape(func_probability.shape[:-1]).unsqueeze(1)\n\n function = torch.cat([function, func_pred], dim=1)\n if function.shape[1] == self.func_res*num_2bar:\n break\n \n\n \n function = function.reshape(batch, num_2bar, self.func_res, max_track).permute(0, 1, 3, 2)\n z_func = self.function_encoder.infer_by_codes(function)\n return self.QA_model.infer_with_function_codes(mix[0], prog[0].repeat(num_2bar, 1), z_func[0])\n \n def nucleus_filter(self, logits, p):\n #sorted_logits, sorted_indices = torch.sort(logits, descending=True)\n sorted_logits, sorted_indices = torch.sort(logits, dim=-1, descending=True)\n #cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n cum_sum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n\n # Remove tokens with cumulative probability above the threshold\n #sorted_indices_to_remove = cumulative_probs > p\n nucleus = cum_sum_probs < p\n # Shift the indices to the right to keep also 
the first token above the threshold\n #sorted_indices_to_remove = torch.cat([sorted_indices_to_remove.new_zeros(sorted_indices_to_remove.shape[:-1] + (1,)), sorted_indices_to_remove[..., :-1]], dim=-1)\n nucleus = torch.cat([nucleus.new_ones(nucleus.shape[:-1] + (1,)), nucleus[..., :-1]], dim=-1)\n nucleus = nucleus.gather(-1, sorted_indices.argsort(-1))\n\n logits[~nucleus] = float('-inf')\n return logits\n \n\n\n @classmethod\n def init_model(cls, pretrain_model_path=None, DEVICE='cuda:0'):\n \"\"\"Fast model initialization.\"\"\"\n vqQaA = Query_and_reArrange(name='pretrain', trf_layers=2, device=DEVICE)\n if pretrain_model_path is not None:\n vqQaA.load_state_dict(torch.load(pretrain_model_path, map_location=torch.device('cpu')))\n vqQaA.eval()\n model = cls(vqQaA.mixture_enc, vqQaA.function_enc, DEVICE=DEVICE).to(DEVICE)\n return model\n \n @classmethod\n def init_inference_model(cls, prior_model_path, QA_model_path, DEVICE='cuda:0'):\n \"\"\"Fast model initialization.\"\"\"\n vqQaA = Query_and_reArrange(name='pretrain', trf_layers=2, device=DEVICE)\n vqQaA.load_state_dict(torch.load(QA_model_path, map_location=torch.device('cpu')))\n vqQaA.eval()\n model = cls(inference=True, QA_model=vqQaA, DEVICE=DEVICE).to(DEVICE)\n model.load_state_dict(torch.load(prior_model_path), strict=False)\n return model" }, { "identifier": "VQ_LMD_Dataset", "path": "orchestrator/prior_dataset.py", "snippet": "class VQ_LMD_Dataset(Dataset):\n def __init__(self, lmd_dir, debug_mode=False, split='train', mode='train'):\n super(VQ_LMD_Dataset, self).__init__()\n self.lmd_dir = lmd_dir\n self.split = split\n self.mode = mode\n self.debug_mode = debug_mode\n self.mixture_list = []\n self.program_list = [] \n self.function_list = []\n self.anchor_list = []\n \n print('loading LMD Dataset ...')\n self.load_lmd()\n\n def __len__(self):\n return len(self.anchor_list)\n \n def __getitem__(self, idx):\n song_id, start, total_len = self.anchor_list[idx]\n mix = self.mixture_list[song_id][start: min(total_len, start+SAMPLE_LEN)]\n prog = self.program_list[song_id]\n function = self.function_list[song_id][start: min(total_len, start+SAMPLE_LEN)]\n return mix, prog, function, (start, total_len)\n\n\n\n def load_lmd(self):\n lmd_list = os.listdir(self.lmd_dir)\n if self.split == 'train':\n lmd_list = lmd_list[: int(len(lmd_list)*.95)]\n elif self.split == 'validation':\n lmd_list = lmd_list[int(len(lmd_list)*.95): ]\n if self.debug_mode:\n lmd_list = lmd_list[: 1000]\n for song in tqdm(lmd_list):\n lmd_data = np.load(os.path.join(self.lmd_dir, song))\n mix = lmd_data['mixture'] #(num2bar, 256) 3 for duration, velocity, and control\n prog = lmd_data['programs'] #(track)\n if len(prog) > 20:\n continue #for sake of computing memory\n function = lmd_data['func_time'] #(num2bar, track, 8)\n\n if self.split == 'train':\n for i in range(0, len(mix), HOP_LEN):\n if i + SAMPLE_LEN >= len(mix):\n break\n self.anchor_list.append((len(self.mixture_list), i, len(mix))) #(song_id, start, total_length)\n else:\n for i in range(0, len(mix), SAMPLE_LEN):\n if i + SAMPLE_LEN >= len(mix):\n break\n self.anchor_list.append((len(self.mixture_list), i, len(mix))) #(song_id, start, total_length)\n self.anchor_list.append((len(self.mixture_list), max(0, len(mix)-SAMPLE_LEN), len(mix)))\n\n self.mixture_list.append(mix)\n self.program_list.append(prog)\n self.function_list.append(function)" }, { "identifier": "collate_fn", "path": "orchestrator/prior_dataset.py", "snippet": "def collate_fn(batch, device):\n max_dur = max([len(item[0]) for item 
in batch])\n max_tracks = max([len(item[1]) for item in batch])\n\n mixture = []\n programs = []\n function = []\n time_mask = []\n track_mask = []\n total_length = []\n abs_pos = []\n rel_pos = []\n\n for mix, prog, func, (start, total_len) in batch:\n time_mask.append([0]*len(mix) + [1]*(max_dur-len(mix)))\n track_mask.append([0]*len(prog) + [1]*(max_tracks-len(prog)))\n \n r_pos = np.round(np.arange(start, start+len(mix), 1) / (total_len-1) * len(REL_POS_BIN))\n total_len = np.argmin(np.abs(TOTAL_LEN_BIN - total_len)).repeat(len(mix))\n if start <= ABS_POS_BIN[-2]:\n a_pos = np.append(ABS_POS_BIN[start: min(ABS_POS_BIN[-1], start+len(mix))], [ABS_POS_BIN[-1]] * (start+len(mix)-ABS_POS_BIN[-1]))\n else:\n a_pos = np.array([ABS_POS_BIN[-1]] * len(mix))\n\n a = np.random.rand()\n if a < 0.3:\n blur_ratio = 0\n elif a < 0.7:\n blur_ratio = (np.random.rand() * 2 + 1) / 4 #range in [.25, .75)\n else:\n blur_ratio = 1\n mix = (1 - blur_ratio) * mix + blur_ratio * np.random.normal(loc=0, scale=1, size=mix.shape)\n\n if len(prog) < max_tracks:\n prog = np.pad(prog, ((0, max_tracks-len(prog))), mode='constant', constant_values=(NUM_INSTR_CLASS,))\n func = np.pad(func, ((0, 0), (0, max_tracks-func.shape[1]), (0, 0)), mode='constant', constant_values=(NUM_TIME_CODE,))\n\n if len(mix) < max_dur:\n mix = np.pad(mix, ((0, max_dur-len(mix)), (0, 0)), mode='constant', constant_values=(0,))\n total_len = np.pad(total_len, (0, max_dur-len(total_len)), mode='constant', constant_values=(len(TOTAL_LEN_BIN),))\n a_pos = np.pad(a_pos, (0, max_dur-len(a_pos)), mode='constant', constant_values=(len(ABS_POS_BIN),))\n r_pos = np.pad(r_pos, (0, max_dur-len(r_pos)), mode='constant', constant_values=(len(REL_POS_BIN),))\n func = np.pad(func, ((0, max_dur-len(func)), (0, 0), (0, 0)), mode='constant', constant_values=(NUM_TIME_CODE,))\n \n mixture.append(mix)\n programs.append(prog)\n function.append(func)\n total_length.append(total_len)\n abs_pos.append(a_pos)\n rel_pos.append(r_pos)\n \n return torch.from_numpy(np.array(mixture)).float().to(device), \\\n torch.from_numpy(np.array(programs)).long().to(device), \\\n torch.from_numpy(np.array(function)).long().to(device), \\\n torch.BoolTensor(time_mask).to(device), \\\n torch.BoolTensor(track_mask).to(device), \\\n torch.from_numpy(np.array(total_length)).long().to(device), \\\n torch.from_numpy(np.array(abs_pos)).long().to(device), \\\n torch.from_numpy(np.array(rel_pos)).long().to(device)" }, { "identifier": "OptimizerSchedulerWithWarmUp", "path": "orchestrator/utils/scheduler.py", "snippet": "class OptimizerSchedulerWithWarmUp(_Scheduler):\n\n def __init__(self, optimizer, warmupscheduler, scheduler, clip, warmup_step=1000, step=0):\n # optimizer and scheduler are pytorch class\n super(OptimizerSchedulerWithWarmUp, self).__init__(step)\n self.optimizer = optimizer\n self.warmupscheduler = warmupscheduler\n self.scheduler = scheduler\n self.warmup_step = warmup_step\n self.clip = clip\n\n def optimizer_zero_grad(self):\n self.optimizer.zero_grad()\n\n def step(self, require_zero_grad=False):\n self.optimizer.step()\n if self.scheduler is not None:\n if self._step < self.warmup_step:\n self.warmupscheduler.step()\n else:\n self.scheduler.step()\n if require_zero_grad:\n self.optimizer_zero_grad()\n self._update_step()" }, { "identifier": "SummaryWriters", "path": "orchestrator/utils/training.py", "snippet": "class SummaryWriters:\n\n def __init__(self, writer_names, tags, log_path, tasks=('train', 'val')):\n # writer_names example: ['loss', 'kl_loss', 'recon_loss']\n 
# tags example: {'name1': None, 'name2': (0, 1)}\n self.log_path = log_path\n #assert 'loss' == writer_names[0]\n self.writer_names = writer_names\n self.tags = tags\n self._regularize_tags()\n\n writer_dic = {}\n for name in writer_names:\n writer_dic[name] = SummaryWriter(os.path.join(log_path, name))\n self.writers = writer_dic\n\n all_tags = {}\n for task in tasks:\n task_dic = {}\n for key, val in self.tags.items():\n task_dic['_'.join([task, key])] = val\n all_tags[task] = task_dic\n self.all_tags = all_tags\n\n def _init_summary_writer(self):\n tags = {'batch_train': (0, 1, 2, 3, 4)}\n self.summary_writers = SummaryWriters(self.writer_names, tags,\n self.writer_path)\n\n def _regularize_tags(self):\n for key, val in self.tags.items():\n if val is None:\n self.tags[key] = tuple(range(len(self.writer_names)))\n\n def single_write(self, name, tag, val, step):\n self.writers[name].add_scalar(tag, val, step)\n\n def write_tag(self, task, tag, vals, step):\n assert len(vals) == len(self.all_tags[task][tag])\n for name_id, val in zip(self.all_tags[task][tag], vals):\n name = self.writer_names[name_id]\n self.single_write(name, tag, val, step)\n\n def write_task(self, task, vals_dic, step):\n for tag, name_ids in self.all_tags[task].items():\n vals = [vals_dic[self.writer_names[i]] for i in name_ids]\n self.write_tag(task, tag, vals, step)" }, { "identifier": "LogPathManager", "path": "orchestrator/utils/training.py", "snippet": "class LogPathManager:\n\n def __init__(self, readme_fn=None, save_root='.', log_path_name='result',\n with_date=True, with_time=True,\n writer_folder='writers', model_folder='models'):\n date = str(datetime.date.today()) if with_date else ''\n ctime = datetime.datetime.now().time().strftime(\"%H%M%S\") \\\n if with_time else ''\n log_folder = '_'.join([date, ctime, log_path_name])\n log_path = os.path.join(save_root, log_folder)\n writer_path = os.path.join(log_path, writer_folder)\n model_path = os.path.join(log_path, model_folder)\n self.log_path = log_path\n self.writer_path = writer_path\n self.model_path = model_path\n LogPathManager.create_path(log_path)\n LogPathManager.create_path(writer_path)\n LogPathManager.create_path(model_path)\n if readme_fn is not None:\n shutil.copyfile(readme_fn, os.path.join(log_path, 'readme.txt'))\n\n @staticmethod\n def create_path(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n def epoch_model_path(self, model_name):\n model_fn = join_fn(model_name, 'epoch', ext='pt')\n return os.path.join(self.model_path, model_fn)\n\n def valid_model_path(self, model_name):\n model_fn = join_fn(model_name, 'valid', ext='pt')\n return os.path.join(self.model_path, model_fn)\n\n def final_model_path(self, model_name):\n model_fn = join_fn(model_name, 'final', ext='pt')\n return os.path.join(self.model_path, model_fn)" }, { "identifier": "epoch_time", "path": "orchestrator/utils/training.py", "snippet": "def epoch_time(start_time, end_time):\n elapsed_time = end_time - start_time\n elapsed_mins = int(elapsed_time / 60)\n elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n return elapsed_mins, elapsed_secs" } ]
import os
import time
import torch
import torch.multiprocessing as mp
from torch import optim
from orchestrator.prior_model import Prior
from orchestrator.prior_dataset import VQ_LMD_Dataset, collate_fn
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import LinearLR, CosineAnnealingLR
from orchestrator.utils.scheduler import OptimizerSchedulerWithWarmUp
from orchestrator.utils.training import SummaryWriters, LogPathManager, epoch_time
from tqdm import tqdm
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import init_process_group, destroy_process_group
9,880
return loss_dic def write_loss_to_dic(writer_names, loss_items): loss_dic = {} assert len(writer_names) == len(loss_items) for key, val in zip(writer_names, loss_items): loss_dic[key] = val.item() return loss_dic def init_loss_dic(writer_names): loss_dic = {} for key in writer_names: loss_dic[key] = 0. return loss_dic def average_epoch_loss(epoch_loss_dict, num_batch): for key in epoch_loss_dict: epoch_loss_dict[key] /= num_batch return epoch_loss_dict def batch_report(loss, n_epoch, idx, num_batch, mode='training', verbose=False): if verbose: print(f'------------{mode}------------') print('Epoch: [{0}][{1}/{2}]'.format(n_epoch, idx, num_batch)) print(f"\t time func loss: {loss['func_l']:.3f}") def scheduler_show(optimizer_scheduler, verbose=False): schedule_params = {} schedule_params['lr'] = optimizer_scheduler.optimizer.param_groups[0]['lr'] if verbose: print(schedule_params) return schedule_params def train(model, dataloader, optimizer_scheduler, writer_names, loss_writers, scheduler_writers, n_epoch, VERBOSE): model.train() epoch_loss_dic = init_loss_dic(writer_names) for idx, batch in tqdm(enumerate(dataloader), total=len(dataloader)): try: optimizer_scheduler.optimizer_zero_grad() loss = model('loss', *batch) loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), optimizer_scheduler.clip) optimizer_scheduler.step() epoch_loss_dic = accumulate_loss_dic(writer_names, epoch_loss_dic, [loss]) batch_loss_dic = write_loss_to_dic(writer_names, [loss]) train_step = n_epoch * len(dataloader) + idx if loss_writers is not None: loss_writers.write_task('train', batch_loss_dic, train_step) batch_report(batch_loss_dic, n_epoch, idx, len(dataloader), mode='train', verbose=VERBOSE) scheduler_dic = scheduler_show(optimizer_scheduler, verbose=VERBOSE) if scheduler_writers is not None: scheduler_writers.write_task('train', scheduler_dic, train_step) except Exception as exc: print(exc) print(batch[0].shape, batch[1].shape) continue scheduler_show(optimizer_scheduler, verbose=True) epoch_loss_dic = average_epoch_loss(epoch_loss_dic, len(dataloader)) return epoch_loss_dic def val(model, dataloader, writer_names, summary_writers, n_epoch, VERBOSE): model.eval() epoch_loss_dic = init_loss_dic(writer_names) for idx, batch in tqdm(enumerate(dataloader), total=len(dataloader)): try: with torch.no_grad(): loss = model('loss', *batch)#, **input_params) epoch_loss_dic = accumulate_loss_dic(writer_names, epoch_loss_dic, [loss]) batch_loss_dic = write_loss_to_dic(writer_names, [loss]) if summary_writers is not None: batch_report(batch_loss_dic, n_epoch, idx, len(dataloader), mode='validation', verbose=VERBOSE) except Exception as exc: print(exc) print(batch[0].shape, batch[1].shape) continue epoch_loss_dic = average_epoch_loss(epoch_loss_dic, len(dataloader)) if summary_writers is not None: summary_writers.write_task('val', epoch_loss_dic, n_epoch) return epoch_loss_dic def epoch_report(start_time, end_time, train_loss, valid_loss, n_epoch): epoch_mins, epoch_secs = epoch_time(start_time, end_time) print(f'Epoch: {n_epoch + 1:02} | ' f'Time: {epoch_mins}m {epoch_secs}s', flush=True) print(f'\tTrain Loss: {train_loss:.3f}', flush=True) print(f'\t Valid. 
Loss: {valid_loss:.3f}', flush=True) if __name__ == '__main__': os.environ['CUDA_VISIBLE_DEVICES']= '0, 1' os.environ['CUDA_LAUNCH_BLOCKING'] = '1' MODEL_NAME = 'Prior-Model-VQ-Q&A-T large' DEBUG = 0 if DEBUG: save_root = 'AccoMontage3/prior_model_VQ-Q&A-T/save' log_path_name = 'debug' else: save_root = '/data1/zhaojw/AccoMontage3/' log_path_name = MODEL_NAME readme_fn = 'AccoMontage3/prior_model_VQ-Q&A-T/train_DDP.py'
def ddp_setup(rank, world_size): """ Args: rank: Unique identifier of each process world_size: Total number of processes """ os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = "12355" init_process_group(backend="nccl", rank=rank, world_size=world_size) def main(rank, world_size, log_path_mng, VERBOSE, MODEL_NAME): #print('rank:', rank) ddp_setup(rank, world_size) PRETRAIN_PATH = "/data1/zhaojw/AccoMontage3/2023-12-07_134449_VQ-Q&A-T/models/VQ-Q&A-T_009_epoch.pt" BATCH_SIZE = 8 N_EPOCH = 10 CLIP = 1 LR = 1e-4 WARMUP_STEP = 1000 if VERBOSE: N_EPOCH=5 LR = 1e-3 WARMUP_STEP=10 model = Prior.init_model(pretrain_model_path=PRETRAIN_PATH, DEVICE=rank) model = DDP(model, device_ids=[rank], find_unused_parameters=False) lmd_dir = "/data1/zhaojw/LMD/VQ-Q&A-T-009-reorder/" train_set = VQ_LMD_Dataset(lmd_dir, debug_mode=VERBOSE, split='train', mode='train') train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=False, collate_fn=lambda b: collate_fn(b, rank), sampler=DistributedSampler(train_set)) val_set = VQ_LMD_Dataset(lmd_dir, debug_mode=VERBOSE, split='validation', mode='train') val_loader = DataLoader(val_set, batch_size=BATCH_SIZE, shuffle=False, collate_fn=lambda b: collate_fn(b, rank), sampler=DistributedSampler(val_set)) print(f'Dataset loaded. {len(train_loader)} samples for train and {len(val_loader)} samples for validation.') #optimizer = optim.Adam(model.parameters(), lr=LR) optimizer = optim.AdamW(model.parameters(), lr=LR, betas=[0.9, 0.999], weight_decay=1e-2) warmup_scheduler = LinearLR(optimizer, start_factor=1e-14, end_factor=1, total_iters=WARMUP_STEP) scheduler = CosineAnnealingLR(optimizer, T_max=len(train_loader)*N_EPOCH-WARMUP_STEP, eta_min=1e-6) #scheduler = MinExponentialLR(optimizer, gamma=0.99998, minimum=1e-5) #optimizer_scheduler = OptimizerScheduler(optimizer, scheduler, CLIP) optimizer_scheduler = OptimizerSchedulerWithWarmUp(optimizer, warmup_scheduler, scheduler, CLIP, WARMUP_STEP) writer_names = ['func_l'] scheduler_writer_names = ['lr'] if rank == 0: tags = {'loss': None} loss_writers = SummaryWriters(writer_names, tags, log_path_mng.writer_path) tags = {'scheduler': None} scheduler_writers = SummaryWriters(scheduler_writer_names, tags, log_path_mng.writer_path) else: loss_writers = None scheduler_writers = None VERBOSE = False for n_epoch in range(N_EPOCH): start_time = time.time() train_loader.sampler.set_epoch(n_epoch) print(f'Training epoch {n_epoch}') train_loss = train(model, train_loader, optimizer_scheduler, writer_names, loss_writers, scheduler_writers, n_epoch=n_epoch, VERBOSE=VERBOSE)['func_l'] print(f'Validating epoch {n_epoch}') val_loss = val(model, val_loader, writer_names, loss_writers, n_epoch=n_epoch, VERBOSE=VERBOSE)['func_l'] end_time = time.time() if rank == 0: torch.save(model.module.state_dict(), log_path_mng.epoch_model_path(f'{MODEL_NAME}_{str(n_epoch).zfill(3)}')) epoch_report(start_time, end_time, train_loss, val_loss, n_epoch) destroy_process_group() def accumulate_loss_dic(writer_names, loss_dic, loss_items): assert len(writer_names) == len(loss_items) for key, val in zip(writer_names, loss_items): loss_dic[key] += val.item() return loss_dic def write_loss_to_dic(writer_names, loss_items): loss_dic = {} assert len(writer_names) == len(loss_items) for key, val in zip(writer_names, loss_items): loss_dic[key] = val.item() return loss_dic def init_loss_dic(writer_names): loss_dic = {} for key in writer_names: loss_dic[key] = 0. 
return loss_dic def average_epoch_loss(epoch_loss_dict, num_batch): for key in epoch_loss_dict: epoch_loss_dict[key] /= num_batch return epoch_loss_dict def batch_report(loss, n_epoch, idx, num_batch, mode='training', verbose=False): if verbose: print(f'------------{mode}------------') print('Epoch: [{0}][{1}/{2}]'.format(n_epoch, idx, num_batch)) print(f"\t time func loss: {loss['func_l']:.3f}") def scheduler_show(optimizer_scheduler, verbose=False): schedule_params = {} schedule_params['lr'] = optimizer_scheduler.optimizer.param_groups[0]['lr'] if verbose: print(schedule_params) return schedule_params def train(model, dataloader, optimizer_scheduler, writer_names, loss_writers, scheduler_writers, n_epoch, VERBOSE): model.train() epoch_loss_dic = init_loss_dic(writer_names) for idx, batch in tqdm(enumerate(dataloader), total=len(dataloader)): try: optimizer_scheduler.optimizer_zero_grad() loss = model('loss', *batch) loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), optimizer_scheduler.clip) optimizer_scheduler.step() epoch_loss_dic = accumulate_loss_dic(writer_names, epoch_loss_dic, [loss]) batch_loss_dic = write_loss_to_dic(writer_names, [loss]) train_step = n_epoch * len(dataloader) + idx if loss_writers is not None: loss_writers.write_task('train', batch_loss_dic, train_step) batch_report(batch_loss_dic, n_epoch, idx, len(dataloader), mode='train', verbose=VERBOSE) scheduler_dic = scheduler_show(optimizer_scheduler, verbose=VERBOSE) if scheduler_writers is not None: scheduler_writers.write_task('train', scheduler_dic, train_step) except Exception as exc: print(exc) print(batch[0].shape, batch[1].shape) continue scheduler_show(optimizer_scheduler, verbose=True) epoch_loss_dic = average_epoch_loss(epoch_loss_dic, len(dataloader)) return epoch_loss_dic def val(model, dataloader, writer_names, summary_writers, n_epoch, VERBOSE): model.eval() epoch_loss_dic = init_loss_dic(writer_names) for idx, batch in tqdm(enumerate(dataloader), total=len(dataloader)): try: with torch.no_grad(): loss = model('loss', *batch)#, **input_params) epoch_loss_dic = accumulate_loss_dic(writer_names, epoch_loss_dic, [loss]) batch_loss_dic = write_loss_to_dic(writer_names, [loss]) if summary_writers is not None: batch_report(batch_loss_dic, n_epoch, idx, len(dataloader), mode='validation', verbose=VERBOSE) except Exception as exc: print(exc) print(batch[0].shape, batch[1].shape) continue epoch_loss_dic = average_epoch_loss(epoch_loss_dic, len(dataloader)) if summary_writers is not None: summary_writers.write_task('val', epoch_loss_dic, n_epoch) return epoch_loss_dic def epoch_report(start_time, end_time, train_loss, valid_loss, n_epoch): epoch_mins, epoch_secs = epoch_time(start_time, end_time) print(f'Epoch: {n_epoch + 1:02} | ' f'Time: {epoch_mins}m {epoch_secs}s', flush=True) print(f'\tTrain Loss: {train_loss:.3f}', flush=True) print(f'\t Valid. Loss: {valid_loss:.3f}', flush=True) if __name__ == '__main__': os.environ['CUDA_VISIBLE_DEVICES']= '0, 1' os.environ['CUDA_LAUNCH_BLOCKING'] = '1' MODEL_NAME = 'Prior-Model-VQ-Q&A-T large' DEBUG = 0 if DEBUG: save_root = 'AccoMontage3/prior_model_VQ-Q&A-T/save' log_path_name = 'debug' else: save_root = '/data1/zhaojw/AccoMontage3/' log_path_name = MODEL_NAME readme_fn = 'AccoMontage3/prior_model_VQ-Q&A-T/train_DDP.py'
log_path_mng = LogPathManager(readme_fn, save_root=save_root, log_path_name=log_path_name)
5
2023-10-23 12:36:57+00:00
12k
bytedance/ColTrack
motlib/mot_dataset/transform/mot_video/mosaic.py
[ { "identifier": "adjust_box_anns", "path": "motlib/mot_dataset/transform/yolox/utils.py", "snippet": "def adjust_box_anns(bbox, scale_ratio, padw, padh, w_max, h_max):\n #bbox[:, 0::2] = np.clip(bbox[:, 0::2] * scale_ratio + padw, 0, w_max)\n #bbox[:, 1::2] = np.clip(bbox[:, 1::2] * scale_ratio + padh, 0, h_max)\n bbox[:, 0::2] = bbox[:, 0::2] * scale_ratio + padw\n bbox[:, 1::2] = bbox[:, 1::2] * scale_ratio + padh\n return bbox" }, { "identifier": "box_candidates", "path": "motlib/mot_dataset/transform/yolox/data_augment.py", "snippet": "def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.2):\n # box1(4,n), box2(4,n)\n # Compute candidate boxes which include follwing 5 things:\n # box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio\n w1, h1 = box1[2] - box1[0], box1[3] - box1[1]\n w2, h2 = box2[2] - box2[0], box2[3] - box2[1]\n ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio\n return (\n (w2 > wh_thr)\n & (h2 > wh_thr)\n & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr)\n & (ar < ar_thr)\n ) # candidates" }, { "identifier": "augment_hsv", "path": "motlib/mot_dataset/transform/yolox/data_augment.py", "snippet": "def augment_hsv(img, hgain=0.015, sgain=0.7, vgain=0.4):\n r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains\n hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))\n dtype = img.dtype # uint8\n\n x = np.arange(0, 256, dtype=np.int16)\n lut_hue = ((x * r[0]) % 180).astype(dtype)\n lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)\n lut_val = np.clip(x * r[2], 0, 255).astype(dtype)\n\n img_hsv = cv2.merge(\n (cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))\n ).astype(dtype)\n cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed" }, { "identifier": "Dataset", "path": "motlib/mot_dataset/transform/yolox/dataset.py", "snippet": "class Dataset(MOT2CoCoDataset):\n def __init__(self, args, train_or_test, transforms=None) -> None:\n super().__init__(args, train_or_test, transforms)\n\n self.__input_dim = args.img_size[:2]\n self.enable_mosaic = args.mosaic\n \n @property\n def input_dim(self):\n \"\"\"\n Dimension that can be used by transforms to set the correct image size, etc.\n This allows transforms to have a single source of truth\n for the input dimension of the network.\n\n Return:\n list: Tuple containing the current width,height\n \"\"\"\n if hasattr(self, \"_input_dim\"):\n return self._input_dim\n return self.__input_dim\n \n @staticmethod\n def resize_getitem(getitem_fn):\n \"\"\"\n Decorator method that needs to be used around the ``__getitem__`` method. |br|\n This decorator enables the on the fly resizing of\n the ``input_dim`` with our :class:`~lightnet.data.DataLoader` class.\n\n Example:\n >>> class CustomSet(ln.data.Dataset):\n ... def __len__(self):\n ... return 10\n ... @ln.data.Dataset.resize_getitem\n ... def __getitem__(self, index):\n ... # Should return (image, anno) but here we return input_dim\n ... 
return self.input_dim\n >>> data = CustomSet((200,200))\n >>> data[0]\n (200, 200)\n >>> data[(480,320), 0]\n (480, 320)\n \"\"\"\n\n @wraps(getitem_fn)\n def wrapper(self, index):\n if not isinstance(index, int):\n has_dim = True\n self._input_dim = index[0]\n self.enable_mosaic = index[2]\n index = index[1]\n else:\n has_dim = False\n\n ret_val = getitem_fn(self, index)\n\n if has_dim:\n del self._input_dim\n\n return ret_val\n\n return wrapper" }, { "identifier": "MosaicDetection", "path": "motlib/mot_dataset/transform/yolox/mosaic.py", "snippet": "class MosaicDetection(Dataset):\n \"\"\"Detection dataset wrapper that performs mixup for normal dataset.\"\"\"\n\n def __init__(\n self, dataset, img_size, mosaic=True, \n degrees=10.0, translate=0.1, scale=(0.5, 1.5), mscale=(0.5, 1.5),\n shear=2.0, perspective=0.0, enable_mixup=True, args=None, train_or_test=\"train\", transforms=None\n ):\n \"\"\"\n\n Args:\n dataset(Dataset) : Pytorch dataset object.\n img_size (tuple):\n mosaic (bool): enable mosaic augmentation or not.\n preproc (func):\n degrees (float):\n translate (float):\n scale (tuple):\n mscale (tuple):\n shear (float):\n perspective (float):\n enable_mixup (bool):\n *args(tuple) : Additional arguments for mixup random sampler.\n \"\"\"\n super().__init__(args, train_or_test, transforms)\n self._dataset = dataset\n self.degrees = degrees\n self.translate = translate\n self.scale = scale\n self.shear = shear\n self.perspective = perspective\n self.mixup_scale = mscale\n self.enable_mosaic = mosaic\n self.enable_mixup = enable_mixup\n\n # self.randomerasing = RandomErasing(p=args.p_era, p_img=args.p_img, p_box=args.p_box, scale=args.p_scale, value=114)\n\n def __len__(self):\n return len(self._dataset)\n\n @Dataset.resize_getitem\n def __getitem__(self, idx):\n if self.enable_mosaic:\n mosaic_labels = []\n input_dim = self._dataset.input_dim\n input_h, input_w = input_dim[0], input_dim[1]\n\n # yc, xc = s, s # mosaic center x, y\n yc = int(random.uniform(0.5 * input_h, 1.5 * input_h))\n xc = int(random.uniform(0.5 * input_w, 1.5 * input_w))\n\n # 3 additional image indices\n indices = [idx] + [random.randint(0, len(self._dataset) - 1) for _ in range(3)]\n\n for i_mosaic, index in enumerate(indices):\n img, _labels, _, _ = self._dataset.pull_item(index)\n h0, w0 = img.shape[:2] # orig hw\n scale = min(1. * input_h / h0, 1. 
* input_w / w0)\n img = cv2.resize(\n img, (int(w0 * scale), int(h0 * scale)), interpolation=cv2.INTER_LINEAR\n )\n # generate output mosaic image\n (h, w, c) = img.shape[:3]\n if i_mosaic == 0:\n mosaic_img = np.full((input_h * 2, input_w * 2, c), 114, dtype=np.uint8)\n\n # suffix l means large image, while s means small image in mosaic aug.\n (l_x1, l_y1, l_x2, l_y2), (s_x1, s_y1, s_x2, s_y2) = get_mosaic_coordinate(\n mosaic_img, i_mosaic, xc, yc, w, h, input_h, input_w\n )\n\n mosaic_img[l_y1:l_y2, l_x1:l_x2] = img[s_y1:s_y2, s_x1:s_x2]\n padw, padh = l_x1 - s_x1, l_y1 - s_y1\n\n labels = _labels.copy()\n # Normalized xywh to pixel xyxy format\n if _labels.size > 0:\n labels[:, 0] = scale * _labels[:, 0] + padw\n labels[:, 1] = scale * _labels[:, 1] + padh\n labels[:, 2] = scale * _labels[:, 2] + padw\n labels[:, 3] = scale * _labels[:, 3] + padh\n mosaic_labels.append(labels)\n\n if len(mosaic_labels):\n mosaic_labels = np.concatenate(mosaic_labels, 0)\n '''\n np.clip(mosaic_labels[:, 0], 0, 2 * input_w, out=mosaic_labels[:, 0])\n np.clip(mosaic_labels[:, 1], 0, 2 * input_h, out=mosaic_labels[:, 1])\n np.clip(mosaic_labels[:, 2], 0, 2 * input_w, out=mosaic_labels[:, 2])\n np.clip(mosaic_labels[:, 3], 0, 2 * input_h, out=mosaic_labels[:, 3])\n '''\n \n mosaic_labels = mosaic_labels[mosaic_labels[:, 0] < 2 * input_w]\n mosaic_labels = mosaic_labels[mosaic_labels[:, 2] > 0]\n mosaic_labels = mosaic_labels[mosaic_labels[:, 1] < 2 * input_h]\n mosaic_labels = mosaic_labels[mosaic_labels[:, 3] > 0]\n \n #augment_hsv(mosaic_img)\n zero_box_flag = len(mosaic_labels) == 0\n\n for _ in range(100):\n mosaic_img_tmp, mosaic_labels_tmp = random_perspective(\n mosaic_img,\n mosaic_labels,\n degrees=self.degrees,\n translate=self.translate,\n scale=self.scale,\n shear=self.shear,\n perspective=self.perspective,\n border=[-input_h // 2, -input_w // 2],\n ) # border to remove\n if zero_box_flag or len(mosaic_labels_tmp) > 0:\n mosaic_img, mosaic_labels = mosaic_img_tmp, mosaic_labels_tmp\n break\n\n # -----------------------------------------------------------------\n # CopyPaste: https://arxiv.org/abs/2012.07177\n # -----------------------------------------------------------------\n if self.enable_mixup and not len(mosaic_labels) == 0:\n mosaic_img, mosaic_labels = self.mixup(mosaic_img, mosaic_labels, self.input_dim)\n \n # mosaic_img = self.randomerasing(mosaic_img, mosaic_labels[:, :4].copy())\n \n mix_img, padded_labels = self._transforms(mosaic_img, mosaic_labels, self.input_dim)\n img_info = (mix_img.shape[1], mix_img.shape[0])\n\n return mix_img, padded_labels, img_info, np.array([idx])\n\n else:\n self._dataset._input_dim = self.input_dim\n img, label, img_info, id_ = self._dataset.pull_item(idx)\n img, label = self._transforms(img, label, self.input_dim)\n return img, label, img_info, id_\n\n def mixup(self, origin_img, origin_labels, input_dim):\n jit_factor = random.uniform(*self.mixup_scale)\n FLIP = random.uniform(0, 1) > 0.5\n cp_labels = []\n while len(cp_labels) == 0:\n cp_index = random.randint(0, self.__len__() - 1)\n cp_labels = self._dataset.load_anno(cp_index)\n img, cp_labels, _, _ = self._dataset.pull_item(cp_index)\n\n if len(img.shape) == 3:\n cp_img = np.ones((input_dim[0], input_dim[1], 3)) * 114.0\n else:\n cp_img = np.ones(input_dim) * 114.0\n cp_scale_ratio = min(input_dim[0] / img.shape[0], input_dim[1] / img.shape[1])\n resized_img = cv2.resize(\n img,\n (int(img.shape[1] * cp_scale_ratio), int(img.shape[0] * cp_scale_ratio)),\n interpolation=cv2.INTER_LINEAR,\n 
).astype(np.float32)\n cp_img[\n : int(img.shape[0] * cp_scale_ratio), : int(img.shape[1] * cp_scale_ratio)\n ] = resized_img\n cp_img = cv2.resize(\n cp_img,\n (int(cp_img.shape[1] * jit_factor), int(cp_img.shape[0] * jit_factor)),\n )\n cp_scale_ratio *= jit_factor\n if FLIP:\n cp_img = cp_img[:, ::-1, :]\n\n origin_h, origin_w = cp_img.shape[:2]\n target_h, target_w = origin_img.shape[:2]\n padded_img = np.zeros(\n (max(origin_h, target_h), max(origin_w, target_w), 3)\n ).astype(np.uint8)\n padded_img[:origin_h, :origin_w] = cp_img\n\n x_offset, y_offset = 0, 0\n if padded_img.shape[0] > target_h:\n y_offset = random.randint(0, padded_img.shape[0] - target_h - 1)\n if padded_img.shape[1] > target_w:\n x_offset = random.randint(0, padded_img.shape[1] - target_w - 1)\n padded_cropped_img = padded_img[\n y_offset: y_offset + target_h, x_offset: x_offset + target_w\n ]\n\n cp_bboxes_origin_np = adjust_box_anns(\n cp_labels[:, :4].copy(), cp_scale_ratio, 0, 0, origin_w, origin_h\n )\n if FLIP:\n cp_bboxes_origin_np[:, 0::2] = (\n origin_w - cp_bboxes_origin_np[:, 0::2][:, ::-1]\n )\n cp_bboxes_transformed_np = cp_bboxes_origin_np.copy()\n '''\n cp_bboxes_transformed_np[:, 0::2] = np.clip(\n cp_bboxes_transformed_np[:, 0::2] - x_offset, 0, target_w\n )\n cp_bboxes_transformed_np[:, 1::2] = np.clip(\n cp_bboxes_transformed_np[:, 1::2] - y_offset, 0, target_h\n )\n '''\n cp_bboxes_transformed_np[:, 0::2] = cp_bboxes_transformed_np[:, 0::2] - x_offset\n cp_bboxes_transformed_np[:, 1::2] = cp_bboxes_transformed_np[:, 1::2] - y_offset\n keep_list = box_candidates(cp_bboxes_origin_np.T, cp_bboxes_transformed_np.T, 5)\n\n if keep_list.sum() >= 1.0:\n cls_labels = cp_labels[keep_list, 4:5].copy()\n id_labels = cp_labels[keep_list, 5:6].copy()\n box_labels = cp_bboxes_transformed_np[keep_list]\n labels = np.hstack((box_labels, cls_labels, id_labels))\n # remove outside bbox\n labels = labels[labels[:, 0] < target_w]\n labels = labels[labels[:, 2] > 0]\n labels = labels[labels[:, 1] < target_h]\n labels = labels[labels[:, 3] > 0]\n origin_labels = np.vstack((origin_labels, labels))\n origin_img = origin_img.astype(np.float32)\n origin_img = 0.5 * origin_img + 0.5 * padded_cropped_img.astype(np.float32)\n\n return origin_img, origin_labels" }, { "identifier": "get_mosaic_coordinate", "path": "motlib/mot_dataset/transform/yolox/mosaic.py", "snippet": "def get_mosaic_coordinate(mosaic_image, mosaic_index, xc, yc, w, h, input_h, input_w):\n # TODO update doc\n # index0 to top left part of image\n if mosaic_index == 0:\n x1, y1, x2, y2 = max(xc - w, 0), max(yc - h, 0), xc, yc\n small_coord = w - (x2 - x1), h - (y2 - y1), w, h\n # index1 to top right part of image\n elif mosaic_index == 1:\n x1, y1, x2, y2 = xc, max(yc - h, 0), min(xc + w, input_w * 2), yc\n small_coord = 0, h - (y2 - y1), min(w, x2 - x1), h\n # index2 to bottom left part of image\n elif mosaic_index == 2:\n x1, y1, x2, y2 = max(xc - w, 0), yc, xc, min(input_h * 2, yc + h)\n small_coord = w - (x2 - x1), 0, w, min(y2 - y1, h)\n # index2 to bottom right part of image\n elif mosaic_index == 3:\n x1, y1, x2, y2 = xc, yc, min(xc + w, input_w * 2), min(input_h * 2, yc + h) # noqa\n small_coord = 0, 0, min(w, x2 - x1), min(y2 - y1, h)\n return (x1, y1, x2, y2), small_coord" }, { "identifier": "random_perspective", "path": "motlib/mot_dataset/transform/mot_video/data_augment.py", "snippet": "def random_perspective(\n imgs,\n targets_all=(),\n degrees=10,\n translate=0.1,\n scale=0.1,\n shear=10,\n perspective=0.0,\n border=(0, 0),\n):\n img = 
imgs[0]\n # targets = [cls, xyxy]\n height = img.shape[0] + border[0] * 2 # shape(h,w,c)\n width = img.shape[1] + border[1] * 2\n\n # Center\n C = np.eye(3)\n C[0, 2] = -img.shape[1] / 2 # x translation (pixels)\n C[1, 2] = -img.shape[0] / 2 # y translation (pixels)\n\n # Rotation and Scale\n R = np.eye(3)\n a = random.uniform(-degrees, degrees)\n # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations\n s = random.uniform(scale[0], scale[1])\n # s = 2 ** random.uniform(-scale, scale)\n R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)\n\n # Shear\n S = np.eye(3)\n S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)\n S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)\n\n # Translation\n T = np.eye(3)\n T[0, 2] = (\n random.uniform(0.5 - translate, 0.5 + translate) * width\n ) # x translation (pixels)\n T[1, 2] = (\n random.uniform(0.5 - translate, 0.5 + translate) * height\n ) # y translation (pixels)\n\n # Combined rotation matrix\n M = T @ S @ R @ C # order of operations (right to left) is IMPORTANT\n\n ###########################\n # For Aug out of Mosaic\n # s = 1.\n # M = np.eye(3)\n ###########################\n\n if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed\n new_imgs = []\n for img in imgs:\n if perspective:\n img = cv2.warpPerspective(\n img, M, dsize=(width, height), borderValue=(114, 114, 114)\n )\n else: # affine\n img = cv2.warpAffine(\n img, M[:2], dsize=(width, height), borderValue=(114, 114, 114)\n )\n new_imgs.append(img)\n imgs = new_imgs\n\n # Transform label coordinates\n new_targets_all = []\n for targets in targets_all:\n n = len(targets)\n if n:\n # warp points\n xy = np.ones((n * 4, 3))\n xy[:, :2] = targets[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(\n n * 4, 2\n ) # x1y1, x2y2, x1y2, x2y1\n xy = xy @ M.T # transform\n if perspective:\n xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale\n else: # affine\n xy = xy[:, :2].reshape(n, 8)\n\n # create new boxes\n x = xy[:, [0, 2, 4, 6]]\n y = xy[:, [1, 3, 5, 7]]\n xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T\n\n # clip boxes\n #xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)\n #xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)\n\n # filter candidates\n i = box_candidates(box1=targets[:, :4].T * s, box2=xy.T)\n targets = targets[i]\n targets[:, :4] = xy[i]\n \n targets = targets[targets[:, 0] < width]\n targets = targets[targets[:, 2] > 0]\n targets = targets[targets[:, 1] < height]\n targets = targets[targets[:, 3] > 0]\n new_targets_all.append(targets)\n targets_all = new_targets_all\n return imgs, targets_all" }, { "identifier": "RandomErasing", "path": "motlib/mot_dataset/transform/mot_video/data_augment.py", "snippet": "class RandomErasing(object):\n def __init__(self, p=0.5, area_keep=0.7, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=114, inplace=True):\n super().__init__()\n logger = logging.getLogger(__name__)\n if not isinstance(value, (numbers.Number, str, tuple, list)):\n raise TypeError(\"Argument value should be either a number or str or a sequence\")\n if isinstance(value, str) and value != \"random\":\n raise ValueError(\"If value is str, it should be 'random'\")\n if not isinstance(scale, (tuple, list)):\n raise TypeError(\"Scale should be a sequence\")\n if not isinstance(ratio, (tuple, list)):\n raise TypeError(\"Ratio should be a sequence\")\n if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):\n logger.warning(\"Scale and ratio should be of 
kind (min, max)\")\n if scale[0] < 0 or scale[1] > 1:\n raise ValueError(\"Scale should be between 0 and 1\")\n if p < 0 or p > 1:\n raise ValueError(\"Random erasing probability should be between 0 and 1\")\n\n self.p = p\n self.area_keep = area_keep\n self.scale = scale\n self.ratio = ratio\n self.value = value\n self.inplace = inplace\n\n @staticmethod\n def get_params(img, scale, ratio, value) :\n \"\"\"Get parameters for ``erase`` for a random erasing.\n\n Args:\n img (Tensor): Tensor image to be erased.\n scale (sequence): range of proportion of erased area against input image.\n ratio (sequence): range of aspect ratio of erased area.\n value (list, optional): erasing value. If None, it is interpreted as \"random\"\n (erasing each pixel with random values). If ``len(value)`` is 1, it is interpreted as a number,\n i.e. ``value[0]``.\n\n Returns:\n tuple: params (i, j, h, w, v) to be passed to ``erase`` for random erasing.\n \"\"\"\n img_h, img_w, img_c = img.shape\n area = img_h * img_w\n\n log_ratio = torch.log(torch.tensor(ratio))\n for _ in range(10):\n erase_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()\n aspect_ratio = torch.exp(torch.empty(1).uniform_(log_ratio[0], log_ratio[1])).item()\n\n h = int(round(math.sqrt(erase_area * aspect_ratio)))\n w = int(round(math.sqrt(erase_area / aspect_ratio)))\n if not (h < img_h and w < img_w):\n continue\n\n if value is None:\n v = torch.empty([h, w, img_c], dtype=torch.float32).normal_()\n else:\n v = torch.tensor(value)[None, None, :]\n\n i = torch.randint(0, img_h + 1, size=(1,)).item() - h // 2\n j = torch.randint(0, img_w + 1, size=(1,)).item() - w // 2\n return i, j, h, w, v\n\n # Return original image\n return 0, 0, img_h, img_w, None\n \n @staticmethod\n def erase(img, y, x, h, w, v):\n img[y:y+h,x:x+w] = v\n \n @staticmethod\n def box_area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n\n def box_overlap_select(self, boxes1, boxes2, img_h, img_w):\n if boxes1.shape[0] == 0 or boxes2.shape[0] == 0:\n return boxes1\n boxes1[0::2] = np.clip(boxes1[0::2], a_min=0, a_max=img_w)\n boxes1[1::2] = np.clip(boxes1[1::2], a_min=0, a_max=img_h)\n area2 = self.box_area(boxes2)\n\n lt = np.maximum(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]\n rb = np.minimum(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]\n\n wh = (rb - lt) # [N,M,2]\n wh[wh<0] = 0\n inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]\n overlap = inter / (area2[None, :] + 1e-6)\n assert overlap.shape[0] == 1\n select_idx = overlap.reshape(-1) < self.area_keep\n return select_idx\n \n def erase_img(self, img, targets_raw, targets, value):\n img_h, img_w, img_c = img.shape\n for _ in range(100):\n y, x, h, w, v = self.get_params(img, self.scale, self.ratio, value)\n if v is None:\n continue\n box1 = np.array([x, y, x+w, y+h]).reshape(1, 4)\n select_idx = self.box_overlap_select(box1, targets, img_h=img_h, img_w=img_w)\n targets_select = targets_raw[select_idx]\n if targets_select.shape[0] > 0:\n for x1, y1, x2, y2 in box1:\n self.erase(img, y1, x1, y2-y1, x2-x1, value)\n return img, targets_select\n return img, targets_raw\n \n def core(self, img_all, label_raw_all, value):\n img_res, label_res = [], []\n for img, label_raw in zip(img_all, label_raw_all):\n if torch.rand(1) < self.p:\n label = deepcopy(label_raw)\n label = label.astype(np.int32)[:, :4]\n img_h, img_w, img_c = img.shape\n label[0::2] = np.clip(label[0::2], a_min=0, a_max=img_w)\n label[1::2] = np.clip(label[1::2], a_min=0, a_max=img_h)\n img, label_selected = 
self.erase_img(img, deepcopy(label_raw), label, value)\n label_res.append(label_selected)\n else:\n label_res.append(label_raw)\n img_res.append(img)\n return img_res, label_res\n\n \n def __call__(self, img, label):\n if isinstance(self.value, (int, float)):\n value = [self.value]\n elif isinstance(self.value, str):\n value = None\n elif isinstance(self.value, tuple):\n value = list(self.value)\n else:\n value = self.value\n\n if value is not None and not (len(value) in (1, img[0].shape[-3])):\n raise ValueError(\n \"If value is a sequence, it should have either a single value or \"\n f\"{img[0].shape[-3]} (number of input channels)\"\n )\n if self.p > 0:\n img, label = self.core(img, label, value)\n\n return img, label\n\n def __repr__(self) -> str:\n s = (\n f\"{self.__class__.__name__}\"\n f\"(p={self.p}, \"\n f\"scale={self.scale}, \"\n f\"ratio={self.ratio}, \"\n f\"value={self.value}, \"\n f\"inplace={self.inplace})\"\n )\n return s" } ]
import cv2
import numpy as np
import random
from motlib.mot_dataset.transform.yolox.utils import adjust_box_anns
from copy import deepcopy
from motlib.mot_dataset.transform.yolox.data_augment import box_candidates, augment_hsv
from motlib.mot_dataset.transform.yolox.dataset import Dataset
from motlib.mot_dataset.transform.yolox.mosaic import MosaicDetection, get_mosaic_coordinate
from collections import defaultdict
from .data_augment import random_perspective, RandomErasing
7,788
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
# This file may have been modified by Bytedance Ltd. and/or its affiliates (“Bytedance's Modifications”). All Bytedance's Modifications are Copyright (year) Bytedance Ltd. and/or its affiliates.


class MOTMosaicDetection(MosaicDetection):
    """Detection dataset wrapper that performs mixup for normal dataset."""

    def __init__(
        self, dataset, img_size, mosaic=True,
        degrees=10.0, translate=0.1, scale=(0.5, 1.5), mscale=(0.5, 1.5),
        shear=2.0, perspective=0.0, enable_mixup=True, args=None, train_or_test="train", transforms=None
    ):
        super().__init__(dataset, img_size, mosaic, degrees, translate, scale, mscale, shear,
                         perspective, enable_mixup, args, train_or_test, transforms)
        self.erasing_func = RandomErasing(p=args.p_era, area_keep=args.area_keep, value=114)
@Dataset.resize_getitem
3
2023-10-16 02:18:33+00:00
12k
CuriseJia/FreeStyleRet
test.py
[ { "identifier": "ShallowStyleRetrieval", "path": "src/models/style_retrieval.py", "snippet": "class ShallowStyleRetrieval(nn.Module):\n def __init__(self, model_args):\n super(ShallowStyleRetrieval, self).__init__()\n self.args = model_args\n self.openclip, self.pre_process_train, self.pre_process_val = open_clip.create_model_and_transforms(\n model_name='ViT-L-14', pretrained=self.args.origin_resume)\n self.tokenizer = open_clip.get_tokenizer('ViT-L-14')\n self.openclip.apply(freeze_all_but_bn)\n self.visual = self.openclip.visual\n self.transformer = self.visual.transformer\n # Prompt Token\n self.gram_prompt = nn.Parameter(torch.randn(\n self.args.gram_prompts, self.args.gram_prompt_dim))\n self.gram_encoder = VGG\n self.gram_encoder.load_state_dict(torch.load(self.args.gram_encoder_path))\n self.gram_encoder.apply(freeze_model)\n self.gram_patch = nn.Conv2d(128, 256, 16, 16)\n self.gram_pool = nn.Linear(256, 4)\n self.gram_linear = nn.Sequential(\n nn.Linear(256, 512),\n nn.Linear(512, 1024),\n nn.Linear(1024, self.args.gram_prompt_dim))\n self.style_prompt = nn.Parameter(torch.randn(\n self.args.style_prompts, self.args.style_prompt_dim))\n self.style_patch = nn.Conv2d(256, 256, 16, 16)\n self.style_linear = nn.Sequential(\n nn.Linear(256, 512),\n nn.Linear(512, 1024),\n nn.Linear(1024, self.args.gram_prompt_dim))\n # loss\n self.i2t_loss = nn.TripletMarginWithDistanceLoss(\n distance_function=lambda x, y: 1.0-F.cosine_similarity(x, y), \n margin=1)\n self.t2i_loss = nn.TripletMarginWithDistanceLoss(\n distance_function=lambda x, y: 1.0-F.cosine_similarity(x, y), \n margin=1)\n \n\n def get_loss(self, image_feature, pair_feature, negative_feature, optimizer):\n loss_1 = self.i2t_loss(image_feature, pair_feature, negative_feature)\n loss_2 = self.t2i_loss(pair_feature, image_feature, negative_feature)\n loss = (loss_1 + loss_2) / 2\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return loss.detach().cpu().numpy()\n \n\n def _get_features(self, image, model, layers=None):\n if layers is None:\n layers = {'0': 'conv1_1', \n '5': 'conv2_1', \n '10': 'conv3_1', \n '19': 'conv4_1', \n '21': 'conv4_2', \n '28': 'conv5_1',\n '31': 'conv5_2'} \n features = {}\n x = image\n for name, layer in model._modules.items():\n x = layer(x) \n if name in layers:\n features[layers[name]] = x\n \n return features\n \n\n def _get_gram_prompt(self, input):\n latent_feature = self._get_features(input, self.gram_encoder)\n embed = self.gram_patch(latent_feature['conv3_1'])\n n, c, h, w = embed.shape # (b, 256, 7, 7)\n\n features = embed.view(n, c, -1) # (b*256, 49)\n features = torch.bmm(features, features.transpose(1, 2))\n features = self.gram_pool(features)\n prompt_feature = self.gram_linear(features.permute(0, 2, 1))\n\n return prompt_feature\n \n\n def _get_style_prompt(self, input):\n # style_feature = torch.tensor(torch.randn(4, 4096))\n feature = torch.from_numpy(np.load(self.args.style_cluster_path)).view(4, 4096).float().to(self.args.device)\n \n gram = self._get_features(input, self.gram_encoder)\n embed = self.gram_patch(gram['conv3_1'])\n n, c, h, w = embed.shape\n gram = embed.view(n, c, -1) # (b*256, 49)\n gram = torch.bmm(gram, gram.transpose(1, 2))\n\n gram = self.gram_pool(gram)\n gram = self.gram_linear(gram.permute(0, 2, 1))\n\n feature = select_style_prompt(gram, feature)\n\n return feature\n \n\n def _visual_forward(self, x):\n gram_prompt = self._get_gram_prompt(x)\n style_prompt = self._get_style_prompt(x)\n\n x = self.visual.conv1(x) # shape = [*, width, grid, grid]\n 
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n\n # class embeddings and positional embeddings\n x = torch.cat(\n [self.visual.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n x = x + self.visual.positional_embedding.to(x.dtype)\n\n # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in\n x = self.visual.patch_dropout(x)\n x = self.visual.ln_pre(x)\n\n if self.args.prompt_location == 'Shallow':\n\n x = torch.cat([x[:, 0, :].unsqueeze(1), style_prompt, x[:, 1:, :]], dim=1)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.visual.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n \n elif self.args.prompt_location == 'Bottom':\n\n x = x.permute(1, 0, 2) # NLD -> LND\n for r in range(len(self.transformer.resblocks)):\n if r == len(self.transformer.resblocks)-1:\n x = torch.cat([x[0, :, :].unsqueeze(0), \n gram_prompt.permute(1, 0, 2), \n x[1:, :, :]], dim=0)\n x = self.transformer.resblocks[r](x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n # if self.visual.attn_pool is not None:\n # x = self.visual.attn_pool(x)\n # x = self.visual.ln_post(x)\n # pooled, tokens = self.visual._global_pool(x)\n # else:\n pooled, tokens = self.visual._global_pool(x)\n pooled = self.visual.ln_post(pooled)\n\n if self.visual.proj is not None:\n pooled = pooled @ self.visual.proj\n\n # if self.visual.output_tokens:\n # return pooled, tokens\n \n return pooled\n \n\n def forward(self, data, dtype='image'):\n if dtype == 'image': \n feat = self._visual_forward(data)\n\n elif dtype == 'text':\n feat = self.openclip.encode_text(data)\n\n return feat" }, { "identifier": "DeepStyleRetrieval", "path": "src/models/style_retrieval.py", "snippet": "class DeepStyleRetrieval(nn.Module):\n def __init__(self, model_args):\n super(DeepStyleRetrieval, self).__init__()\n self.args = model_args\n self.openclip, self.pre_process_train, self.pre_process_val = open_clip.create_model_and_transforms(\n model_name='ViT-L-14', pretrained=self.args.origin_resume)\n self.tokenizer = open_clip.get_tokenizer('ViT-L-14')\n self.openclip.apply(freeze_all_but_bn)\n self.visual = self.openclip.visual\n self.transformer = self.visual.transformer\n # Prompt Token\n self.gram_prompt = nn.Parameter(torch.randn(\n self.args.gram_prompts, self.args.gram_prompt_dim))\n self.gram_encoder = VGG\n self.gram_encoder.load_state_dict(torch.load(self.args.gram_encoder_path))\n self.gram_encoder.apply(freeze_model)\n self.gram_patch = nn.Conv2d(128, 256, 16, 16)\n self.gram_pool = nn.Linear(256, 4)\n self.gram_linear = nn.Sequential(\n nn.Linear(256, 512),\n nn.Linear(512, 1024),\n nn.Linear(1024, self.args.gram_prompt_dim))\n self.style_prompt = nn.Parameter(torch.randn(\n self.args.style_prompts, self.args.style_prompt_dim))\n self.style_patch = nn.Conv2d(256, 256, 16, 16)\n self.style_linear = nn.Sequential(\n nn.Linear(256, 512),\n nn.Linear(512, 1024),\n nn.Linear(1024, self.args.gram_prompt_dim))\n # loss\n self.i2t_loss = nn.TripletMarginWithDistanceLoss(\n distance_function=lambda x, y: 1.0-F.cosine_similarity(x, y), \n margin=1)\n self.t2i_loss = nn.TripletMarginWithDistanceLoss(\n distance_function=lambda x, y: 1.0-F.cosine_similarity(x, y), \n margin=1)\n \n\n def get_loss(self, image_feature, pair_feature, negative_feature, optimizer):\n loss_1 = self.i2t_loss(image_feature, pair_feature, 
negative_feature)\n loss_2 = self.t2i_loss(pair_feature, image_feature, negative_feature)\n loss = (loss_1 + loss_2) / 2\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return loss.detach().cpu().numpy()\n \n\n def _get_features(self, image, model, layers=None):\n if layers is None:\n layers = {'0': 'conv1_1', \n '5': 'conv2_1', \n '10': 'conv3_1', \n '19': 'conv4_1', \n '21': 'conv4_2', \n '28': 'conv5_1',\n '31': 'conv5_2'} \n features = {}\n x = image\n for name, layer in model._modules.items():\n x = layer(x) \n if name in layers:\n features[layers[name]] = x\n \n return features\n \n\n def _get_gram_prompt(self, input):\n latent_feature = self._get_features(input, self.gram_encoder)\n embed = self.gram_patch(latent_feature['conv3_1'])\n n, c, h, w = embed.shape # (b, 256, 7, 7)\n\n features = embed.view(n, c, -1) # (b*256, 49)\n features = torch.bmm(features, features.transpose(1, 2))\n features = self.gram_pool(features)\n prompt_feature = self.gram_linear(features.permute(0, 2, 1))\n\n return prompt_feature\n \n\n # def _get_style_prompt(self, input):\n # feature = torch.from_numpy(np.load(self.args.style_cluster_path)).view(self.args.style_prompts, 128, 112, 112).float().to(self.args.device) # (4, 1605632)\n # # style_feature = torch.tensor(torch.randn(4, 256, 256))\n # style_feature = self.gram_patch(feature)\n # n, c, h, w = style_feature.shape # (b, 256, 7, 7)\n # style_feature = style_feature.view(n, c, -1) # (b*256, 49)\n # style_feature = torch.bmm(style_feature, style_feature.transpose(1, 2))\n \n # gram = self._get_features(input, self.gram_encoder)\n # embed = self.gram_patch(gram['conv3_1'])\n # n, c, h, w = embed.shape\n # gram = embed.view(n, c, -1) # (b*256, 49)\n # gram = torch.bmm(gram, gram.transpose(1, 2))\n # feature = select_style_prompt(gram, style_feature.view(self.args.style_prompts, -1)) # (b, 65536)\n # feature = self.style_patch(feature.view(self.args.batch_size, 256, 16, 16)).view(self.args.batch_size, 256)\n # feature = self.style_linear(feature).unsqueeze(1).repeat(1, self.args.style_prompts, 1)\n\n # return feature\n\n def _get_style_prompt(self, input):\n # style_feature = torch.tensor(torch.randn(4, 4096))\n feature = torch.from_numpy(np.load(self.args.style_cluster_path)).view(4, 4096).float().to(self.args.device)\n \n gram = self._get_features(input, self.gram_encoder)\n embed = self.gram_patch(gram['conv3_1'])\n n, c, h, w = embed.shape\n gram = embed.view(n, c, -1) # (b*256, 49)\n gram = torch.bmm(gram, gram.transpose(1, 2))\n\n gram = self.gram_pool(gram)\n gram = self.gram_linear(gram.permute(0, 2, 1))\n\n feature = select_style_prompt(gram, feature)\n\n return feature\n\n\n def _visual_forward(self, x):\n input = x\n self.gram_prompt.parameter = self._get_gram_prompt(input)\n self.style_prompt.parameter = self._get_style_prompt(input)\n\n x = self.visual.conv1(x) # shape = [*, width, grid, grid]\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n\n # class embeddings and positional embeddings\n x = torch.cat(\n [self.visual.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n x = x + self.visual.positional_embedding.to(x.dtype)\n\n # a patch_dropout of 0. 
would mean it is disabled and this function would do nothing but return what was passed in\n x = self.visual.patch_dropout(x)\n x = self.visual.ln_pre(x)\n\n # add style_prompt\n x = torch.cat([x[:, 0, :].unsqueeze(1), self.style_prompt.expand(x.shape[0],-1,-1), x[:, 1:, :]], dim=1)\n\n # add gram_prompt before the last block of transformer\n x = x.permute(1, 0, 2) # NLD -> LND\n for r in range(len(self.transformer.resblocks)):\n if r == len(self.transformer.resblocks)-1:\n x = torch.cat([x[0, :, :].unsqueeze(0), \n self.gram_prompt.expand(self.args.batch_size,-1,-1).permute(1, 0, 2), \n x[1:, :, :]], dim=0)\n x = self.transformer.resblocks[r](x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n # if self.visual.attn_pool is not None:\n # x = self.visual.attn_pool(x)\n # x = self.visual.ln_post(x)\n # pooled, tokens = self.visual._global_pool(x)\n # else:\n pooled, tokens = self.visual._global_pool(x)\n pooled = self.visual.ln_post(pooled)\n\n if self.visual.proj is not None:\n pooled = pooled @ self.visual.proj\n\n # if self.visual.output_tokens:\n # return pooled, tokens\n \n return pooled\n \n\n def forward(self, data, dtype='image'):\n if dtype == 'image': \n feat = self._visual_forward(data)\n\n elif dtype == 'text':\n feat = self.openclip.encode_text(data)\n\n return feat" }, { "identifier": "BLIP_Retrieval", "path": "src/models/blip_retrieval.py", "snippet": "class BLIP_Retrieval(nn.Module):\n def __init__(self, model_args):\n super(BLIP_Retrieval, self).__init__()\n self.args = model_args\n self.blip = blip_retrieval(pretrained=self.args.origin_resume, image_size=224, vit='large', vit_grad_ckpt=True, vit_ckpt_layer=10)\n self.blip.apply(freeze_all_but_bn)\n self.visual = self.blip.visual_encoder.blocks\n # Prompt Token\n self.gram_prompt = nn.Parameter(torch.randn(\n self.args.gram_prompts, self.args.gram_prompt_dim))\n self.gram_encoder = VGG\n self.gram_encoder.load_state_dict(torch.load(self.args.gram_encoder_path))\n self.gram_encoder.apply(freeze_model)\n self.gram_patch = nn.Conv2d(128, 256, 16, 16)\n self.gram_pool = nn.Linear(256, 4)\n self.gram_linear = nn.Sequential(\n nn.Linear(256, 512),\n nn.Linear(512, 1024),\n nn.Linear(1024, self.args.gram_prompt_dim))\n self.style_prompt = nn.Parameter(torch.randn(\n self.args.style_prompts, self.args.style_prompt_dim))\n self.style_patch = nn.Conv2d(256, 256, 16, 16)\n self.style_linear = nn.Sequential(\n nn.Linear(256, 512),\n nn.Linear(512, 1024),\n nn.Linear(1024, self.args.gram_prompt_dim))\n # loss and process\n self.triplet_loss = nn.TripletMarginWithDistanceLoss(\n distance_function=lambda x, y: 1.0-F.cosine_similarity(x, y), \n margin=1)\n self.pre_process_train = image_transform(224, True, image_mean, image_std)\n self.pre_process_val = image_transform(224, False, image_mean, image_std)\n \n\n def _get_features(self, image, model, layers=None):\n if layers is None:\n layers = {'0': 'conv1_1', \n '5': 'conv2_1', \n '10': 'conv3_1', \n '19': 'conv4_1', \n '21': 'conv4_2', \n '28': 'conv5_1',\n '31': 'conv5_2'} \n features = {}\n x = image\n for name, layer in model._modules.items():\n x = layer(x) \n if name in layers:\n features[layers[name]] = x\n \n return features\n\n\n def _get_gram_prompt(self, input):\n latent_feature = self._get_features(input, self.gram_encoder)\n embed = self.gram_patch(latent_feature['conv3_1'])\n n, c, h, w = embed.shape # (b, 256, 7, 7)\n\n features = embed.view(n, c, -1) # (b*256, 49)\n features = torch.bmm(features, features.transpose(1, 2))\n features = self.gram_pool(features)\n prompt_feature = 
self.gram_linear(features.permute(0, 2, 1))\n\n return prompt_feature\n \n\n def _get_style_prompt(self, input):\n # style_feature = torch.tensor(torch.randn(4, 4096))\n feature = torch.from_numpy(np.load(self.args.style_cluster_path)).view(4, 4096).float().to(self.args.device)\n \n gram = self._get_features(input, self.gram_encoder)\n embed = self.gram_patch(gram['conv3_1'])\n n, c, h, w = embed.shape\n gram = embed.view(n, c, -1) # (b*256, 49)\n gram = torch.bmm(gram, gram.transpose(1, 2))\n\n gram = self.gram_pool(gram)\n gram = self.gram_linear(gram.permute(0, 2, 1))\n\n feature = select_style_prompt(gram, feature)\n\n return feature\n\n\n def forward(self, data, dtype='image'):\n if dtype == 'image':\n gram_prompt = self._get_gram_prompt(data)\n style_prompt = self._get_style_prompt(data)\n\n feat = self.blip.visual_encoder.patch_embed(data)\n cls_tokens = self.blip.visual_encoder.cls_token.expand(data.shape[0], -1, -1)\n feat = torch.cat((cls_tokens, feat), dim=1)\n feat = feat + self.blip.visual_encoder.pos_embed[:,:feat.size(1),:]\n feat = self.blip.visual_encoder.pos_drop(feat)\n\n feat = torch.cat([feat[:, 0, :].unsqueeze(1), style_prompt, feat[:, 1:, :]], dim=1)\n for r in range(len(self.blip.visual_encoder.blocks)):\n if r == len(self.blip.visual_encoder.blocks)-1:\n feat = torch.cat([feat[:, 0, :].unsqueeze(1), \n gram_prompt,\n feat[:, 1:, :]], dim=1)\n feat = self.blip.visual_encoder.blocks[r](feat)\n \n feat = self.blip.visual_encoder.norm(feat)\n \n ori_embed = F.normalize(self.blip.vision_proj(feat[:,0,:]),dim=-1) \n\n return ori_embed\n \n else:\n text = self.blip.tokenizer(data, padding='max_length', truncation=True, max_length=35, \n return_tensors=\"pt\").to(self.args.device)\n text_output = self.blip.text_encoder(text.input_ids, attention_mask = text.attention_mask, \n return_dict = True, mode = 'text')\n text_feat = F.normalize(self.blip.text_proj(text_output.last_hidden_state[:,0,:]),dim=-1)\n\n return text_feat\n \n\n def get_loss(self, image_feature, pair_feature, negative_feature, optimizer):\n loss = self.triplet_loss(image_feature, pair_feature, negative_feature)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return loss.detach().cpu().numpy()" }, { "identifier": "T2ITestDataset", "path": "src/dataset/data.py", "snippet": "class T2ITestDataset(Dataset):\n def __init__(self, root_path, json_path, image_transform):\n self.root_path = root_path\n self.dataset = json.load(open(json_path,'r'))\n self.image_transform = image_transform\n \n\n def __len__(self):\n return len(self.dataset)\n \n \n def __getitem__(self, index):\n caption_path = os.path.join(self.root_path, 'text/'+self.dataset[index]['caption'])\n image_path = os.path.join(self.root_path, 'images/'+self.dataset[index]['image'])\n \n f = open(caption_path, 'r')\n caption = f.readline().replace('\\n', '')\n pair_image = self.image_transform(Image.open(image_path))\n\n return [caption, pair_image, index]" }, { "identifier": "I2ITestDataset", "path": "src/dataset/data.py", "snippet": "class I2ITestDataset(Dataset):\n def __init__(self, style, root_path, json_path, image_transform):\n self.style = style\n self.root_path = root_path\n self.dataset = json.load(open(json_path,'r'))\n self.image_transform = image_transform\n \n\n def __len__(self):\n return len(self.dataset)\n \n \n def __getitem__(self, index):\n ori_path = os.path.join(self.root_path, 'images/'+self.dataset[index]['image'])\n pair_path = os.path.join(self.root_path, '{}/'.format(self.style)+self.dataset[index]['image'])\n \n 
ori_image = self.image_transform(Image.open(ori_path))\n pair_image = self.image_transform(Image.open(pair_path))\n\n return [ori_image, pair_image, index]" }, { "identifier": "X2ITestDataset", "path": "src/dataset/data.py", "snippet": "class X2ITestDataset(Dataset):\n def __init__(self, style, root_path, json_path, image_transform):\n self.style = style\n self.root_path = root_path\n self.dataset = json.load(open(json_path,'r'))\n self.image_transform = image_transform\n \n\n def __len__(self):\n return len(self.dataset)\n \n \n def __getitem__(self, index):\n caption_path = os.path.join(self.root_path, 'text/'+self.dataset[index]['caption'])\n ori_path = os.path.join(self.root_path, 'images/'+self.dataset[index]['image'])\n pair_path = os.path.join(self.root_path, '{}/'.format(self.style)+self.dataset[index]['image'])\n \n f = open(caption_path, 'r')\n caption = f.readline().replace('\\n', '')\n ori_image = self.image_transform(Image.open(ori_path))\n pair_image = self.image_transform(Image.open(pair_path))\n\n return [caption, ori_image, pair_image, index]" }, { "identifier": "setup_seed", "path": "src/utils/utils.py", "snippet": "def setup_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n cudnn.benchmark = True" }, { "identifier": "getR1Accuary", "path": "src/utils/utils.py", "snippet": "def getR1Accuary(prob):\n temp = prob.detach().cpu().numpy()\n temp = np.argsort(temp, axis=1)\n count = 0\n for i in range(prob.shape[0]):\n if temp[i][prob.shape[1]-1] == i:\n count+=1\n acc = count/prob.shape[0]\n return acc" }, { "identifier": "getR5Accuary", "path": "src/utils/utils.py", "snippet": "def getR5Accuary(prob):\n temp = prob.detach().cpu().numpy()\n temp = np.argsort(temp, axis=1)\n count = 0\n for i in range(prob.shape[0]):\n for j in range(prob.shape[1]-4,prob.shape[1]):\n if temp[i][j] == i:\n count+=1\n acc = count/prob.shape[0]\n return acc" } ]
import argparse

import torch
import torch.nn.functional as F
from tqdm import tqdm
from torch.utils.data import DataLoader

from src.models import ShallowStyleRetrieval, DeepStyleRetrieval, BLIP_Retrieval
from src.dataset.data import T2ITestDataset, I2ITestDataset, X2ITestDataset
from src.utils.utils import setup_seed, getR1Accuary, getR5Accuary
7,602
def parse_args():
    parser = argparse.ArgumentParser(description='Parse args for FreeStyleRet Training.')
    # project settings
    parser.add_argument('--resume', default='', type=str, help='load checkpoints from given path')
    parser.add_argument('--origin_resume', default='model_large_retrieval_coco.pth', type=str, help='load checkpoints from given path')
    parser.add_argument('--gram_encoder_path', default='pretrained/vgg_normalised.pth', type=str, help='load vgg from given path')
    parser.add_argument('--style_cluster_path', default='pretrained/style_cluster.npy', type=str, help='load style prompt from given npy')
    parser.add_argument('--device', default='cuda:0')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--num_workers', default=6, type=int)
    # data settings
    parser.add_argument("--type", type=str, default='style2image', help='choose train text2image or style2image.')
    parser.add_argument("--style", type=str, default='sketch', help='choose sketch, art or mosaic.')
    parser.add_argument("--test_dataset_path", type=str, default='DSR/')
    parser.add_argument("--test_json_path", type=str, default='DSR/test.json')
    parser.add_argument("--batch_size", type=int, default=24)
    # model settings
    parser.add_argument('--prompt', type=str, default='DeepPrompt', help='ShallowPrompt or DeepPrompt')
    parser.add_argument('--gram_prompts', type=int, default=4)
    parser.add_argument('--gram_prompt_dim', type=int, default=1024)
    parser.add_argument('--style_prompts', type=int, default=4)
    parser.add_argument('--style_prompt_dim', type=int, default=1024)
    args = parser.parse_args()
    return args


def eval(args, model, dataloader):
    model.eval()
    r1 = []
    r5 = []

    if args.type == 'text2image':
        for data in enumerate(tqdm(dataloader)):
            if args.prompt == 'BLIP_Retrieval':
                caption = data[1][0]
            else:
                caption = model.tokenizer(data[1][0]).to(args.device, non_blocking=True)
            image = data[1][1].to(args.device, non_blocking=True)
            image_feature = model(image, dtype='image')
            text_feature = model(caption, dtype='text')
            image_feature = F.normalize(image_feature, dim=-1)
            text_feature = F.normalize(text_feature, dim=-1)
            prob = torch.softmax((100.0 * text_feature @ image_feature.T), dim=-1)
            r1.append(getR1Accuary(prob))
            r5.append(getR5Accuary(prob))

    elif args.type == 'style2image':
        for data in enumerate(tqdm(dataloader)):
            origin_image = data[1][0].to(args.device, non_blocking=True)
            retrival_image = data[1][1].to(args.device, non_blocking=True)
            original_feature = model(origin_image, dtype='image')
            retrival_feature = model(retrival_image, dtype='image')
            original_feature = F.normalize(original_feature, dim=-1)
            retrival_feature = F.normalize(retrival_feature, dim=-1)
            prob = torch.softmax((100.0 * retrival_feature @ original_feature.T), dim=-1)
            r1.append(getR1Accuary(prob))
            r5.append(getR5Accuary(prob))

    else:
        for data in enumerate(tqdm(dataloader)):
            if args.prompt == 'BLIP_Retrieval':
                caption = data[1][0]
            else:
                caption = model.tokenizer(data[1][0]).to(args.device, non_blocking=True)
            origin_image = data[1][1].to(args.device, non_blocking=True)
            retrival_image = data[1][2].to(args.device, non_blocking=True)
            text_feature = model(caption, dtype='text')
            original_feature = model(origin_image, dtype='image')
            retrival_feature = model(retrival_image, dtype='image')
            text_feature = F.normalize(text_feature, dim=-1)
            original_feature = F.normalize(original_feature, dim=-1)
            retrival_feature = F.normalize(retrival_feature, dim=-1)
            prob1 = torch.softmax((100.0 * text_feature @ original_feature.T), dim=-1)
            prob2 = prob = torch.softmax((100.0 * retrival_feature @ original_feature.T), dim=-1)
            prob = prob1.max(prob2)
            r1.append(getR1Accuary(prob))
            r5.append(getR5Accuary(prob))

    resr1 = sum(r1)/len(r1)
    resr5 = sum(r5)/len(r5)
    print('R@1 Acc is {}'.format(resr1))
    print('R@5 Acc is {}'.format(resr5))


if __name__ == "__main__":
    args = parse_args()
    setup_seed(args.seed)

    if args.prompt == 'ShallowPrompt':
        model = ShallowStyleRetrieval(args)
    elif args.prompt == 'DeepPrompt':
model = DeepStyleRetrieval(args)
1
2023-10-17 09:32:57+00:00
12k
liuqidong07/MOELoRA-peft
src/MLoRA/peft/tuners/adalora.py
[ { "identifier": "PeftType", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class PeftType(str, enum.Enum):\n PROMPT_TUNING = \"PROMPT_TUNING\"\n P_TUNING = \"P_TUNING\"\n PREFIX_TUNING = \"PREFIX_TUNING\"\n LORA = \"LORA\"\n ADALORA = \"ADALORA\"\n ADAPTION_PROMPT = \"ADAPTION_PROMPT\"\n MMOELORAS = \"MMOELORAS\"" }, { "identifier": "TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING", "path": "src/MLoRA/peft/utils/other.py", "snippet": "TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING = {\n \"t5\": [\"q\", \"k\", \"v\", \"o\", \"wi\", \"wo\"],\n \"mt5\": [\"q\", \"k\", \"v\", \"o\", \"wi_0\", \"wi_1\", \"wo\"],\n \"bart\": [\"q_proj\", \"k_proj\", \"v_proj\", \"out_proj\", \"fc1\", \"fc2\"],\n # \"gpt2\": [\"c_attn\"],\n # \"bloom\": [\"query_key_value\"],\n \"opt\": [\"q_proj\", \"k_proj\", \"v_proj\", \"out_proj\", \"fc1\", \"fc2\"],\n # \"gptj\": [\"q_proj\", \"v_proj\"],\n # \"gpt_neox\": [\"query_key_value\"],\n # \"gpt_neo\": [\"q_proj\", \"v_proj\"],\n # \"bert\": [\"query\", \"value\"],\n \"roberta\": [\"query\", \"key\", \"value\", \"dense\"],\n # \"xlm-roberta\": [\"query\", \"value\"],\n # \"electra\": [\"query\", \"value\"],\n \"deberta-v2\": [\"query_proj\", \"key_proj\", \"value_proj\", \"dense\"],\n # \"deberta\": [\"in_proj\"],\n # \"layoutlm\": [\"query\", \"value\"],\n}" }, { "identifier": "transpose", "path": "src/MLoRA/peft/utils/other.py", "snippet": "def transpose(weight, fan_in_fan_out):\n return weight.T if fan_in_fan_out else weight" }, { "identifier": "_get_submodules", "path": "src/MLoRA/peft/utils/other.py", "snippet": "def _get_submodules(model, key):\n parent = model.get_submodule(\".\".join(key.split(\".\")[:-1]))\n target_name = key.split(\".\")[-1]\n target = model.get_submodule(key)\n return parent, target, target_name" }, { "identifier": "_freeze_adapter", "path": "src/MLoRA/peft/utils/other.py", "snippet": "def _freeze_adapter(model, adapter_name):\n for n, p in model.named_parameters():\n if adapter_name in n:\n p.requires_grad = False" }, { "identifier": "LoraConfig", "path": "src/MLoRA/peft/tuners/lora.py", "snippet": "class LoraConfig(PeftConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`LoraModel`].\n\n Args:\n r (`int`): Lora attention dimension.\n target_modules (`Union[List[str],str]`): The names of the modules to apply Lora to.\n lora_alpha (`float`): The alpha parameter for Lora scaling.\n lora_dropout (`float`): The dropout probability for Lora layers.\n fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out).\n For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.:\n bias (`str`): Bias type for Lora. 
Can be 'none', 'all' or 'lora_only'\n modules_to_save (`List[str]`):List of modules apart from LoRA layers to be set as trainable\n and saved in the final checkpoint.\n \"\"\"\n\n r: int = field(default=8, metadata={\"help\": \"Lora attention dimension\"})\n target_modules: Optional[Union[List[str], str]] = field(\n default=None,\n metadata={\n \"help\": \"List of module names or regex expression of the module names to replace with Lora.\"\n \"For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' \"\n },\n )\n lora_alpha: int = field(default=None, metadata={\"help\": \"Lora alpha\"})\n lora_dropout: float = field(default=None, metadata={\"help\": \"Lora dropout\"})\n fan_in_fan_out: bool = field(\n default=False,\n metadata={\"help\": \"Set this to True if the layer to replace stores weight like (fan_in, fan_out)\"},\n )\n bias: str = field(default=\"none\", metadata={\"help\": \"Bias type for Lora. Can be 'none', 'all' or 'lora_only'\"})\n modules_to_save: Optional[List[str]] = field(\n default=None,\n metadata={\n \"help\": \"List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. \"\n \"For example, in Sequence Classification or Token Classification tasks, \"\n \"the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.\"\n },\n )\n init_lora_weights: bool = field(\n default=True,\n metadata={\"help\": \"Whether to initialize the weights of the Lora layers.\"},\n )\n\n def __post_init__(self):\n self.peft_type = PeftType.LORA" }, { "identifier": "LoraLayer", "path": "src/MLoRA/peft/tuners/lora.py", "snippet": "class LoraLayer:\n def __init__(\n self,\n in_features: int,\n out_features: int,\n ):\n self.r = {}\n self.lora_alpha = {}\n self.scaling = {}\n self.lora_dropout = nn.ModuleDict({})\n self.lora_A = nn.ModuleDict({})\n self.lora_B = nn.ModuleDict({})\n # Mark the weight as unmerged\n self.merged = False\n self.disable_adapters = False\n self.in_features = in_features\n self.out_features = out_features\n\n def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights):\n self.r[adapter_name] = r\n self.lora_alpha[adapter_name] = lora_alpha\n if lora_dropout > 0.0:\n lora_dropout_layer = nn.Dropout(p=lora_dropout)\n else:\n lora_dropout_layer = nn.Identity()\n\n self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))\n # Actual trainable parameters\n if r > 0:\n self.lora_A.update(nn.ModuleDict({adapter_name: nn.Linear(self.in_features, r, bias=False)}))\n self.lora_B.update(nn.ModuleDict({adapter_name: nn.Linear(r, self.out_features, bias=False)}))\n self.scaling[adapter_name] = lora_alpha / r\n if init_lora_weights:\n self.reset_lora_parameters(adapter_name)\n self.to(self.weight.device)\n\n def reset_lora_parameters(self, adapter_name):\n if adapter_name in self.lora_A.keys():\n # initialize A the same way as the default for nn.Linear and B to zero\n nn.init.kaiming_uniform_(self.lora_A[adapter_name].weight, a=math.sqrt(5))\n nn.init.zeros_(self.lora_B[adapter_name].weight)" }, { "identifier": "LoraModel", "path": "src/MLoRA/peft/tuners/lora.py", "snippet": "class LoraModel(torch.nn.Module):\n \"\"\"\n Creates Low Rank Adapter (Lora) model from a pretrained transformers model.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): The model to be adapted.\n config ([`LoraConfig`]): The configuration of the Lora model.\n\n Returns:\n `torch.nn.Module`: The Lora model.\n\n Example:\n\n ```py\n >>> from transformers import 
AutoModelForSeq2SeqLM, LoraConfig\n >>> from peft import LoraModel, LoraConfig\n\n >>> config = LoraConfig(\n ... peft_type=\"LORA\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... r=8,\n ... lora_alpha=32,\n ... target_modules=[\"q\", \"v\"],\n ... lora_dropout=0.01,\n ... )\n\n >>> model = AutoModelForSeq2SeqLM.from_pretrained(\"t5-base\")\n >>> lora_model = LoraModel(config, model)\n ```\n\n **Attributes**:\n - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.\n - **peft_config** ([`LoraConfig`]): The configuration of the Lora model.\n \"\"\"\n\n def __init__(self, model, config, adapter_name):\n super().__init__()\n self.model = model\n self.forward = self.model.forward\n self.peft_config = config\n self.add_adapter(adapter_name, self.peft_config[adapter_name])\n\n def add_adapter(self, adapter_name, config=None):\n if config is not None:\n model_config = self.model.config.to_dict() if hasattr(self.model.config, \"to_dict\") else self.model.config\n config = self._prepare_lora_config(config, model_config)\n self.peft_config[adapter_name] = config\n self._find_and_replace(adapter_name)\n if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != \"none\":\n raise ValueError(\n \"LoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters.\"\n )\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias) # freeze all layers except for lora layer\n if self.peft_config[adapter_name].inference_mode: # if inference, also freeze lora layer\n _freeze_adapter(self.model, adapter_name)\n\n def _find_and_replace(self, adapter_name):\n \"\"\"Replace the target `Linear` module with LoRA layer (Linear+LoRA)\"\"\"\n lora_config = self.peft_config[adapter_name]\n loaded_in_8bit = getattr(self.model, \"is_loaded_in_8bit\", False)\n if loaded_in_8bit and not is_bnb_available():\n raise ImportError(\n \"To use Lora with 8-bit quantization, please install the `bitsandbytes` package. 
\"\n \"You can install it with `pip install bitsandbytes`.\"\n )\n is_target_modules_in_base_model = False\n kwargs = {\n \"r\": lora_config.r,\n \"lora_alpha\": lora_config.lora_alpha,\n \"lora_dropout\": lora_config.lora_dropout,\n \"fan_in_fan_out\": lora_config.fan_in_fan_out,\n \"init_lora_weights\": lora_config.init_lora_weights,\n }\n key_list = [key for key, _ in self.model.named_modules()]\n for key in key_list:\n if isinstance(lora_config.target_modules, str):\n target_module_found = re.fullmatch(lora_config.target_modules, key)\n else:\n target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)\n if target_module_found:\n if not is_target_modules_in_base_model:\n is_target_modules_in_base_model = True\n parent, target, target_name = _get_submodules(self.model, key) # parent: the parent mudle of target (e.g., SelfAttention), target: target module (e.g., nn.Linear()), target name: the name of target module (e.g., query_key_value)\n bias = target.bias is not None\n if isinstance(target, LoraLayer): # if the target is LoraLayer, only need to update the parameters\n target.update_layer(\n adapter_name,\n lora_config.r,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n else: # if not, get the lora parameter for create.\n if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):\n eightbit_kwargs = kwargs.copy()\n eightbit_kwargs.update(\n {\n \"has_fp16_weights\": target.state.has_fp16_weights,\n \"memory_efficient_backward\": target.state.memory_efficient_backward,\n \"threshold\": target.state.threshold,\n \"index\": target.index,\n }\n )\n new_module = Linear8bitLt(\n adapter_name, target.in_features, target.out_features, bias=bias, **eightbit_kwargs\n )\n else: # create based on the original module type\n if isinstance(target, torch.nn.Linear):\n in_features, out_features = target.in_features, target.out_features\n if kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. \"\n \"Setting fan_in_fan_out to False.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = False\n elif isinstance(target, Conv1D):\n in_features, out_features = (\n target.weight.ds_shape if hasattr(target.weight, \"ds_shape\") else target.weight.shape\n )\n if not kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to False but the target module is `Conv1D`. \"\n \"Setting fan_in_fan_out to True.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = True\n else:\n raise ValueError(\n f\"Target module {target} is not supported. \"\n f\"Currently, only `torch.nn.Linear` and `Conv1D` are supported.\"\n )\n new_module = Linear(adapter_name, in_features, out_features, bias=bias, **kwargs) # create the lora module, here is not the raw nn.Linear, but the lora layer\n\n self._replace_module(parent, target_name, new_module, target)\n if not is_target_modules_in_base_model:\n raise ValueError(\n f\"Target modules {lora_config.target_modules} not found in the base model. 
\"\n f\"Please check the target modules and try again.\"\n )\n\n def _replace_module(self, parent_module, child_name, new_module, old_module):\n \"\"\"substitute the original nn.Linear to new Linear (nn.Linear+LoRA block)\"\"\"\n setattr(parent_module, child_name, new_module)\n new_module.weight = old_module.weight\n if old_module.bias is not None:\n new_module.bias = old_module.bias\n if getattr(old_module, \"state\", None) is not None: # synchronize the state and device\n new_module.state = old_module.state\n new_module.to(old_module.weight.device)\n\n # dispatch to correct device\n for name, module in new_module.named_modules():\n if \"lora_\" in name:\n module.to(old_module.weight.device)\n\n def __getattr__(self, name: str):\n \"\"\"Forward missing attributes to the wrapped module.\"\"\"\n try:\n return super().__getattr__(name) # defer to nn.Module's logic\n except AttributeError:\n return getattr(self.model, name)\n\n def get_peft_config_as_dict(self, inference: bool = False):\n config_dict = {}\n for key, value in self.peft_config.items():\n config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()}\n if inference:\n config[\"inference_mode\"] = True\n config_dict[key] = config\n return config\n\n def _set_adapter_layers(self, enabled=True):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.disable_adapters = False if enabled else True\n\n def enable_adapter_layers(self):\n self._set_adapter_layers(enabled=True)\n\n def disable_adapter_layers(self):\n self._set_adapter_layers(enabled=False)\n\n def set_adapter(self, adapter_name):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n if module.merged:\n warnings.warn(\"Adapter cannot be set when the model is merged. Unmerging the model first.\")\n module.unmerge()\n module.active_adapter = adapter_name\n\n def merge_adapter(self):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.merge()\n\n def unmerge_adapter(self):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.unmerge()\n\n @staticmethod\n def _prepare_lora_config(peft_config, model_config):\n if peft_config.target_modules is None:\n if model_config[\"model_type\"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:\n raise ValueError(\"Please specify `target_modules` in `peft_config`\")\n peft_config.target_modules = TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config[\"model_type\"]]\n if peft_config.inference_mode:\n peft_config.merge_weights = True\n return peft_config\n\n def merge_and_unload(self):\n r\"\"\"\n This method merges the LoRa layers into the base model. 
This is needed if someone wants to use the base model\n as a standalone model.\n \"\"\"\n if getattr(self.config, \"model_type\", None) == \"gpt2\":\n raise ValueError(\"GPT2 models are not supported for merging LORA layers\")\n\n if getattr(self.model, \"is_loaded_in_8bit\", False):\n raise ValueError(\"Cannot merge LORA layers when the model is loaded in 8-bit mode\")\n\n key_list = [key for key, _ in self.model.named_modules() if \"lora\" not in key]\n for key in key_list:\n try:\n parent, target, target_name = _get_submodules(self.model, key)\n except AttributeError:\n continue\n if isinstance(target, LoraLayer):\n bias = target.bias is not None\n new_module = torch.nn.Linear(target.in_features, target.out_features, bias=bias)\n target.merge()\n self._replace_module(parent, target_name, new_module, target)\n\n # save any additional trainable modules part of `modules_to_save`\n if isinstance(target, ModulesToSaveWrapper):\n setattr(parent, target_name, target.modules_to_save[target.active_adapter])\n\n return self.model\n\n def add_weighted_adapter(self, adapters, weights, adapter_name):\n if len({self.peft_config[adapter].r for adapter in adapters}) != 1:\n raise ValueError(\"All adapters must have the same r value\")\n self.peft_config[adapter_name] = self.peft_config[adapters[0]]\n self.peft_config[adapter_name].lora_alpha = self.peft_config[adapters[0]].r\n self._find_and_replace(adapter_name)\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)\n _freeze_adapter(self.model, adapter_name)\n key_list = [key for key, _ in self.model.named_modules() if \"lora\" not in key]\n for key in key_list:\n _, target, _ = _get_submodules(self.model, key)\n if isinstance(target, LoraLayer):\n target.lora_A[adapter_name].weight.data = target.lora_A[adapter_name].weight.data * 0.0\n target.lora_B[adapter_name].weight.data = target.lora_B[adapter_name].weight.data * 0.0\n for adapter, weight in zip(adapters, weights):\n if adapter not in target.lora_A:\n continue\n target.lora_A[adapter_name].weight.data += (\n target.lora_A[adapter].weight.data * weight * target.scaling[adapter]\n )\n target.lora_B[adapter_name].weight.data += target.lora_B[adapter].weight.data * weight" }, { "identifier": "mark_only_lora_as_trainable", "path": "src/MLoRA/peft/tuners/lora.py", "snippet": "def mark_only_lora_as_trainable(model: nn.Module, bias: str = \"none\") -> None:\n \"\"\"Only activate the LoRA layer as trainable\"\"\"\n for n, p in model.named_parameters():\n if \"lora_\" not in n:\n p.requires_grad = False\n if bias == \"none\":\n return\n elif bias == \"all\":\n for n, p in model.named_parameters():\n if \"bias\" in n:\n p.requires_grad = True\n elif bias == \"lora_only\":\n for m in model.modules():\n if isinstance(m, LoraLayer) and hasattr(m, \"bias\") and m.bias is not None:\n m.bias.requires_grad = True\n else:\n raise NotImplementedError" } ]
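The LoraConfig / LoraLayer / LoraModel snippets in this context describe the core LoRA mechanics: each targeted Linear keeps its pretrained weight frozen and adds a low-rank update lora_B @ lora_A scaled by lora_alpha / r, and merge() folds that product back into the base weight so inference needs no extra matmuls. A minimal, self-contained sketch of that arithmetic follows; the tensor names and sizes are illustrative assumptions, not code from this record.

import torch

# Illustrative sizes; the real layer keeps per-adapter ModuleDicts.
in_features, out_features, r, lora_alpha = 16, 32, 4, 32
scaling = lora_alpha / r  # the scaling stored in LoraLayer.update_layer

base_weight = torch.randn(out_features, in_features)   # frozen pretrained weight
lora_A = torch.randn(r, in_features) * 0.01            # stand-in for the kaiming init in the snippet
lora_B = torch.randn(out_features, r) * 0.01           # real init is zeros; random here so the check is non-trivial

x = torch.randn(1, in_features)

# Unmerged forward pass: frozen path plus scaled low-rank path.
y = x @ base_weight.T + (x @ lora_A.T @ lora_B.T) * scaling

# merge(): fold B @ A into the weight, as the LoraModel snippet describes.
merged_weight = base_weight + (lora_B @ lora_A) * scaling
y_merged = x @ merged_weight.T

assert torch.allclose(y, y_merged, atol=1e-4)

Folding the update in this way is why merge_and_unload() in the snippet can hand back a plain torch.nn.Linear once the adapters are no longer needed.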
import importlib
import re
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
import bitsandbytes as bnb
from dataclasses import dataclass, field
from typing import Optional
from transformers.pytorch_utils import Conv1D
from ..utils import (
    TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING,
    PeftType,
    _freeze_adapter,
    _get_submodules,
    transpose,
)
from .lora import (
    LoraConfig,
    LoraLayer,
    LoraModel,
    mark_only_lora_as_trainable,
)
8,092
state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key] ) return state_dict def update_and_allocate(self, global_step): lora_config = self.peft_config[self.trainable_adapter_name] # Update the importance score and allocate the budget if global_step < lora_config.total_step - lora_config.tfinal: _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step) if rank_pattern: lora_config.rank_pattern = rank_pattern # Finalize the budget allocation elif global_step == lora_config.total_step - lora_config.tfinal: _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True) # for some reason, this freezes the trainable parameters and nothing gets updates # self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name) lora_config.rank_pattern = rank_pattern self.rankallocator.reset_ipt() # Currently using inefficient way to mask the unimportant weights using the rank pattern # due to problem mentioned above elif global_step > lora_config.total_step - lora_config.tfinal: self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern) # Pass the function and do forward propagation else: return None @staticmethod def _prepare_adalora_config(peft_config, model_config): if peft_config.target_modules is None: if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING: raise ValueError("Please specify `target_modules` in `peft_config`") peft_config.target_modules = TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING[ model_config["model_type"] ] if peft_config.inference_mode: peft_config.merge_weights = True return peft_config class AdaLoraLayer(LoraLayer): def __init__( self, in_features: int, out_features: int, ): super().__init__(in_features, out_features) self.lora_E = nn.ParameterDict({}) self.lora_A = nn.ParameterDict({}) self.lora_B = nn.ParameterDict({}) self.ranknum = nn.ParameterDict({}) def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights): self.r[adapter_name] = r self.lora_alpha[adapter_name] = lora_alpha if lora_dropout > 0.0: lora_dropout_layer = nn.Dropout(p=lora_dropout) else: def lora_dropout_layer(x): return x self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer})) # Actual trainable parameters # Right singular vectors self.lora_A.update(nn.ParameterDict({adapter_name: nn.Parameter(torch.zeros(r, self.in_features))})) # Singular values self.lora_E.update(nn.ParameterDict({adapter_name: nn.Parameter(torch.zeros(r, 1))})) # Left singular vectors self.lora_B.update(nn.ParameterDict({adapter_name: nn.Parameter(torch.zeros(self.out_features, r))})) # The current rank self.ranknum.update(nn.ParameterDict({adapter_name: nn.Parameter(torch.zeros(1), requires_grad=False)})) self.ranknum[adapter_name].data.fill_(float(r)) self.ranknum[adapter_name].requires_grad = False self.scaling[adapter_name] = lora_alpha if lora_alpha > 0 else float(r) if init_lora_weights: self.reset_lora_parameters(adapter_name) self.to(self.weight.device) def reset_lora_parameters(self, adapter_name): if adapter_name in self.lora_A.keys(): nn.init.zeros_(self.lora_E[adapter_name]) nn.init.normal_(self.lora_A[adapter_name], mean=0.0, std=0.02) nn.init.normal_(self.lora_B[adapter_name], mean=0.0, std=0.02) class SVDLinear(nn.Linear, AdaLoraLayer): # SVD-based adaptation by a dense layer def __init__( self, adapter_name: str, in_features: int, out_features: int, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, 
fan_in_fan_out: bool = False, **kwargs, ): init_lora_weights = kwargs.pop("init_lora_weights", True) nn.Linear.__init__(self, in_features, out_features, **kwargs) AdaLoraLayer.__init__(self, in_features=in_features, out_features=out_features) # Freezing the pre-trained weight matrix self.weight.requires_grad = False self.fan_in_fan_out = fan_in_fan_out if fan_in_fan_out: self.weight.data = self.weight.data.T nn.Linear.reset_parameters(self) self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) self.active_adapter = adapter_name def merge(self): if self.active_adapter not in self.lora_A.keys(): return if self.merged: warnings.warn("Already merged. Nothing to do.") return if self.r[self.active_adapter] > 0: self.weight.data += (
def is_bnb_available(): return importlib.util.find_spec("bitsandbytes") is not None if is_bnb_available(): @dataclass class AdaLoraConfig(LoraConfig): """ This is the configuration class to store the configuration of a [`~peft.AdaLora`]. Args: target_r (`int`): The target average rank of incremental matrix. init_r (`int`): The initial rank for each incremental matrix. tinit (`int`): The steps of initial fine-tuning warmup. tfinal (`int`): The step of final fine-tuning. deltaT (`int`): The time internval between two budget allocations. beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing. beta2 (`float`): The hyperparameter of EMA for undertainty quantification. orth_reg_weight (`float`): The coefficient of orthogonal regularization. total_step (`int`): The total training steps that should be specified before training. rank_pattern (`list`): The allocated rank for each weight matrix by RankAllocator. """ target_r: int = field(default=8, metadata={"help": "Target Lora matrix dimension."}) init_r: int = field(default=12, metadata={"help": "Intial Lora matrix dimension."}) tinit: int = field(default=0, metadata={"help": "The steps of initial warmup."}) tfinal: int = field(default=0, metadata={"help": "The steps of final warmup."}) deltaT: int = field(default=1, metadata={"help": "Step interval of rank allocation."}) beta1: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."}) beta2: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."}) orth_reg_weight: float = field(default=0.5, metadata={"help": "The orthogonal regularization coefficient."}) total_step: Optional[int] = field(default=None, metadata={"help": "The total training steps."}) rank_pattern: Optional[dict] = field(default=None, metadata={"help": "The saved rank pattern."}) def __post_init__(self): self.peft_type = PeftType.ADALORA class AdaLoraModel(LoraModel): """ Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. Paper: https://openreview.net/pdf?id=lq62uWRJjiY Args: model ([`transformers.PreTrainedModel`]): The model to be adapted. config ([`AdaLoraConfig`]): The configuration of the AdaLora model. Returns: `torch.nn.Module`: The AdaLora model. Example:: >>> from transformers import AutoModelForSeq2SeqLM, LoraConfig >>> from peft import AdaLoraModel, AdaLoraConfig >>> config = AdaLoraConfig( peft_type="ADALORA", task_type="SEQ_2_SEQ_LM", r=8, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.01, ) >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> model = AdaLoraModel(config, model) **Attributes**: - **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted. - **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model. """ def __init__(self, model, config, adapter_name): nn.Module.__init__(self) self.model = model self.peft_config = config self.add_adapter(adapter_name, self.peft_config[adapter_name]) def add_adapter(self, adapter_name, config=None): if config is not None: model_config = self.model.config.to_dict() if hasattr(self.model.config, "to_dict") else self.model.config config = self._prepare_adalora_config(config, model_config) self.peft_config[adapter_name] = config self._find_and_replace(adapter_name) if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != "none": raise ValueError( "AdaLoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters." 
) traininable_mode_counter = 0 for config in self.peft_config.values(): if not config.inference_mode: traininable_mode_counter += 1 if traininable_mode_counter > 1: raise ValueError( "AdaLoraModel supports only 1 trainable adapter. " "When using multiple adapters, set inference_mode to True for all adapters except the one you want to train." ) mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias) if self.peft_config[adapter_name].inference_mode: _freeze_adapter(self.model, adapter_name) else: self.trainable_adapter_name = adapter_name self.rankallocator = RankAllocator(self.model, self.peft_config[adapter_name], self.trainable_adapter_name) def _find_and_replace(self, adapter_name): lora_config = self.peft_config[adapter_name] loaded_in_8bit = getattr(self.model, "is_loaded_in_8bit", False) if loaded_in_8bit and not is_bnb_available(): raise ImportError( "To use Lora with 8-bit quantization, please install the `bitsandbytes` package. " "You can install it with `pip install bitsandbytes`." ) is_target_modules_in_base_model = False kwargs = { "r": lora_config.init_r, "lora_alpha": lora_config.lora_alpha, "lora_dropout": lora_config.lora_dropout, "fan_in_fan_out": lora_config.fan_in_fan_out, "init_lora_weights": lora_config.init_lora_weights, } key_list = [key for key, _ in self.model.named_modules()] for key in key_list: if isinstance(lora_config.target_modules, str): target_module_found = re.fullmatch(lora_config.target_modules, key) else: target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules) if target_module_found: if not is_target_modules_in_base_model: is_target_modules_in_base_model = True parent, target, target_name = _get_submodules(self.model, key) bias = target.bias is not None if isinstance(target, LoraLayer): target.update_layer( adapter_name, lora_config.init_r, lora_config.lora_alpha, lora_config.lora_dropout, lora_config.init_lora_weights, ) else: if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt): kwargs.update( { "has_fp16_weights": target.state.has_fp16_weights, "memory_efficient_backward": target.state.memory_efficient_backward, "threshold": target.state.threshold, "index": target.index, } ) new_module = SVDLinear8bitLt( adapter_name, target.in_features, target.out_features, bias=bias, **kwargs ) else: if isinstance(target, torch.nn.Linear): in_features, out_features = target.in_features, target.out_features if kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. " "Setting fan_in_fan_out to False." ) kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False elif isinstance(target, Conv1D): in_features, out_features = ( target.weight.ds_shape if hasattr(target.weight, "ds_shape") else target.weight.shape ) if not kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to False but the target module is `Conv1D`. " "Setting fan_in_fan_out to True." ) kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True else: raise ValueError( f"Target module {target} is not supported. " f"Currently, only `torch.nn.Linear` and `Conv1D` are supported." ) new_module = SVDLinear(adapter_name, in_features, out_features, bias=bias, **kwargs) self._replace_module(parent, target_name, new_module, target) if not is_target_modules_in_base_model: raise ValueError( f"Target modules {lora_config.target_modules} not found in the base model. " f"Please check the target modules and try again." 
) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.model, name) def forward(self, *args, **kwargs): outputs = self.model.forward(*args, **kwargs) # Calculate the orthogonal regularization orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight assert orth_reg_weight > 0 if hasattr(outputs, "loss"): regu_loss = 0 num_param = 0 for n, p in self.model.named_parameters(): if ("lora_A" in n or "lora_B" in n) and self.trainable_adapter_name in n: para_cov = p @ p.T if "lora_A" in n else p.T @ p I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov)) I.requires_grad = False num_param += 1 regu_loss += torch.norm(para_cov - I, p="fro") regu_loss = regu_loss / num_param outputs.loss += orth_reg_weight * regu_loss return outputs def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name): lora_config = self.peft_config[adapter_name] for name, rank_idx in rank_pattern.items(): if isinstance(rank_idx, list): rank = sum(rank_idx) elif isinstance(rank_idx, torch.Tensor): rank_idx = rank_idx.view(-1) rank = rank_idx.sum().item() else: raise ValueError("Unexcepted type of rank_idx") key = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1]) _, target, _ = _get_submodules(self.model, key) lora_E_weights = target.lora_E[adapter_name][rank_idx] lora_A_weights = target.lora_A[adapter_name][rank_idx] lora_B_weights = target.lora_B[adapter_name][:, rank_idx] ranknum = target.ranknum[adapter_name] target.update_layer( adapter_name, rank, lora_config.lora_alpha, lora_config.lora_dropout, lora_config.init_lora_weights, ) with torch.no_grad(): if rank > 0: target.lora_E[adapter_name].copy_(lora_E_weights) target.lora_A[adapter_name].copy_(lora_A_weights) target.lora_B[adapter_name].copy_(lora_B_weights) # The scaling is exactly as the previous target.ranknum[adapter_name].copy_(ranknum) def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name): for name, rank_idx in rank_pattern.items(): rank = sum(rank_idx) prefix = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1]) for layer in ["lora_E", "lora_A", "lora_B"]: key = f"base_model.model.{prefix}.{layer}.{adapter_name}" if layer != "lora_B": state_dict[key] = ( state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key] ) else: state_dict[key] = ( state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key] ) return state_dict def update_and_allocate(self, global_step): lora_config = self.peft_config[self.trainable_adapter_name] # Update the importance score and allocate the budget if global_step < lora_config.total_step - lora_config.tfinal: _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step) if rank_pattern: lora_config.rank_pattern = rank_pattern # Finalize the budget allocation elif global_step == lora_config.total_step - lora_config.tfinal: _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True) # for some reason, this freezes the trainable parameters and nothing gets updates # self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name) lora_config.rank_pattern = rank_pattern self.rankallocator.reset_ipt() # Currently using inefficient way to mask the unimportant weights using the rank pattern # due to problem mentioned above elif 
global_step > lora_config.total_step - lora_config.tfinal: self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern) # Pass the function and do forward propagation else: return None @staticmethod def _prepare_adalora_config(peft_config, model_config): if peft_config.target_modules is None: if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING: raise ValueError("Please specify `target_modules` in `peft_config`") peft_config.target_modules = TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING[ model_config["model_type"] ] if peft_config.inference_mode: peft_config.merge_weights = True return peft_config class AdaLoraLayer(LoraLayer): def __init__( self, in_features: int, out_features: int, ): super().__init__(in_features, out_features) self.lora_E = nn.ParameterDict({}) self.lora_A = nn.ParameterDict({}) self.lora_B = nn.ParameterDict({}) self.ranknum = nn.ParameterDict({}) def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights): self.r[adapter_name] = r self.lora_alpha[adapter_name] = lora_alpha if lora_dropout > 0.0: lora_dropout_layer = nn.Dropout(p=lora_dropout) else: def lora_dropout_layer(x): return x self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer})) # Actual trainable parameters # Right singular vectors self.lora_A.update(nn.ParameterDict({adapter_name: nn.Parameter(torch.zeros(r, self.in_features))})) # Singular values self.lora_E.update(nn.ParameterDict({adapter_name: nn.Parameter(torch.zeros(r, 1))})) # Left singular vectors self.lora_B.update(nn.ParameterDict({adapter_name: nn.Parameter(torch.zeros(self.out_features, r))})) # The current rank self.ranknum.update(nn.ParameterDict({adapter_name: nn.Parameter(torch.zeros(1), requires_grad=False)})) self.ranknum[adapter_name].data.fill_(float(r)) self.ranknum[adapter_name].requires_grad = False self.scaling[adapter_name] = lora_alpha if lora_alpha > 0 else float(r) if init_lora_weights: self.reset_lora_parameters(adapter_name) self.to(self.weight.device) def reset_lora_parameters(self, adapter_name): if adapter_name in self.lora_A.keys(): nn.init.zeros_(self.lora_E[adapter_name]) nn.init.normal_(self.lora_A[adapter_name], mean=0.0, std=0.02) nn.init.normal_(self.lora_B[adapter_name], mean=0.0, std=0.02) class SVDLinear(nn.Linear, AdaLoraLayer): # SVD-based adaptation by a dense layer def __init__( self, adapter_name: str, in_features: int, out_features: int, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, fan_in_fan_out: bool = False, **kwargs, ): init_lora_weights = kwargs.pop("init_lora_weights", True) nn.Linear.__init__(self, in_features, out_features, **kwargs) AdaLoraLayer.__init__(self, in_features=in_features, out_features=out_features) # Freezing the pre-trained weight matrix self.weight.requires_grad = False self.fan_in_fan_out = fan_in_fan_out if fan_in_fan_out: self.weight.data = self.weight.data.T nn.Linear.reset_parameters(self) self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) self.active_adapter = adapter_name def merge(self): if self.active_adapter not in self.lora_A.keys(): return if self.merged: warnings.warn("Already merged. Nothing to do.") return if self.r[self.active_adapter] > 0: self.weight.data += (
transpose(
2
2023-10-19 10:55:50+00:00
12k
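The AdaLoRA record above stores each adapter's update in SVD-like form (lora_B as left vectors, lora_E as singular values, lora_A as right vectors) and adds an orthogonality penalty on A @ A.T and B.T @ B in AdaLoraModel.forward(). A short illustrative sketch of both computations, with assumed shapes and names, is given below; the truncated merge() presumably continues by wrapping the same product in transpose(..., fan_in_fan_out), which is consistent with the record's gold next line, but the exact continuation is not reproduced here.

import torch

# Assumed shapes for illustration only.
out_features, in_features, r = 32, 16, 6

lora_A = torch.randn(r, in_features)   # right singular vectors
lora_E = torch.randn(r, 1)             # singular values, one per rank
lora_B = torch.randn(out_features, r)  # left singular vectors
ranknum = float(r)
scaling = 1.0  # lora_alpha in the snippet; falls back to float(r) when alpha <= 0

# SVD-parameterised update: B @ diag(E) @ A, written as B @ (A * E) via broadcasting.
delta_w = (lora_B @ (lora_A * lora_E)) * scaling / ranknum
x = torch.randn(2, in_features)
y_lowrank = x @ delta_w.T  # contribution of the adapter to the layer output

# Orthogonal regularisation as in AdaLoraModel.forward():
def orth_penalty(p: torch.Tensor, is_lora_A: bool) -> torch.Tensor:
    cov = p @ p.T if is_lora_A else p.T @ p          # r x r in both cases
    eye = torch.eye(cov.size(0), device=cov.device)
    return torch.norm(cov - eye, p="fro")

regu_loss = (orth_penalty(lora_A, True) + orth_penalty(lora_B, False)) / 2

Rank allocation then works by masking entries of lora_E, which is why resize_modules_by_rank_pattern and resize_state_dict_by_rank_pattern in the record index all three tensors with rank_idx.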
YuroFR/freqtrade-modded-crypto-trading-bot
freqtrade/plugins/pairlist/VolatilityFilter.py
[ { "identifier": "Config", "path": "freqtrade/constants.py", "snippet": "DOCS_LINK = \"https://www.freqtrade.io/en/stable\"\nDEFAULT_CONFIG = 'config.json'\nPROCESS_THROTTLE_SECS = 5 # sec\nHYPEROPT_EPOCH = 100 # epochs\nRETRY_TIMEOUT = 30 # sec\nTIMEOUT_UNITS = ['minutes', 'seconds']\nEXPORT_OPTIONS = ['none', 'trades', 'signals']\nDEFAULT_DB_PROD_URL = 'sqlite:///tradesv3.sqlite'\nDEFAULT_DB_DRYRUN_URL = 'sqlite:///tradesv3.dryrun.sqlite'\nUNLIMITED_STAKE_AMOUNT = 'unlimited'\nDEFAULT_AMOUNT_RESERVE_PERCENT = 0.05\nREQUIRED_ORDERTIF = ['entry', 'exit']\nREQUIRED_ORDERTYPES = ['entry', 'exit', 'stoploss', 'stoploss_on_exchange']\nPRICING_SIDES = ['ask', 'bid', 'same', 'other']\nORDERTYPE_POSSIBILITIES = ['limit', 'market']\n_ORDERTIF_POSSIBILITIES = ['GTC', 'FOK', 'IOC', 'PO']\nORDERTIF_POSSIBILITIES = _ORDERTIF_POSSIBILITIES + [t.lower() for t in _ORDERTIF_POSSIBILITIES]\nSTOPLOSS_PRICE_TYPES = [p for p in PriceType]\nHYPEROPT_LOSS_BUILTIN = ['ShortTradeDurHyperOptLoss', 'OnlyProfitHyperOptLoss',\n 'SharpeHyperOptLoss', 'SharpeHyperOptLossDaily',\n 'SortinoHyperOptLoss', 'SortinoHyperOptLossDaily',\n 'CalmarHyperOptLoss',\n 'MaxDrawDownHyperOptLoss', 'MaxDrawDownRelativeHyperOptLoss',\n 'ProfitDrawDownHyperOptLoss']\nAVAILABLE_PAIRLISTS = ['StaticPairList', 'VolumePairList', 'ProducerPairList', 'RemotePairList',\n 'AgeFilter', \"FullTradesFilter\", 'OffsetFilter', 'PerformanceFilter',\n 'PrecisionFilter', 'PriceFilter', 'RangeStabilityFilter',\n 'ShuffleFilter', 'SpreadFilter', 'VolatilityFilter']\nAVAILABLE_PROTECTIONS = ['CooldownPeriod',\n 'LowProfitPairs', 'MaxDrawdown', 'StoplossGuard']\nAVAILABLE_DATAHANDLERS = ['json', 'jsongz', 'hdf5', 'feather', 'parquet']\nBACKTEST_BREAKDOWNS = ['day', 'week', 'month']\nBACKTEST_CACHE_AGE = ['none', 'day', 'week', 'month']\nBACKTEST_CACHE_DEFAULT = 'day'\nDRY_RUN_WALLET = 1000\nDATETIME_PRINT_FORMAT = '%Y-%m-%d %H:%M:%S'\nMATH_CLOSE_PREC = 1e-14 # Precision used for float comparisons\nDEFAULT_DATAFRAME_COLUMNS = ['date', 'open', 'high', 'low', 'close', 'volume']\nDEFAULT_TRADES_COLUMNS = ['timestamp', 'id', 'type', 'side', 'price', 'amount', 'cost']\nTRADES_DTYPES = {\n 'timestamp': 'int64',\n 'id': 'str',\n 'type': 'str',\n 'side': 'str',\n 'price': 'float64',\n 'amount': 'float64',\n 'cost': 'float64',\n}\nTRADING_MODES = ['spot', 'margin', 'futures']\nMARGIN_MODES = ['cross', 'isolated', '']\nLAST_BT_RESULT_FN = '.last_result.json'\nFTHYPT_FILEVERSION = 'fthypt_fileversion'\nUSERPATH_HYPEROPTS = 'hyperopts'\nUSERPATH_STRATEGIES = 'strategies'\nUSERPATH_NOTEBOOKS = 'notebooks'\nUSERPATH_FREQAIMODELS = 'freqaimodels'\nTELEGRAM_SETTING_OPTIONS = ['on', 'off', 'silent']\nWEBHOOK_FORMAT_OPTIONS = ['form', 'json', 'raw']\nFULL_DATAFRAME_THRESHOLD = 100\nCUSTOM_TAG_MAX_LENGTH = 255\nDL_DATA_TIMEFRAMES = ['1m', '5m']\nENV_VAR_PREFIX = 'FREQTRADE__'\nCANCELED_EXCHANGE_STATES = ('cancelled', 'canceled', 'expired')\nNON_OPEN_EXCHANGE_STATES = CANCELED_EXCHANGE_STATES + ('closed',)\nDECIMAL_PER_COIN_FALLBACK = 3 # Should be low to avoid listing all possible FIAT's\nDECIMALS_PER_COIN = {\n 'BTC': 8,\n 'ETH': 5,\n}\nDUST_PER_COIN = {\n 'BTC': 0.0001,\n 'ETH': 0.01\n}\nUSER_DATA_FILES = {\n 'sample_strategy.py': USERPATH_STRATEGIES,\n 'sample_hyperopt_loss.py': USERPATH_HYPEROPTS,\n 'strategy_analysis_example.ipynb': USERPATH_NOTEBOOKS,\n}\nSUPPORTED_FIAT = [\n \"AUD\", \"BRL\", \"CAD\", \"CHF\", \"CLP\", \"CNY\", \"CZK\", \"DKK\",\n \"EUR\", \"GBP\", \"HKD\", \"HUF\", \"IDR\", \"ILS\", \"INR\", \"JPY\",\n \"KRW\", \"MXN\", \"MYR\", \"NOK\", \"NZD\", 
\"PHP\", \"PKR\", \"PLN\",\n \"RUB\", \"UAH\", \"SEK\", \"SGD\", \"THB\", \"TRY\", \"TWD\", \"ZAR\",\n \"USD\", \"BTC\", \"ETH\", \"XRP\", \"LTC\", \"BCH\"\n]\nMINIMAL_CONFIG = {\n \"stake_currency\": \"\",\n \"dry_run\": True,\n \"exchange\": {\n \"name\": \"\",\n \"key\": \"\",\n \"secret\": \"\",\n \"pair_whitelist\": [],\n \"ccxt_async_config\": {\n }\n }\n}\n__MESSAGE_TYPE_DICT: Dict[str, Dict[str, str]] = {x: {'type': 'object'} for x in RPCMessageType}\nCONF_SCHEMA = {\n 'type': 'object',\n 'properties': {\n 'max_open_trades': {'type': ['integer', 'number'], 'minimum': -1},\n 'new_pairs_days': {'type': 'integer', 'default': 30},\n 'timeframe': {'type': 'string'},\n 'stake_currency': {'type': 'string'},\n 'stake_amount': {\n 'type': ['number', 'string'],\n 'minimum': 0.0001,\n 'pattern': UNLIMITED_STAKE_AMOUNT\n },\n 'tradable_balance_ratio': {\n 'type': 'number',\n 'minimum': 0.0,\n 'maximum': 1,\n 'default': 0.99\n },\n 'available_capital': {\n 'type': 'number',\n 'minimum': 0,\n },\n 'amend_last_stake_amount': {'type': 'boolean', 'default': False},\n 'last_stake_amount_min_ratio': {\n 'type': 'number', 'minimum': 0.0, 'maximum': 1.0, 'default': 0.5\n },\n 'fiat_display_currency': {'type': 'string', 'enum': SUPPORTED_FIAT},\n 'dry_run': {'type': 'boolean'},\n 'dry_run_wallet': {'type': 'number', 'default': DRY_RUN_WALLET},\n 'cancel_open_orders_on_exit': {'type': 'boolean', 'default': False},\n 'process_only_new_candles': {'type': 'boolean'},\n 'minimal_roi': {\n 'type': 'object',\n 'patternProperties': {\n '^[0-9.]+$': {'type': 'number'}\n },\n },\n 'amount_reserve_percent': {'type': 'number', 'minimum': 0.0, 'maximum': 0.5},\n 'stoploss': {'type': 'number', 'maximum': 0, 'exclusiveMaximum': True},\n 'trailing_stop': {'type': 'boolean'},\n 'trailing_stop_positive': {'type': 'number', 'minimum': 0, 'maximum': 1},\n 'trailing_stop_positive_offset': {'type': 'number', 'minimum': 0, 'maximum': 1},\n 'trailing_only_offset_is_reached': {'type': 'boolean'},\n 'use_exit_signal': {'type': 'boolean'},\n 'exit_profit_only': {'type': 'boolean'},\n 'exit_profit_offset': {'type': 'number'},\n 'ignore_roi_if_entry_signal': {'type': 'boolean'},\n 'ignore_buying_expired_candle_after': {'type': 'number'},\n 'trading_mode': {'type': 'string', 'enum': TRADING_MODES},\n 'margin_mode': {'type': 'string', 'enum': MARGIN_MODES},\n 'reduce_df_footprint': {'type': 'boolean', 'default': False},\n 'minimum_trade_amount': {'type': 'number', 'default': 10},\n 'targeted_trade_amount': {'type': 'number', 'default': 20},\n 'lookahead_analysis_exportfilename': {'type': 'string'},\n 'startup_candle': {\n 'type': 'array',\n 'uniqueItems': True,\n 'default': [199, 399, 499, 999, 1999],\n },\n 'liquidation_buffer': {'type': 'number', 'minimum': 0.0, 'maximum': 0.99},\n 'backtest_breakdown': {\n 'type': 'array',\n 'items': {'type': 'string', 'enum': BACKTEST_BREAKDOWNS}\n },\n 'bot_name': {'type': 'string'},\n 'unfilledtimeout': {\n 'type': 'object',\n 'properties': {\n 'entry': {'type': 'number', 'minimum': 1},\n 'exit': {'type': 'number', 'minimum': 1},\n 'exit_timeout_count': {'type': 'number', 'minimum': 0, 'default': 0},\n 'unit': {'type': 'string', 'enum': TIMEOUT_UNITS, 'default': 'minutes'}\n }\n },\n 'entry_pricing': {\n 'type': 'object',\n 'properties': {\n 'price_last_balance': {\n 'type': 'number',\n 'minimum': 0,\n 'maximum': 1,\n 'exclusiveMaximum': False,\n },\n 'price_side': {'type': 'string', 'enum': PRICING_SIDES, 'default': 'same'},\n 'use_order_book': {'type': 'boolean'},\n 'order_book_top': {'type': 
'integer', 'minimum': 1, 'maximum': 50, },\n 'check_depth_of_market': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean'},\n 'bids_to_ask_delta': {'type': 'number', 'minimum': 0},\n }\n },\n },\n 'required': ['price_side']\n },\n 'exit_pricing': {\n 'type': 'object',\n 'properties': {\n 'price_side': {'type': 'string', 'enum': PRICING_SIDES, 'default': 'same'},\n 'price_last_balance': {\n 'type': 'number',\n 'minimum': 0,\n 'maximum': 1,\n 'exclusiveMaximum': False,\n },\n 'use_order_book': {'type': 'boolean'},\n 'order_book_top': {'type': 'integer', 'minimum': 1, 'maximum': 50, },\n },\n 'required': ['price_side']\n },\n 'custom_price_max_distance_ratio': {\n 'type': 'number', 'minimum': 0.0\n },\n 'order_types': {\n 'type': 'object',\n 'properties': {\n 'entry': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES},\n 'exit': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES},\n 'force_exit': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES},\n 'force_entry': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES},\n 'emergency_exit': {\n 'type': 'string',\n 'enum': ORDERTYPE_POSSIBILITIES,\n 'default': 'market'},\n 'stoploss': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES},\n 'stoploss_on_exchange': {'type': 'boolean'},\n 'stoploss_price_type': {'type': 'string', 'enum': STOPLOSS_PRICE_TYPES},\n 'stoploss_on_exchange_interval': {'type': 'number'},\n 'stoploss_on_exchange_limit_ratio': {'type': 'number', 'minimum': 0.0,\n 'maximum': 1.0}\n },\n 'required': ['entry', 'exit', 'stoploss', 'stoploss_on_exchange']\n },\n 'order_time_in_force': {\n 'type': 'object',\n 'properties': {\n 'entry': {'type': 'string', 'enum': ORDERTIF_POSSIBILITIES},\n 'exit': {'type': 'string', 'enum': ORDERTIF_POSSIBILITIES}\n },\n 'required': REQUIRED_ORDERTIF\n },\n 'exchange': {'$ref': '#/definitions/exchange'},\n 'edge': {'$ref': '#/definitions/edge'},\n 'freqai': {'$ref': '#/definitions/freqai'},\n 'external_message_consumer': {'$ref': '#/definitions/external_message_consumer'},\n 'experimental': {\n 'type': 'object',\n 'properties': {\n 'block_bad_exchanges': {'type': 'boolean'}\n }\n },\n 'pairlists': {\n 'type': 'array',\n 'items': {\n 'type': 'object',\n 'properties': {\n 'method': {'type': 'string', 'enum': AVAILABLE_PAIRLISTS},\n },\n 'required': ['method'],\n }\n },\n 'protections': {\n 'type': 'array',\n 'items': {\n 'type': 'object',\n 'properties': {\n 'method': {'type': 'string', 'enum': AVAILABLE_PROTECTIONS},\n 'stop_duration': {'type': 'number', 'minimum': 0.0},\n 'stop_duration_candles': {'type': 'number', 'minimum': 0},\n 'trade_limit': {'type': 'number', 'minimum': 1},\n 'lookback_period': {'type': 'number', 'minimum': 1},\n 'lookback_period_candles': {'type': 'number', 'minimum': 1},\n },\n 'required': ['method'],\n }\n },\n 'telegram': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean'},\n 'token': {'type': 'string'},\n 'chat_id': {'type': 'string'},\n 'allow_custom_messages': {'type': 'boolean', 'default': True},\n 'balance_dust_level': {'type': 'number', 'minimum': 0.0},\n 'notification_settings': {\n 'type': 'object',\n 'default': {},\n 'properties': {\n 'status': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS},\n 'warning': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS},\n 'startup': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS},\n 'entry': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS},\n 'entry_fill': {\n 'type': 'string',\n 'enum': TELEGRAM_SETTING_OPTIONS,\n 'default': 'off'\n },\n 'entry_cancel': {'type': 
'string', 'enum': TELEGRAM_SETTING_OPTIONS, },\n 'exit': {\n 'type': ['string', 'object'],\n 'additionalProperties': {\n 'type': 'string',\n 'enum': TELEGRAM_SETTING_OPTIONS\n }\n },\n 'exit_fill': {\n 'type': 'string',\n 'enum': TELEGRAM_SETTING_OPTIONS,\n 'default': 'on'\n },\n 'exit_cancel': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS},\n 'protection_trigger': {\n 'type': 'string',\n 'enum': TELEGRAM_SETTING_OPTIONS,\n 'default': 'on'\n },\n 'protection_trigger_global': {\n 'type': 'string',\n 'enum': TELEGRAM_SETTING_OPTIONS,\n 'default': 'on'\n },\n 'show_candle': {\n 'type': 'string',\n 'enum': ['off', 'ohlc'],\n 'default': 'off'\n },\n 'strategy_msg': {\n 'type': 'string',\n 'enum': TELEGRAM_SETTING_OPTIONS,\n 'default': 'on'\n },\n }\n },\n 'reload': {'type': 'boolean'},\n },\n 'required': ['enabled', 'token', 'chat_id'],\n },\n 'webhook': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean'},\n 'url': {'type': 'string'},\n 'format': {'type': 'string', 'enum': WEBHOOK_FORMAT_OPTIONS, 'default': 'form'},\n 'retries': {'type': 'integer', 'minimum': 0},\n 'retry_delay': {'type': 'number', 'minimum': 0},\n **__MESSAGE_TYPE_DICT,\n # **{x: {'type': 'object'} for x in RPCMessageType},\n # Below -> Deprecated\n 'webhookentry': {'type': 'object'},\n 'webhookentrycancel': {'type': 'object'},\n 'webhookentryfill': {'type': 'object'},\n 'webhookexit': {'type': 'object'},\n 'webhookexitcancel': {'type': 'object'},\n 'webhookexitfill': {'type': 'object'},\n 'webhookstatus': {'type': 'object'},\n },\n },\n 'discord': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean'},\n 'webhook_url': {'type': 'string'},\n \"exit_fill\": {\n 'type': 'array', 'items': {'type': 'object'},\n 'default': [\n {\"Trade ID\": \"{trade_id}\"},\n {\"Exchange\": \"{exchange}\"},\n {\"Pair\": \"{pair}\"},\n {\"Direction\": \"{direction}\"},\n {\"Open rate\": \"{open_rate}\"},\n {\"Close rate\": \"{close_rate}\"},\n {\"Amount\": \"{amount}\"},\n {\"Open date\": \"{open_date:%Y-%m-%d %H:%M:%S}\"},\n {\"Close date\": \"{close_date:%Y-%m-%d %H:%M:%S}\"},\n {\"Profit\": \"{profit_amount} {stake_currency}\"},\n {\"Profitability\": \"{profit_ratio:.2%}\"},\n {\"Enter tag\": \"{enter_tag}\"},\n {\"Exit Reason\": \"{exit_reason}\"},\n {\"Strategy\": \"{strategy}\"},\n {\"Timeframe\": \"{timeframe}\"},\n ]\n },\n \"entry_fill\": {\n 'type': 'array', 'items': {'type': 'object'},\n 'default': [\n {\"Trade ID\": \"{trade_id}\"},\n {\"Exchange\": \"{exchange}\"},\n {\"Pair\": \"{pair}\"},\n {\"Direction\": \"{direction}\"},\n {\"Open rate\": \"{open_rate}\"},\n {\"Amount\": \"{amount}\"},\n {\"Open date\": \"{open_date:%Y-%m-%d %H:%M:%S}\"},\n {\"Enter tag\": \"{enter_tag}\"},\n {\"Strategy\": \"{strategy} {timeframe}\"},\n ]\n },\n }\n },\n 'api_server': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean'},\n 'listen_ip_address': {'format': 'ipv4'},\n 'listen_port': {\n 'type': 'integer',\n 'minimum': 1024,\n 'maximum': 65535\n },\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'ws_token': {'type': ['string', 'array'], 'items': {'type': 'string'}},\n 'jwt_secret_key': {'type': 'string'},\n 'CORS_origins': {'type': 'array', 'items': {'type': 'string'}},\n 'verbosity': {'type': 'string', 'enum': ['error', 'info']},\n },\n 'required': ['enabled', 'listen_ip_address', 'listen_port', 'username', 'password']\n },\n 'db_url': {'type': 'string'},\n 'export': {'type': 'string', 'enum': EXPORT_OPTIONS, 'default': 'trades'},\n 'disableparamexport': {'type': 
'boolean'},\n 'initial_state': {'type': 'string', 'enum': ['running', 'stopped']},\n 'force_entry_enable': {'type': 'boolean'},\n 'disable_dataframe_checks': {'type': 'boolean'},\n 'internals': {\n 'type': 'object',\n 'default': {},\n 'properties': {\n 'process_throttle_secs': {'type': 'integer'},\n 'interval': {'type': 'integer'},\n 'sd_notify': {'type': 'boolean'},\n }\n },\n 'dataformat_ohlcv': {\n 'type': 'string',\n 'enum': AVAILABLE_DATAHANDLERS,\n 'default': 'feather'\n },\n 'dataformat_trades': {\n 'type': 'string',\n 'enum': AVAILABLE_DATAHANDLERS,\n 'default': 'feather'\n },\n 'position_adjustment_enable': {'type': 'boolean'},\n 'max_entry_position_adjustment': {'type': ['integer', 'number'], 'minimum': -1},\n },\n 'definitions': {\n 'exchange': {\n 'type': 'object',\n 'properties': {\n 'name': {'type': 'string'},\n 'key': {'type': 'string', 'default': ''},\n 'secret': {'type': 'string', 'default': ''},\n 'password': {'type': 'string', 'default': ''},\n 'uid': {'type': 'string'},\n 'pair_whitelist': {\n 'type': 'array',\n 'items': {\n 'type': 'string',\n },\n 'uniqueItems': True\n },\n 'pair_blacklist': {\n 'type': 'array',\n 'items': {\n 'type': 'string',\n },\n 'uniqueItems': True\n },\n 'unknown_fee_rate': {'type': 'number'},\n 'outdated_offset': {'type': 'integer', 'minimum': 1},\n 'markets_refresh_interval': {'type': 'integer'},\n 'ccxt_config': {'type': 'object'},\n 'ccxt_async_config': {'type': 'object'}\n },\n 'required': ['name']\n },\n 'edge': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean'},\n 'process_throttle_secs': {'type': 'integer', 'minimum': 600},\n 'calculate_since_number_of_days': {'type': 'integer'},\n 'allowed_risk': {'type': 'number'},\n 'stoploss_range_min': {'type': 'number'},\n 'stoploss_range_max': {'type': 'number'},\n 'stoploss_range_step': {'type': 'number'},\n 'minimum_winrate': {'type': 'number'},\n 'minimum_expectancy': {'type': 'number'},\n 'min_trade_number': {'type': 'number'},\n 'max_trade_duration_minute': {'type': 'integer'},\n 'remove_pumps': {'type': 'boolean'}\n },\n 'required': ['process_throttle_secs', 'allowed_risk']\n },\n 'external_message_consumer': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean', 'default': False},\n 'producers': {\n 'type': 'array',\n 'items': {\n 'type': 'object',\n 'properties': {\n 'name': {'type': 'string'},\n 'host': {'type': 'string'},\n 'port': {\n 'type': 'integer',\n 'default': 8080,\n 'minimum': 0,\n 'maximum': 65535\n },\n 'secure': {'type': 'boolean', 'default': False},\n 'ws_token': {'type': 'string'},\n },\n 'required': ['name', 'host', 'ws_token']\n }\n },\n 'wait_timeout': {'type': 'integer', 'minimum': 0},\n 'sleep_time': {'type': 'integer', 'minimum': 0},\n 'ping_timeout': {'type': 'integer', 'minimum': 0},\n 'remove_entry_exit_signals': {'type': 'boolean', 'default': False},\n 'initial_candle_limit': {\n 'type': 'integer',\n 'minimum': 0,\n 'maximum': 1500,\n 'default': 1500\n },\n 'message_size_limit': { # In megabytes\n 'type': 'integer',\n 'minimum': 1,\n 'maxmium': 20,\n 'default': 8,\n }\n },\n 'required': ['producers']\n },\n \"freqai\": {\n \"type\": \"object\",\n \"properties\": {\n \"enabled\": {\"type\": \"boolean\", \"default\": False},\n \"keras\": {\"type\": \"boolean\", \"default\": False},\n \"write_metrics_to_disk\": {\"type\": \"boolean\", \"default\": False},\n \"purge_old_models\": {\"type\": [\"boolean\", \"number\"], \"default\": 2},\n \"conv_width\": {\"type\": \"integer\", \"default\": 1},\n \"train_period_days\": {\"type\": 
\"integer\", \"default\": 0},\n \"backtest_period_days\": {\"type\": \"number\", \"default\": 7},\n \"identifier\": {\"type\": \"string\", \"default\": \"example\"},\n \"feature_parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"include_corr_pairlist\": {\"type\": \"array\"},\n \"include_timeframes\": {\"type\": \"array\"},\n \"label_period_candles\": {\"type\": \"integer\"},\n \"include_shifted_candles\": {\"type\": \"integer\", \"default\": 0},\n \"DI_threshold\": {\"type\": \"number\", \"default\": 0},\n \"weight_factor\": {\"type\": \"number\", \"default\": 0},\n \"principal_component_analysis\": {\"type\": \"boolean\", \"default\": False},\n \"use_SVM_to_remove_outliers\": {\"type\": \"boolean\", \"default\": False},\n \"plot_feature_importances\": {\"type\": \"integer\", \"default\": 0},\n \"svm_params\": {\"type\": \"object\",\n \"properties\": {\n \"shuffle\": {\"type\": \"boolean\", \"default\": False},\n \"nu\": {\"type\": \"number\", \"default\": 0.1}\n },\n },\n \"shuffle_after_split\": {\"type\": \"boolean\", \"default\": False},\n \"buffer_train_data_candles\": {\"type\": \"integer\", \"default\": 0}\n },\n \"required\": [\"include_timeframes\", \"include_corr_pairlist\", ]\n },\n \"data_split_parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"test_size\": {\"type\": \"number\"},\n \"random_state\": {\"type\": \"integer\"},\n \"shuffle\": {\"type\": \"boolean\", \"default\": False}\n },\n },\n \"model_training_parameters\": {\n \"type\": \"object\"\n },\n \"rl_config\": {\n \"type\": \"object\",\n \"properties\": {\n \"drop_ohlc_from_features\": {\"type\": \"boolean\", \"default\": False},\n \"train_cycles\": {\"type\": \"integer\"},\n \"max_trade_duration_candles\": {\"type\": \"integer\"},\n \"add_state_info\": {\"type\": \"boolean\", \"default\": False},\n \"max_training_drawdown_pct\": {\"type\": \"number\", \"default\": 0.02},\n \"cpu_count\": {\"type\": \"integer\", \"default\": 1},\n \"model_type\": {\"type\": \"string\", \"default\": \"PPO\"},\n \"policy_type\": {\"type\": \"string\", \"default\": \"MlpPolicy\"},\n \"net_arch\": {\"type\": \"array\", \"default\": [128, 128]},\n \"randomize_starting_position\": {\"type\": \"boolean\", \"default\": False},\n \"progress_bar\": {\"type\": \"boolean\", \"default\": True},\n \"model_reward_parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"rr\": {\"type\": \"number\", \"default\": 1},\n \"profit_aim\": {\"type\": \"number\", \"default\": 0.025}\n }\n }\n },\n },\n },\n \"required\": [\n \"enabled\",\n \"train_period_days\",\n \"backtest_period_days\",\n \"identifier\",\n \"feature_parameters\",\n \"data_split_parameters\"\n ]\n },\n },\n}\nSCHEMA_TRADE_REQUIRED = [\n 'exchange',\n 'timeframe',\n 'max_open_trades',\n 'stake_currency',\n 'stake_amount',\n 'tradable_balance_ratio',\n 'last_stake_amount_min_ratio',\n 'dry_run',\n 'dry_run_wallet',\n 'exit_pricing',\n 'entry_pricing',\n 'stoploss',\n 'minimal_roi',\n 'internals',\n 'dataformat_ohlcv',\n 'dataformat_trades',\n]\nSCHEMA_BACKTEST_REQUIRED = [\n 'exchange',\n 'stake_currency',\n 'stake_amount',\n 'dry_run_wallet',\n 'dataformat_ohlcv',\n 'dataformat_trades',\n]\nSCHEMA_BACKTEST_REQUIRED_FINAL = SCHEMA_BACKTEST_REQUIRED + [\n 'stoploss',\n 'minimal_roi',\n 'max_open_trades'\n]\nSCHEMA_MINIMAL_REQUIRED = [\n 'exchange',\n 'dry_run',\n 'dataformat_ohlcv',\n 'dataformat_trades',\n]\nSCHEMA_MINIMAL_WEBSERVER = SCHEMA_MINIMAL_REQUIRED + [\n 'api_server',\n]\nCANCEL_REASON = {\n \"TIMEOUT\": \"cancelled due to timeout\",\n 
\"PARTIALLY_FILLED_KEEP_OPEN\": \"partially filled - keeping order open\",\n \"PARTIALLY_FILLED\": \"partially filled\",\n \"FULLY_CANCELLED\": \"fully cancelled\",\n \"ALL_CANCELLED\": \"cancelled (all unfilled and partially filled open orders cancelled)\",\n \"CANCELLED_ON_EXCHANGE\": \"cancelled on exchange\",\n \"FORCE_EXIT\": \"forcesold\",\n \"REPLACE\": \"cancelled to be replaced by new limit order\",\n \"REPLACE_FAILED\": \"failed to replace order, deleting Trade\",\n \"USER_CANCEL\": \"user requested order cancel\"\n}" }, { "identifier": "OperationalException", "path": "freqtrade/exceptions.py", "snippet": "class OperationalException(FreqtradeException):\n \"\"\"\n Requires manual intervention and will stop the bot.\n Most of the time, this is caused by an invalid Configuration.\n \"\"\"" }, { "identifier": "Tickers", "path": "freqtrade/exchange/types.py", "snippet": "class Ticker(TypedDict):\nclass OrderBook(TypedDict):" }, { "identifier": "plural", "path": "freqtrade/misc.py", "snippet": "def plural(num: float, singular: str, plural: Optional[str] = None) -> str:\n return singular if (num == 1 or num == -1) else plural or singular + 's'" }, { "identifier": "IPairList", "path": "freqtrade/plugins/pairlist/IPairList.py", "snippet": "class __PairlistParameterBase(TypedDict):\nclass __NumberPairlistParameter(__PairlistParameterBase):\nclass __StringPairlistParameter(__PairlistParameterBase):\nclass __OptionPairlistParameter(__PairlistParameterBase):\nclass __BoolPairlistParameter(__PairlistParameterBase):\nclass IPairList(LoggingMixin, ABC):\n def __init__(self, exchange: Exchange, pairlistmanager,\n config: Config, pairlistconfig: Dict[str, Any],\n pairlist_pos: int) -> None:\n def name(self) -> str:\n def needstickers(self) -> bool:\n def description() -> str:\n def available_parameters() -> Dict[str, PairlistParameter]:\n def refresh_period_parameter() -> Dict[str, PairlistParameter]:\n def short_desc(self) -> str:\n def _validate_pair(self, pair: str, ticker: Optional[Ticker]) -> bool:\n def gen_pairlist(self, tickers: Tickers) -> List[str]:\n def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]:\n def verify_blacklist(self, pairlist: List[str], logmethod) -> List[str]:\n def verify_whitelist(self, pairlist: List[str], logmethod,\n keep_invalid: bool = False) -> List[str]:\n def _whitelist_for_active_markets(self, pairlist: List[str]) -> List[str]:" }, { "identifier": "dt_floor_day", "path": "freqtrade/util/datetime_helpers.py", "snippet": "def dt_floor_day(dt: datetime) -> datetime:\n \"\"\"Return the floor of the day for the given datetime.\"\"\"\n return dt.replace(hour=0, minute=0, second=0, microsecond=0)" }, { "identifier": "dt_now", "path": "freqtrade/util/datetime_helpers.py", "snippet": "def dt_now() -> datetime:\n \"\"\"Return the current datetime in UTC.\"\"\"\n return datetime.now(timezone.utc)" }, { "identifier": "dt_ts", "path": "freqtrade/util/datetime_helpers.py", "snippet": "def dt_ts(dt: Optional[datetime] = None) -> int:\n \"\"\"\n Return dt in ms as a timestamp in UTC.\n If dt is None, return the current datetime in UTC.\n \"\"\"\n if dt:\n return int(dt.timestamp() * 1000)\n return int(dt_now().timestamp() * 1000)" } ]
import logging
import sys
import numpy as np
from copy import deepcopy
from datetime import timedelta
from typing import Any, Dict, List, Optional
from cachetools import TTLCache
from pandas import DataFrame
from freqtrade.constants import Config, ListPairsWithTimeframes
from freqtrade.exceptions import OperationalException
from freqtrade.exchange.types import Tickers
from freqtrade.misc import plural
from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
from freqtrade.util import dt_floor_day, dt_now, dt_ts
8,785
""" Volatility pairlist filter """ logger = logging.getLogger(__name__) class VolatilityFilter(IPairList): """ Filters pairs by volatility """ def __init__(self, exchange, pairlistmanager, config: Config, pairlistconfig: Dict[str, Any], pairlist_pos: int) -> None: super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos) self._days = pairlistconfig.get('lookback_days', 10) self._min_volatility = pairlistconfig.get('min_volatility', 0) self._max_volatility = pairlistconfig.get('max_volatility', sys.maxsize) self._refresh_period = pairlistconfig.get('refresh_period', 1440) self._def_candletype = self._config['candle_type_def'] self._pair_cache: TTLCache = TTLCache(maxsize=1000, ttl=self._refresh_period) candle_limit = exchange.ohlcv_candle_limit('1d', self._config['candle_type_def']) if self._days < 1: raise OperationalException("VolatilityFilter requires lookback_days to be >= 1") if self._days > candle_limit: raise OperationalException("VolatilityFilter requires lookback_days to not " f"exceed exchange max request size ({candle_limit})") @property def needstickers(self) -> bool: """ Boolean property defining if tickers are necessary. If no Pairlist requires tickers, an empty List is passed as tickers argument to filter_pairlist """ return False def short_desc(self) -> str: """ Short whitelist method description - used for startup-messages """ return (f"{self.name} - Filtering pairs with volatility range " f"{self._min_volatility}-{self._max_volatility} " f" the last {self._days} {plural(self._days, 'day')}.") @staticmethod def description() -> str: return "Filter pairs by their recent volatility." @staticmethod def available_parameters() -> Dict[str, PairlistParameter]: return { "lookback_days": { "type": "number", "default": 10, "description": "Lookback Days", "help": "Number of days to look back at.", }, "min_volatility": { "type": "number", "default": 0, "description": "Minimum Volatility", "help": "Minimum volatility a pair must have to be considered.", }, "max_volatility": { "type": "number", "default": None, "description": "Maximum Volatility", "help": "Maximum volatility a pair must have to be considered.", }, **IPairList.refresh_period_parameter() } def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]: """ Validate trading range :param pairlist: pairlist to filter or sort :param tickers: Tickers (from exchange.get_tickers). May be cached. :return: new allowlist """
""" Volatility pairlist filter """ logger = logging.getLogger(__name__) class VolatilityFilter(IPairList): """ Filters pairs by volatility """ def __init__(self, exchange, pairlistmanager, config: Config, pairlistconfig: Dict[str, Any], pairlist_pos: int) -> None: super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos) self._days = pairlistconfig.get('lookback_days', 10) self._min_volatility = pairlistconfig.get('min_volatility', 0) self._max_volatility = pairlistconfig.get('max_volatility', sys.maxsize) self._refresh_period = pairlistconfig.get('refresh_period', 1440) self._def_candletype = self._config['candle_type_def'] self._pair_cache: TTLCache = TTLCache(maxsize=1000, ttl=self._refresh_period) candle_limit = exchange.ohlcv_candle_limit('1d', self._config['candle_type_def']) if self._days < 1: raise OperationalException("VolatilityFilter requires lookback_days to be >= 1") if self._days > candle_limit: raise OperationalException("VolatilityFilter requires lookback_days to not " f"exceed exchange max request size ({candle_limit})") @property def needstickers(self) -> bool: """ Boolean property defining if tickers are necessary. If no Pairlist requires tickers, an empty List is passed as tickers argument to filter_pairlist """ return False def short_desc(self) -> str: """ Short whitelist method description - used for startup-messages """ return (f"{self.name} - Filtering pairs with volatility range " f"{self._min_volatility}-{self._max_volatility} " f" the last {self._days} {plural(self._days, 'day')}.") @staticmethod def description() -> str: return "Filter pairs by their recent volatility." @staticmethod def available_parameters() -> Dict[str, PairlistParameter]: return { "lookback_days": { "type": "number", "default": 10, "description": "Lookback Days", "help": "Number of days to look back at.", }, "min_volatility": { "type": "number", "default": 0, "description": "Minimum Volatility", "help": "Minimum volatility a pair must have to be considered.", }, "max_volatility": { "type": "number", "default": None, "description": "Maximum Volatility", "help": "Maximum volatility a pair must have to be considered.", }, **IPairList.refresh_period_parameter() } def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]: """ Validate trading range :param pairlist: pairlist to filter or sort :param tickers: Tickers (from exchange.get_tickers). May be cached. :return: new allowlist """
needed_pairs: ListPairsWithTimeframes = [
0
2023-10-21 10:02:05+00:00
12k
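Not part of the dataset record above, purely as a reading aid: the row's `cropped_code` cuts off inside `filter_pairlist`, and the gold `next_line` starts building `needed_pairs` for daily candles. The minimal, self-contained sketch below shows the kind of volatility measure such a filter typically derives from those candles (standard deviation of daily log returns over the lookback window). The helper name and the sqrt scaling are illustrative assumptions, not freqtrade's verbatim implementation.

# Illustrative sketch only -- not part of the dataset row above, and not
# necessarily identical to freqtrade's actual VolatilityFilter internals.
import numpy as np
import pandas as pd


def lookback_volatility(daily_close: pd.Series, lookback_days: int = 10) -> float:
    """Std-dev of the last `lookback_days` daily log returns, scaled by
    sqrt(lookback_days) (hypothetical helper)."""
    log_returns = np.log(daily_close / daily_close.shift(1)).dropna()
    window = log_returns.tail(lookback_days)
    return float(window.std() * np.sqrt(lookback_days))


# Example: a pair configured with min_volatility=0.02 and max_volatility=0.75
# would pass if 0.02 <= lookback_volatility(close_series) <= 0.75.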
yanzhh/HGERE
transformers/src/transformers/modeling_utils.py
[ { "identifier": "get_activation", "path": "transformers/src/transformers/activations.py", "snippet": "def get_activation(activation_string):\n if activation_string in ACT2FN:\n return ACT2FN[activation_string]\n else:\n raise KeyError(\n \"function {} not found in ACT2FN mapping {} or torch.nn.functional\".format(\n activation_string, list(ACT2FN.keys())\n )\n )" }, { "identifier": "PretrainedConfig", "path": "transformers/src/transformers/configuration_utils.py", "snippet": "class PretrainedConfig(object):\n r\"\"\" Base class for all configuration classes.\n Handles a few parameters common to all models' configurations as well as methods for loading/downloading/saving configurations.\n\n Note:\n A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to initialize a model does **not** load the model weights.\n It only affects the model's configuration.\n\n Class attributes (overridden by derived classes):\n - ``pretrained_config_archive_map``: a python ``dict`` with `shortcut names` (string) as keys and `url` (string) of associated pretrained model configurations as values.\n - ``model_type``: a string that identifies the model type, that we serialize into the JSON file, and that we use to recreate the correct object in :class:`~transformers.AutoConfig`.\n\n Args:\n finetuning_task (:obj:`string` or :obj:`None`, `optional`, defaults to :obj:`None`):\n Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow or PyTorch) checkpoint.\n num_labels (:obj:`int`, `optional`, defaults to `2`):\n Number of classes to use when the model is a classification model (sequences/tokens)\n output_attentions (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Should the model returns attentions weights.\n output_hidden_states (:obj:`string`, `optional`, defaults to :obj:`False`):\n Should the model returns all hidden-states.\n torchscript (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Is the model used with Torchscript (for PyTorch models).\n \"\"\"\n pretrained_config_archive_map = {} # type: Dict[str, str]\n model_type = \"\" # type: str\n\n def __init__(self, **kwargs):\n # Attributes with defaults\n self.output_attentions = kwargs.pop(\"output_attentions\", False)\n self.output_hidden_states = kwargs.pop(\"output_hidden_states\", False)\n self.output_past = kwargs.pop(\"output_past\", True) # Not used by all models\n self.torchscript = kwargs.pop(\"torchscript\", False) # Only used by PyTorch models\n self.use_bfloat16 = kwargs.pop(\"use_bfloat16\", False)\n self.pruned_heads = kwargs.pop(\"pruned_heads\", {})\n\n # Is decoder is used in encoder-decoder models to differentiate encoder from decoder\n self.is_decoder = kwargs.pop(\"is_decoder\", False)\n\n # Parameters for sequence generation\n self.max_length = kwargs.pop(\"max_length\", 20)\n self.do_sample = kwargs.pop(\"do_sample\", False)\n self.num_beams = kwargs.pop(\"num_beams\", 1)\n self.temperature = kwargs.pop(\"temperature\", 1.0)\n self.top_k = kwargs.pop(\"top_k\", 50)\n self.top_p = kwargs.pop(\"top_p\", 1.0)\n self.repetition_penalty = kwargs.pop(\"repetition_penalty\", 1.0)\n self.bos_token_id = kwargs.pop(\"bos_token_id\", None)\n self.pad_token_id = kwargs.pop(\"pad_token_id\", None)\n self.eos_token_ids = kwargs.pop(\"eos_token_ids\", None)\n self.length_penalty = kwargs.pop(\"length_penalty\", 1.0)\n self.num_return_sequences = kwargs.pop(\"num_return_sequences\", 1)\n\n # Fine-tuning task arguments\n self.architectures 
= kwargs.pop(\"architectures\", None)\n self.finetuning_task = kwargs.pop(\"finetuning_task\", None)\n self.num_labels = kwargs.pop(\"num_labels\", 2)\n self.id2label = kwargs.pop(\"id2label\", {i: \"LABEL_{}\".format(i) for i in range(self.num_labels)})\n self.id2label = dict((int(key), value) for key, value in self.id2label.items())\n self.label2id = kwargs.pop(\"label2id\", dict(zip(self.id2label.values(), self.id2label.keys())))\n self.label2id = dict((key, int(value)) for key, value in self.label2id.items())\n\n # Additional attributes without default values\n for key, value in kwargs.items():\n try:\n setattr(self, key, value)\n except AttributeError as err:\n logger.error(\"Can't set {} with value {} for {}\".format(key, value, self))\n raise err\n\n def save_pretrained(self, save_directory):\n \"\"\"\n Save a configuration object to the directory `save_directory`, so that it\n can be re-loaded using the :func:`~transformers.PretrainedConfig.from_pretrained` class method.\n\n Args:\n save_directory (:obj:`string`):\n Directory where the configuration JSON file will be saved.\n \"\"\"\n assert os.path.isdir(\n save_directory\n ), \"Saving path should be a directory where the model and configuration can be saved\"\n\n # If we save using the predefined names, we can load using `from_pretrained`\n output_config_file = os.path.join(save_directory, CONFIG_NAME)\n\n self.to_json_file(output_config_file)\n logger.info(\"Configuration saved in {}\".format(output_config_file))\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> \"PretrainedConfig\":\n r\"\"\"\n\n Instantiate a :class:`~transformers.PretrainedConfig` (or a derived class) from a pre-trained model configuration.\n\n Args:\n pretrained_model_name_or_path (:obj:`string`):\n either:\n - a string with the `shortcut name` of a pre-trained model configuration to load from cache or\n download, e.g.: ``bert-base-uncased``.\n - a string with the `identifier name` of a pre-trained model configuration that was user-uploaded to\n our S3, e.g.: ``dbmdz/bert-base-german-cased``.\n - a path to a `directory` containing a configuration file saved using the\n :func:`~transformers.PretrainedConfig.save_pretrained` method, e.g.: ``./my_model_directory/``.\n - a path or url to a saved configuration JSON `file`, e.g.:\n ``./my_model_directory/configuration.json``.\n cache_dir (:obj:`string`, `optional`):\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n kwargs (:obj:`Dict[str, any]`, `optional`):\n The values in kwargs of any keys which are configuration attributes will be used to override the loaded\n values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is\n controlled by the `return_unused_kwargs` keyword parameter.\n force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Force to (re-)download the model weights and configuration files and override the cached versions if they exist.\n resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Do not delete incompletely recieved file. 
Attempt to resume the download if such a file exists.\n proxies (:obj:`Dict`, `optional`):\n A dictionary of proxy servers to use by protocol or endpoint, e.g.:\n :obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.`\n The proxies are used on each request.\n return_unused_kwargs: (`optional`) bool:\n If False, then this function returns just the final configuration object.\n If True, then this functions returns a :obj:`Tuple(config, unused_kwargs)` where `unused_kwargs` is a\n dictionary consisting of the key/value pairs whose keys are not configuration attributes: ie the part\n of kwargs which has not been used to update `config` and is otherwise ignored.\n\n Returns:\n :class:`PretrainedConfig`: An instance of a configuration object\n\n Examples::\n\n # We can't instantiate directly the base class `PretrainedConfig` so let's show the examples on a\n # derived class: BertConfig\n config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.\n config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`\n config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json')\n config = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False)\n assert config.output_attention == True\n config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attention=True,\n foo=False, return_unused_kwargs=True)\n assert config.output_attention == True\n assert unused_kwargs == {'foo': False}\n\n \"\"\"\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n return cls.from_dict(config_dict, **kwargs)\n\n @classmethod\n def get_config_dict(\n cls, pretrained_model_name_or_path: str, pretrained_config_archive_map: Optional[Dict] = None, **kwargs\n ) -> Tuple[Dict, Dict]:\n \"\"\"\n From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used\n for instantiating a Config using `from_dict`.\n\n Parameters:\n pretrained_model_name_or_path (:obj:`string`):\n The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.\n pretrained_config_archive_map: (:obj:`Dict[str, str]`, `optional`) Dict:\n A map of `shortcut names` to `url`. 
By default, will use the current class attribute.\n\n Returns:\n :obj:`Tuple[Dict, Dict]`: The dictionary that will be used to instantiate the configuration object.\n\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", None)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n\n if pretrained_config_archive_map is None:\n pretrained_config_archive_map = cls.pretrained_config_archive_map\n\n if pretrained_model_name_or_path in pretrained_config_archive_map:\n config_file = pretrained_config_archive_map[pretrained_model_name_or_path]\n elif os.path.isdir(pretrained_model_name_or_path):\n config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)\n elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):\n config_file = pretrained_model_name_or_path\n else:\n config_file = hf_bucket_url(pretrained_model_name_or_path, postfix=CONFIG_NAME)\n\n try:\n # Load from URL or cache if already cached\n resolved_config_file = cached_path(\n config_file,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n )\n # Load config dict\n if resolved_config_file is None:\n raise EnvironmentError\n config_dict = cls._dict_from_json_file(resolved_config_file)\n\n except EnvironmentError:\n if pretrained_model_name_or_path in pretrained_config_archive_map:\n msg = \"Couldn't reach server at '{}' to download pretrained model configuration file.\".format(\n config_file\n )\n else:\n msg = (\n \"Model name '{}' was not found in model name list. \"\n \"We assumed '{}' was a path, a model identifier, or url to a configuration file named {} or \"\n \"a directory containing such a file but couldn't find any such file at this path or url.\".format(\n pretrained_model_name_or_path, config_file, CONFIG_NAME,\n )\n )\n raise EnvironmentError(msg)\n\n except json.JSONDecodeError:\n msg = (\n \"Couldn't reach server at '{}' to download configuration file or \"\n \"configuration file is not a valid JSON file. \"\n \"Please check network or file content here: {}.\".format(config_file, resolved_config_file)\n )\n raise EnvironmentError(msg)\n\n if resolved_config_file == config_file:\n logger.info(\"loading configuration file {}\".format(config_file))\n else:\n logger.info(\"loading configuration file {} from cache at {}\".format(config_file, resolved_config_file))\n\n return config_dict, kwargs\n\n @classmethod\n def from_dict(cls, config_dict: Dict, **kwargs) -> \"PretrainedConfig\":\n \"\"\"\n Constructs a `Config` from a Python dictionary of parameters.\n\n Args:\n config_dict (:obj:`Dict[str, any]`):\n Dictionary that will be used to instantiate the configuration object. 
Such a dictionary can be retrieved\n from a pre-trained checkpoint by leveraging the :func:`~transformers.PretrainedConfig.get_config_dict`\n method.\n kwargs (:obj:`Dict[str, any]`):\n Additional parameters from which to initialize the configuration object.\n\n Returns:\n :class:`PretrainedConfig`: An instance of a configuration object\n \"\"\"\n return_unused_kwargs = kwargs.pop(\"return_unused_kwargs\", False)\n\n config = cls(**config_dict)\n\n if hasattr(config, \"pruned_heads\"):\n config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())\n\n # Update config with kwargs if needed\n to_remove = []\n for key, value in kwargs.items():\n if hasattr(config, key):\n setattr(config, key, value)\n to_remove.append(key)\n for key in to_remove:\n kwargs.pop(key, None)\n\n logger.info(\"Model config %s\", str(config))\n if return_unused_kwargs:\n return config, kwargs\n else:\n return config\n\n @classmethod\n def from_json_file(cls, json_file: str) -> \"PretrainedConfig\":\n \"\"\"\n Constructs a `Config` from the path to a json file of parameters.\n\n Args:\n json_file (:obj:`string`):\n Path to the JSON file containing the parameters.\n\n Returns:\n :class:`PretrainedConfig`: An instance of a configuration object\n\n \"\"\"\n config_dict = cls._dict_from_json_file(json_file)\n return cls(**config_dict)\n\n @classmethod\n def _dict_from_json_file(cls, json_file: str):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __eq__(self, other):\n return self.__dict__ == other.__dict__\n\n def __repr__(self):\n return \"{} {}\".format(self.__class__.__name__, self.to_json_string())\n\n def to_dict(self):\n \"\"\"\n Serializes this instance to a Python dictionary.\n\n Returns:\n :obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n if hasattr(self.__class__, \"model_type\"):\n output[\"model_type\"] = self.__class__.model_type\n return output\n\n def to_json_string(self):\n \"\"\"\n Serializes this instance to a JSON string.\n\n Returns:\n :obj:`string`: String containing all the attributes that make up this configuration instance in JSON format.\n \"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path):\n \"\"\"\n Save this instance to a json file.\n\n Args:\n json_file_path (:obj:`string`):\n Path to the JSON file in which this configuration instance's parameters will be saved.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())" }, { "identifier": "DUMMY_INPUTS", "path": "transformers/src/transformers/file_utils.py", "snippet": "DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]" }, { "identifier": "TF2_WEIGHTS_NAME", "path": "transformers/src/transformers/file_utils.py", "snippet": "TF2_WEIGHTS_NAME = \"tf_model.h5\"" }, { "identifier": "TF_WEIGHTS_NAME", "path": "transformers/src/transformers/file_utils.py", "snippet": "TF_WEIGHTS_NAME = \"model.ckpt\"" }, { "identifier": "WEIGHTS_NAME", "path": "transformers/src/transformers/file_utils.py", "snippet": "WEIGHTS_NAME = \"pytorch_model.bin\"" }, { "identifier": "cached_path", "path": "transformers/src/transformers/file_utils.py", "snippet": "def cached_path(\n url_or_filename,\n cache_dir=None,\n force_download=False,\n proxies=None,\n resume_download=False,\n user_agent=None,\n extract_compressed_file=False,\n 
force_extract=False,\n local_files_only=False,\n) -> Optional[str]:\n \"\"\"\n Given something that might be a URL (or might be a local path),\n determine which. If it's a URL, download the file and cache it, and\n return the path to the cached file. If it's already a local path,\n make sure the file exists and then return the path.\n Args:\n cache_dir: specify a cache directory to save the file to (overwrite the default cache dir).\n force_download: if True, re-dowload the file even if it's already cached in the cache dir.\n resume_download: if True, resume the download if incompletly recieved file is found.\n user_agent: Optional string or dict that will be appended to the user-agent on remote requests.\n extract_compressed_file: if True and the path point to a zip or tar file, extract the compressed\n file in a folder along the archive.\n force_extract: if True when extract_compressed_file is True and the archive was already extracted,\n re-extract the archive and overide the folder where it was extracted.\n\n Return:\n None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).\n Local path (string) otherwise\n \"\"\"\n if cache_dir is None:\n cache_dir = TRANSFORMERS_CACHE\n if isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n if isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n if is_remote_url(url_or_filename):\n # URL, so get it from the cache (downloading if necessary)\n output_path = get_from_cache(\n url_or_filename,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n user_agent=user_agent,\n local_files_only=local_files_only,\n )\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n output_path = url_or_filename\n elif urlparse(url_or_filename).scheme == \"\":\n # File, but it doesn't exist.\n raise EnvironmentError(\"file {} not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))\n\n if extract_compressed_file:\n if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):\n return output_path\n\n # Path where we extract compressed archives\n # We avoid '.' 
in dir name and add \"-extracted\" at the end: \"./model.zip\" => \"./model-zip-extracted/\"\n output_dir, output_file = os.path.split(output_path)\n output_extract_dir_name = output_file.replace(\".\", \"-\") + \"-extracted\"\n output_path_extracted = os.path.join(output_dir, output_extract_dir_name)\n\n if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:\n return output_path_extracted\n\n # Prevent parallel extractions\n lock_path = output_path + \".lock\"\n with FileLock(lock_path):\n shutil.rmtree(output_path_extracted, ignore_errors=True)\n os.makedirs(output_path_extracted)\n if is_zipfile(output_path):\n with ZipFile(output_path, \"r\") as zip_file:\n zip_file.extractall(output_path_extracted)\n zip_file.close()\n elif tarfile.is_tarfile(output_path):\n tar_file = tarfile.open(output_path)\n tar_file.extractall(output_path_extracted)\n tar_file.close()\n else:\n raise EnvironmentError(\"Archive format of {} could not be identified\".format(output_path))\n\n return output_path_extracted\n\n return output_path" }, { "identifier": "hf_bucket_url", "path": "transformers/src/transformers/file_utils.py", "snippet": "def hf_bucket_url(identifier, postfix=None, cdn=False) -> str:\n endpoint = CLOUDFRONT_DISTRIB_PREFIX if cdn else S3_BUCKET_PREFIX\n if postfix is None:\n return \"/\".join((endpoint, identifier))\n else:\n return \"/\".join((endpoint, identifier, postfix))" }, { "identifier": "is_remote_url", "path": "transformers/src/transformers/file_utils.py", "snippet": "def is_remote_url(url_or_filename):\n parsed = urlparse(url_or_filename)\n return parsed.scheme in (\"http\", \"https\", \"s3\")" } ]
import logging
import os
import typing

import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F

from .activations import get_activation
from .configuration_utils import PretrainedConfig
from .file_utils import (
    DUMMY_INPUTS,
    TF2_WEIGHTS_NAME,
    TF_WEIGHTS_NAME,
    WEIGHTS_NAME,
    cached_path,
    hf_bucket_url,
    is_remote_url,
)
from torch.nn import Identity
from transformers import load_tf2_checkpoint_in_pytorch_model
8,358
save_directory ), "Saving path should be a directory where the model and configuration can be saved" # Only save the model itself if we are using distributed training model_to_save = self.module if hasattr(self, "module") else self # Attach architecture to the config model_to_save.config.architectures = [model_to_save.__class__.__name__] # Save configuration file model_to_save.config.save_pretrained(save_directory) # If we save using the predefined names, we can load using `from_pretrained` output_model_file = os.path.join(save_directory, WEIGHTS_NAME) torch.save(model_to_save.state_dict(), output_model_file) logger.info("Model weights saved in {}".format(output_model_file)) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r"""Instantiate a pretrained pytorch model from a pre-trained model configuration. The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated) To train the model, you should first set it back in training mode with ``model.train()`` The warning ``Weights from XXX not initialized from pretrained model`` means that the weights of XXX do not come pre-trained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``. - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. - None if you are both providing the configuration and state dictionary (resp. with keyword arguments ``config`` and ``state_dict``) model_args: (`optional`) Sequence of positional arguments: All remaning positional arguments will be passed to the underlying model's ``__init__`` method config: (`optional`) one of: - an instance of a class derived from :class:`~transformers.PretrainedConfig`, or - a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained()` Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when: - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory. - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory. state_dict: (`optional`) dict: an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. 
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option. cache_dir: (`optional`) string: Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used. force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. resume_download: (`optional`) boolean, default False: Do not delete incompletely recieved file. Attempt to resume the download if such a file exists. proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. output_loading_info: (`optional`) boolean: Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages. kwargs: (`optional`) Remaining dictionary of keyword arguments: Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function. Examples:: # For example purposes. Not runnable. model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache. model = BertModel.from_pretrained('./test/saved_model/') # E.g. 
model was saved using `save_pretrained('./test/saved_model/')` model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading assert model.config.output_attention == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json') model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ config = kwargs.pop("config", None) state_dict = kwargs.pop("state_dict", None) cache_dir = kwargs.pop("cache_dir", None) from_tf = kwargs.pop("from_tf", False) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) output_loading_info = kwargs.pop("output_loading_info", False) local_files_only = kwargs.pop("local_files_only", False) # Load config if we don't provide a configuration if not isinstance(config, PretrainedConfig): config_path = config if config is not None else pretrained_model_name_or_path config, model_kwargs = cls.config_class.from_pretrained( config_path, *model_args, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, **kwargs, ) else: model_kwargs = kwargs # Load model if pretrained_model_name_or_path is not None: if pretrained_model_name_or_path in cls.pretrained_model_archive_map: archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path] elif os.path.isdir(pretrained_model_name_or_path):
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BERT model.""" logger = logging.getLogger(__name__) try: except ImportError: # Older PyTorch compatibility class Identity(nn.Module): r"""A placeholder identity operator that is argument-insensitive. """ def __init__(self, *args, **kwargs): super().__init__() def forward(self, input): return input class ModuleUtilsMixin: """ A few utilities for torch.nn.Modules, to be used as a mixin. """ def num_parameters(self, only_trainable: bool = False) -> int: """ Get number of (optionally, trainable) parameters in the module. """ params = filter(lambda x: x.requires_grad, self.parameters()) if only_trainable else self.parameters() return sum(p.numel() for p in params) class PreTrainedModel(nn.Module, ModuleUtilsMixin): r""" Base class for all models. :class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models as well as a few methods common to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads. Class attributes (overridden by derived classes): - ``config_class``: a class derived from :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture. - ``pretrained_model_archive_map``: a python ``dict`` of with `short-cut-names` (string) as keys and `url` (string) of associated pretrained weights as values. - ``load_tf_weights``: a python ``method`` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments: - ``model``: an instance of the relevant subclass of :class:`~transformers.PreTrainedModel`, - ``config``: an instance of the relevant subclass of :class:`~transformers.PretrainedConfig`, - ``path``: a path (string) to the TensorFlow checkpoint. - ``base_model_prefix``: a string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model. """ config_class = None pretrained_model_archive_map = {} base_model_prefix = "" @property def dummy_inputs(self): """ Dummy inputs to do a forward pass in the network. Returns: torch.Tensor with dummy inputs """ return {"input_ids": torch.tensor(DUMMY_INPUTS)} def __init__(self, config, *inputs, **kwargs): super().__init__() if not isinstance(config, PretrainedConfig): raise ValueError( "Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. " "To create a model from a pretrained model use " "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( self.__class__.__name__, self.__class__.__name__ ) ) # Save config in model self.config = config @property def base_model(self): return getattr(self, self.base_model_prefix, self) def get_input_embeddings(self): """ Returns the model's input embeddings. 
Returns: :obj:`nn.Module`: A torch module mapping vocabulary to hidden states. """ base_model = getattr(self, self.base_model_prefix, self) if base_model is not self: return base_model.get_input_embeddings() else: raise NotImplementedError def set_input_embeddings(self, value): """ Set model's input embeddings Args: value (:obj:`nn.Module`): A module mapping vocabulary to hidden states. """ base_model = getattr(self, self.base_model_prefix, self) if base_model is not self: base_model.set_input_embeddings(value) else: raise NotImplementedError def get_output_embeddings(self): """ Returns the model's output embeddings. Returns: :obj:`nn.Module`: A torch module mapping hidden states to vocabulary. """ return None # Overwrite for models with output embeddings def tie_weights(self): """ Tie the weights between the input embeddings and the output embeddings. If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the weights instead. """ output_embeddings = self.get_output_embeddings() if output_embeddings is not None: if isinstance(output_embeddings, list): for x in output_embeddings: self._tie_or_clone_weights(x, self.get_input_embeddings()) else: self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings()) def _tie_or_clone_weights(self, output_embeddings, input_embeddings): """ Tie or clone module weights depending of weither we are using TorchScript or not """ if self.config.torchscript: output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone()) else: output_embeddings.weight = input_embeddings.weight if hasattr(output_embeddings, "bias") and output_embeddings.bias is not None: output_embeddings.bias.data = torch.nn.functional.pad( output_embeddings.bias.data, (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0]), "constant", 0, ) if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"): output_embeddings.out_features = input_embeddings.num_embeddings def resize_token_embeddings(self, new_num_tokens=None): """ Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size. Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method. Arguments: new_num_tokens: (`optional`) int: New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or None: does nothing and just returns a pointer to the input tokens ``torch.nn.Embeddings`` Module of the model. Return: ``torch.nn.Embeddings`` Pointer to the input tokens Embeddings Module of the model """ base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed model_embeds = base_model._resize_token_embeddings(new_num_tokens) if new_num_tokens is None: return model_embeds # Update base model and current model config self.config.vocab_size = new_num_tokens base_model.vocab_size = new_num_tokens # Tie weights again if needed self.tie_weights() return model_embeds def _resize_token_embeddings(self, new_num_tokens): old_embeddings = self.get_input_embeddings() new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) self.set_input_embeddings(new_embeddings) return self.get_input_embeddings() def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None): """ Build a resized Embedding Module from a provided token Embedding Module. 
Increasing the size will add newly initialized vectors at the end Reducing the size will remove vectors from the end Args: new_num_tokens: (`optional`) int New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end Reducing the size will remove vectors from the end If not provided or None: return the provided token Embedding Module. Return: ``torch.nn.Embeddings`` Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None """ if new_num_tokens is None: return old_embeddings old_num_tokens, old_embedding_dim = old_embeddings.weight.size() if old_num_tokens == new_num_tokens: return old_embeddings # Build new embeddings new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim) new_embeddings.to(old_embeddings.weight.device) # initialize all new embeddings (in particular added tokens) self._init_weights(new_embeddings) # Copy word embeddings from the previous weights num_tokens_to_copy = min(old_num_tokens, new_num_tokens) new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :] return new_embeddings def init_weights(self): """ Initialize and prunes weights if needed. """ # Initialize weights self.apply(self._init_weights) # Prune heads if needed if self.config.pruned_heads: self.prune_heads(self.config.pruned_heads) # Tie weights if needed self.tie_weights() def prune_heads(self, heads_to_prune): """ Prunes heads of the base model. Arguments: heads_to_prune: dict with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`). E.g. {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2. """ # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads for layer, heads in heads_to_prune.items(): union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads) self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON self.base_model._prune_heads(heads_to_prune) def save_pretrained(self, save_directory): """ Save a model and its configuration file to a directory, so that it can be re-loaded using the `:func:`~transformers.PreTrainedModel.from_pretrained`` class method. """ assert os.path.isdir( save_directory ), "Saving path should be a directory where the model and configuration can be saved" # Only save the model itself if we are using distributed training model_to_save = self.module if hasattr(self, "module") else self # Attach architecture to the config model_to_save.config.architectures = [model_to_save.__class__.__name__] # Save configuration file model_to_save.config.save_pretrained(save_directory) # If we save using the predefined names, we can load using `from_pretrained` output_model_file = os.path.join(save_directory, WEIGHTS_NAME) torch.save(model_to_save.state_dict(), output_model_file) logger.info("Model weights saved in {}".format(output_model_file)) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r"""Instantiate a pretrained pytorch model from a pre-trained model configuration. 
The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated) To train the model, you should first set it back in training mode with ``model.train()`` The warning ``Weights from XXX not initialized from pretrained model`` means that the weights of XXX do not come pre-trained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``. - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. - None if you are both providing the configuration and state dictionary (resp. with keyword arguments ``config`` and ``state_dict``) model_args: (`optional`) Sequence of positional arguments: All remaning positional arguments will be passed to the underlying model's ``__init__`` method config: (`optional`) one of: - an instance of a class derived from :class:`~transformers.PretrainedConfig`, or - a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained()` Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when: - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory. - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory. state_dict: (`optional`) dict: an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option. cache_dir: (`optional`) string: Path to a directory in which a downloaded pre-trained model configuration should be cached if the standard cache should not be used. force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. resume_download: (`optional`) boolean, default False: Do not delete incompletely recieved file. Attempt to resume the download if such a file exists. proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. 
output_loading_info: (`optional`) boolean: Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages. kwargs: (`optional`) Remaining dictionary of keyword arguments: Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function. Examples:: # For example purposes. Not runnable. model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache. model = BertModel.from_pretrained('./test/saved_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading assert model.config.output_attention == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json') model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ config = kwargs.pop("config", None) state_dict = kwargs.pop("state_dict", None) cache_dir = kwargs.pop("cache_dir", None) from_tf = kwargs.pop("from_tf", False) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) output_loading_info = kwargs.pop("output_loading_info", False) local_files_only = kwargs.pop("local_files_only", False) # Load config if we don't provide a configuration if not isinstance(config, PretrainedConfig): config_path = config if config is not None else pretrained_model_name_or_path config, model_kwargs = cls.config_class.from_pretrained( config_path, *model_args, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, **kwargs, ) else: model_kwargs = kwargs # Load model if pretrained_model_name_or_path is not None: if pretrained_model_name_or_path in cls.pretrained_model_archive_map: archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path] elif os.path.isdir(pretrained_model_name_or_path):
if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
4
2023-10-15 02:31:09+00:00
12k
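Again as a reading aid only, not part of the record: this row's gold `next_line` opens the branch of `from_pretrained` that resolves which checkpoint file to load from a local directory. Below is a minimal standalone sketch of that resolution logic; the constant values mirror those listed in the row's context, while the function name and the exact ordering of checks are illustrative assumptions rather than the library's verbatim code.

# Illustrative sketch only -- mirrors the directory branch that the gold
# `next_line` above opens; not the verbatim transformers implementation.
import os

WEIGHTS_NAME = "pytorch_model.bin"   # values as listed in the row's context
TF_WEIGHTS_NAME = "model.ckpt"
TF2_WEIGHTS_NAME = "tf_model.h5"


def resolve_archive_file(model_dir: str, from_tf: bool = False) -> str:
    """Pick the checkpoint file to load from a local model directory
    (hypothetical helper)."""
    if from_tf and os.path.isfile(os.path.join(model_dir, TF_WEIGHTS_NAME + ".index")):
        # TensorFlow 1.x index checkpoint, to be converted to PyTorch.
        return os.path.join(model_dir, TF_WEIGHTS_NAME + ".index")
    if from_tf and os.path.isfile(os.path.join(model_dir, TF2_WEIGHTS_NAME)):
        # TensorFlow 2.x .h5 weights.
        return os.path.join(model_dir, TF2_WEIGHTS_NAME)
    if os.path.isfile(os.path.join(model_dir, WEIGHTS_NAME)):
        # Native PyTorch weights.
        return os.path.join(model_dir, WEIGHTS_NAME)
    raise EnvironmentError(f"No checkpoint found in {model_dir}")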
generative-skill-chaining/gsc-code
generative_skill_chaining/envs/pybullet/table/predicates.py
[ { "identifier": "primitive_actions", "path": "generative_skill_chaining/envs/pybullet/table/primitive_actions.py", "snippet": "class PrimitiveAction:\nclass PickAction(PrimitiveAction):\nclass PlaceAction(PrimitiveAction):\nclass PullAction(PrimitiveAction):\nclass PushAction(PrimitiveAction):\n RANGES: Dict[str, Tuple[float, float]]\n RANGES = {\n \"x\": (-0.2, 0.2),\n \"y\": (-0.1, 0.1),\n \"z\": (-0.05, 0.05),\n \"theta\": (-0.25 * np.pi, 0.75 * np.pi),\n }\n RANGES = {\n \"x\": (-1.0, 1.0),\n \"y\": (-1.0, 1.0),\n \"z\": (0.0, 0.1),\n \"theta\": (-0.25 * np.pi, 0.75 * np.pi),\n }\n RANGES = {\n \"r_reach\": (-0.2, 0.0),\n \"r_pull\": (-0.4, -0.1),\n \"y\": (-0.05, 0.05),\n \"theta\": (-0.5 * np.pi, 0.5 * np.pi),\n }\n RANGES = {\n \"r_reach\": (-0.4, -0.2),\n \"r_push\": (0.1, 0.4),\n \"y\": (-0.05, 0.05),\n \"theta\": (-0.5 * np.pi, 0.5 * np.pi),\n }\n def __init__(self, vector: Optional[np.ndarray] = None):\n def range(cls) -> np.ndarray:\n def random(cls):\n def __init__(\n self,\n vector: Optional[np.ndarray] = None,\n pos: Optional[np.ndarray] = None,\n theta: Optional[float] = None,\n ):\n def pos(self) -> np.ndarray:\n def pos(self, pos: np.ndarray) -> None:\n def theta(self) -> np.ndarray:\n def theta(self, theta: np.ndarray) -> None:\n def __repr__(self) -> str:\n def __init__(\n self,\n vector: Optional[np.ndarray] = None,\n pos: Optional[np.ndarray] = None,\n theta: Optional[float] = None,\n ):\n def pos(self) -> np.ndarray:\n def pos(self, pos: np.ndarray) -> None:\n def theta(self) -> np.ndarray:\n def theta(self, theta: np.ndarray) -> None:\n def __repr__(self) -> str:\n def __init__(\n self,\n vector: Optional[np.ndarray] = None,\n r_reach: Optional[float] = None,\n r_pull: Optional[float] = None,\n y: Optional[float] = None,\n theta: Optional[float] = None,\n ):\n def r_reach(self) -> np.ndarray:\n def r_reach(self, r_reach: np.ndarray) -> None:\n def r_pull(self) -> np.ndarray:\n def r_pull(self, r_pull: np.ndarray) -> None:\n def y(self) -> np.ndarray:\n def y(self, y: np.ndarray) -> None:\n def theta(self) -> np.ndarray:\n def theta(self, theta: np.ndarray) -> None:\n def __repr__(self) -> str:\n def __init__(\n self,\n vector: Optional[np.ndarray] = None,\n r_reach: Optional[float] = None,\n r_push: Optional[float] = None,\n y: Optional[float] = None,\n theta: Optional[float] = None,\n ):\n def r_reach(self) -> np.ndarray:\n def r_reach(self, r_reach: np.ndarray) -> None:\n def r_push(self) -> np.ndarray:\n def r_push(self, r_push: np.ndarray) -> None:\n def y(self) -> np.ndarray:\n def y(self, y: np.ndarray) -> None:\n def theta(self) -> np.ndarray:\n def theta(self, theta: np.ndarray) -> None:\n def __repr__(self) -> str:" }, { "identifier": "utils", "path": "generative_skill_chaining/envs/pybullet/table/utils.py", "snippet": "TABLE_CONSTRAINTS = {\n \"table_z_max\": 0.00,\n \"table_x_min\": 0.28,\n \"table_y_min\": -0.45,\n \"table_y_max\": 0.45,\n \"workspace_x_min\": 0.40,\n \"operational_x_min\": 0.50,\n \"operational_x_max\": 0.60,\n \"obstruction_x_min\": 0.575,\n \"workspace_radius\": 0.7,\n}\nEPSILONS = {\"aabb\": 0.01, \"align\": 0.99, \"twist\": 0.001, \"tipping\": 0.1}\nTWIST_HISTORY: Dict[str, Dict[Object, np.ndarray]] = collections.defaultdict(dict)\ndef compute_margins(obj: Object) -> np.ndarray:\ndef compute_object_pose(obj: Object, theta: float) -> math.Pose:\ndef is_above(obj_a: Object, obj_b: Object) -> bool:\ndef is_upright(obj: Object) -> bool:\ndef is_within_distance(\n obj_a: Object, obj_b: Object, distance: float, physics_id: int\n) -> 
bool:\ndef is_moving(obj: Object, use_history: Optional[str] = None) -> bool:\ndef is_below_table(obj: Object) -> bool:\ndef is_touching(\n body_a: body.Body,\n body_b: body.Body,\n link_id_a: Optional[int] = None,\n link_id_b: Optional[int] = None,\n) -> bool:\ndef is_intersecting(obj_a: Object, obj_b: Object) -> bool:\ndef is_under(obj_a: Object, obj_b: Object) -> bool:\ndef is_inworkspace(\n obj: Optional[Object] = None,\n obj_pos: Optional[np.ndarray] = None,\n distance: Optional[float] = None,\n) -> bool:\ndef is_beyondworkspace(\n obj: Optional[Object] = None,\n obj_pos: Optional[np.ndarray] = None,\n distance: Optional[float] = None,\n) -> bool:\ndef load_config(config: Union[str, Any]) -> Any:" }, { "identifier": "Box", "path": "generative_skill_chaining/envs/pybullet/table/objects.py", "snippet": "class Box(Object):\n def __init__(\n self,\n physics_id: int,\n name: str,\n size: Union[List[float], np.ndarray],\n color: Union[List[float], np.ndarray],\n mass: float = 0.1,\n ):\n box = shapes.Box(size=np.array(size), mass=mass, color=np.array(color))\n body_id = shapes.create_body(box, physics_id=physics_id)\n self._shape = box\n\n super().__init__(\n physics_id=physics_id, body_id=body_id, name=name, is_static=mass == 0.0\n )\n\n self._state.box_size = box.size\n self._bbox = np.array([-0.5 * self.size, 0.5 * self.size])\n\n @property\n def size(self) -> np.ndarray:\n return self._state.box_size\n\n @property\n def bbox(self) -> np.ndarray:\n return self._bbox\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return [self._shape]" }, { "identifier": "Hook", "path": "generative_skill_chaining/envs/pybullet/table/objects.py", "snippet": "class Hook(Object):\n @staticmethod\n def compute_link_positions(\n head_length: float, handle_length: float, handle_y: float, radius: float\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n dy = (\n 0.5\n * np.sign(handle_y)\n * max(0, (abs(handle_y) - 1.0) * head_length / 2 + radius)\n )\n pos_handle = np.array([-radius / 2, handle_y * head_length / 2 - dy, 0.0])\n pos_head = np.array([(handle_length - radius) / 2, -dy, 0.0])\n pos_joint = np.array(\n [(handle_length - radius) / 2, handle_y * head_length / 2 - dy, 0.0]\n )\n\n return pos_handle, pos_head, pos_joint\n\n def __init__(\n self,\n physics_id: int,\n name: str,\n head_length: float,\n handle_length: float,\n handle_y: float,\n color: Union[List[float], np.ndarray],\n radius: float = 0.02,\n mass: float = 0.1,\n ):\n if not isinstance(color, np.ndarray):\n color = np.array(color)\n\n pos_handle, pos_head, pos_joint = Hook.compute_link_positions(\n head_length=head_length,\n handle_length=handle_length,\n handle_y=handle_y,\n radius=radius,\n )\n handle = shapes.Cylinder(\n radius=radius,\n length=handle_length,\n mass=(handle_length / (head_length + handle_length + radius)) * mass,\n color=color,\n pose=math.Pose(\n pos=pos_handle,\n quat=eigen.Quaterniond(\n eigen.AngleAxisd(angle=np.pi / 2, axis=np.array([0.0, 1.0, 0.0]))\n ).coeffs,\n ),\n )\n head = shapes.Cylinder(\n radius=radius,\n length=head_length,\n mass=(head_length / (head_length + handle_length + radius)) * mass,\n color=color,\n pose=math.Pose(\n pos=pos_head,\n quat=eigen.Quaterniond(\n eigen.AngleAxisd(angle=np.pi / 2, axis=np.array([1.0, 0.0, 0.0]))\n ).coeffs,\n ),\n )\n joint = shapes.Sphere(\n radius=radius,\n mass=(radius / (head_length + handle_length + radius)) * mass,\n color=color,\n pose=math.Pose(pos=pos_joint),\n )\n self._shapes = [joint, handle, head]\n body_id = shapes.create_body(\n 
self.shapes, link_parents=[0, 0], physics_id=physics_id\n )\n\n super().__init__(\n physics_id=physics_id, body_id=body_id, name=name, is_static=mass == 0.0\n )\n\n self._state.head_length = head_length\n self._state.handle_length = handle_length\n self._state.handle_y = handle_y\n self._radius = radius\n\n self._size = np.array(\n [handle_length + radius, head_length + 2 * abs(pos_head[1]), 2 * radius]\n )\n self._bbox = np.array([-0.5 * self.size, 0.5 * self.size])\n\n @property\n def head_length(self) -> float:\n return self._state.head_length # type: ignore\n\n @property\n def handle_length(self) -> float:\n return self._state.handle_length # type: ignore\n\n @property\n def handle_y(self) -> float:\n return self._state.handle_y # type: ignore\n\n @property\n def radius(self) -> float:\n return self._radius\n\n @property\n def size(self) -> np.ndarray:\n return self._size\n\n @property\n def bbox(self) -> np.ndarray:\n return self._bbox\n\n def convex_hulls(\n self, world_frame: bool = True, project_2d: bool = False\n ) -> List[np.ndarray]:\n \"\"\"Computes the convex hulls of the handle and head links.\"\"\"\n handle_pose = self.shapes[1].pose\n head_pose = self.shapes[2].pose\n assert handle_pose is not None and head_pose is not None\n\n positions = np.array(\n [\n [0.0, handle_pose.pos[1], 0.0],\n [head_pose.pos[0], 0.0, 0.0],\n ]\n )\n sizes = np.array(\n [\n [self.size[0], 2 * self.radius, 2 * self.radius],\n [2 * self.radius, self.size[1], 2 * self.radius],\n ]\n )\n bboxes = np.array([positions - 0.5 * sizes, positions + 0.5 * sizes]).swapaxes(\n 0, 1\n )\n\n pose = self.pose() if world_frame else None\n vertices = [compute_bbox_vertices(bbox, pose, project_2d) for bbox in bboxes]\n\n return vertices\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return self._shapes\n\n # def aabb(self) -> np.ndarray:\n # raise NotImplementedError" }, { "identifier": "Null", "path": "generative_skill_chaining/envs/pybullet/table/objects.py", "snippet": "class Null(Object):\n def __init__(self, physics_id: int, name: str):\n sphere = shapes.Sphere(radius=0.001)\n body_id = shapes.create_body(sphere, physics_id=physics_id)\n\n super().__init__(\n physics_id=physics_id, body_id=body_id, name=name, is_static=True\n )\n\n def state(self) -> object_state.ObjectState:\n # Null object state is a zero vector.\n return self._state\n\n def enable_collisions(self) -> None:\n pass\n\n def unfreeze(self) -> bool:\n return False" }, { "identifier": "Object", "path": "generative_skill_chaining/envs/pybullet/table/objects.py", "snippet": "class Object(body.Body):\n name: str\n is_static: bool = False\n\n def __init__(\n self, physics_id: int, body_id: int, name: str, is_static: bool = False\n ):\n super().__init__(physics_id, body_id)\n\n self.name = name\n self.is_static = is_static\n\n T_pybullet_to_obj = super().pose().to_eigen()\n self._modified_axes = not T_pybullet_to_obj.is_approx(\n eigen.Isometry3d.identity()\n )\n if self._modified_axes:\n self._T_pybullet_to_obj = T_pybullet_to_obj\n self._T_obj_to_pybullet = T_pybullet_to_obj.inverse()\n\n self._state = object_state.ObjectState()\n\n def pose(self) -> math.Pose:\n if not self._modified_axes:\n return super().pose()\n\n return math.Pose.from_eigen(super().pose().to_eigen() * self._T_obj_to_pybullet)\n\n def set_pose(self, pose: math.Pose) -> None:\n if not self._modified_axes:\n return super().set_pose(pose)\n\n return super().set_pose(\n math.Pose.from_eigen(pose.to_eigen() * self._T_pybullet_to_obj)\n )\n\n def 
disable_collisions(self) -> None:\n for link_id in range(self.dof):\n p.setCollisionFilterGroupMask(\n self.body_id, link_id, 0, 0, physicsClientId=self.physics_id\n )\n\n def enable_collisions(self) -> None:\n for link_id in range(self.dof):\n p.setCollisionFilterGroupMask(\n self.body_id, link_id, 1, 0xFF, physicsClientId=self.physics_id\n )\n\n @property\n def inertia(self) -> dyn.SpatialInertiad:\n try:\n return self._obj_inertia # type: ignore\n except AttributeError:\n pass\n\n self._obj_inertia = super().inertia\n if self._modified_axes:\n self._obj_inertia = self._obj_inertia * self._T_pybullet_to_obj\n\n T_world_to_obj = self.pose().to_eigen().inverse()\n for link_id in range(self.dof):\n link = body.Link(self.physics_id, self.body_id, link_id)\n T_link_to_obj = T_world_to_obj * link.pose().to_eigen()\n self._obj_inertia += link.inertia * T_link_to_obj\n\n return self._obj_inertia\n\n def state(self) -> object_state.ObjectState:\n pose = self.pose()\n aa = eigen.AngleAxisd(eigen.Quaterniond(pose.quat))\n self._state.pos = pose.pos\n self._state.aa = aa.angle * aa.axis\n\n return self._state\n\n def set_state(self, state: object_state.ObjectState) -> None:\n self.set_pose(state.pose())\n\n def reset(self, action_skeleton: List) -> None:\n pass\n\n @classmethod\n def create(\n cls,\n physics_id: int,\n object_type: Optional[str],\n object_kwargs: Dict[str, Any] = {},\n object_groups: Dict[str, \"ObjectGroup\"] = {},\n **kwargs,\n ) -> \"Object\":\n object_class = Null if object_type is None else globals()[object_type]\n if issubclass(object_class, Variant):\n kwargs[\"object_groups\"] = object_groups\n object_kwargs = object_kwargs.copy()\n object_kwargs.update(kwargs)\n return object_class(physics_id=physics_id, **object_kwargs)\n\n def isinstance(self, class_or_tuple: Union[type, Tuple[type, ...]]) -> bool:\n return isinstance(self, class_or_tuple)\n\n def type(self) -> Type[\"Object\"]:\n return type(self)\n\n @property\n def size(self) -> np.ndarray:\n raise NotImplementedError\n\n @property\n def bbox(self) -> np.ndarray:\n \"\"\"Returns the bounding box in the object frame.\n\n If the origin of the object is at its geometric center, this will be\n equivalent to `(-0.5 * self.size, 0.5 * self.size)`.\n\n Returns:\n An array of shape [2, 3] (min/max, x/y/z).\n \"\"\"\n raise NotImplementedError\n\n def convex_hulls(\n self, world_frame: bool = True, project_2d: bool = False\n ) -> List[np.ndarray]:\n \"\"\"Computes the object's convex hull.\n\n These hulls will be used for rough collision checking. By default,\n the vertices will be the 6 corners of the object's bounding box\n (`Object.bbox`).\n\n Args:\n world_frame: Whether to transform the vertices in world frame or\n leave them in object frame.\n project_2d: Whether to return the 2d convex hull.\n\n Returns:\n List of arrays of shape [_, 3] or [_, 2], where each array is a\n convex hull.\n \"\"\"\n pose = self.pose() if world_frame else None\n vertices = compute_bbox_vertices(self.bbox, pose, project_2d)\n\n return [vertices]\n\n def aabb(self) -> np.ndarray:\n \"\"\"Computes the axis-aligned bounding box from the object pose and size.\n\n This should be more accurate than `super().aabb()`, which gets the aabb\n from Pybullet. 
Pybullet returns an *enlarged* aabb for the object *base*\n link, while this returns the exact aabb for the entire object.\n\n Returns:\n An array of shape [2, 3] (min/max, x/y/z).\n \"\"\"\n vertices = np.concatenate(self.convex_hulls(world_frame=True), axis=0)\n xyz_min = vertices.min(axis=0)\n xyz_max = vertices.max(axis=0)\n\n return np.array([xyz_min, xyz_max])\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return []\n\n def __str__(self) -> str:\n return self.name\n\n def __hash__(self) -> int:\n return hash(str(self))\n\n def __eq__(self, other) -> bool:\n return str(self) == str(other)" }, { "identifier": "Rack", "path": "generative_skill_chaining/envs/pybullet/table/objects.py", "snippet": "class Rack(Object):\n TOP_THICKNESS = 0.01\n LEG_THICKNESS = 0.01\n\n def __init__(\n self,\n physics_id: int,\n name: str,\n size: Union[List[float], np.ndarray],\n color: Union[List[float], np.ndarray],\n mass: float = 1.0,\n ):\n mass /= 7 # Divide mass among all 7 parts.\n top = shapes.Box(\n size=np.array([*size[:2], Rack.TOP_THICKNESS]),\n mass=mass,\n color=np.array(color),\n pose=math.Pose(\n pos=np.array([0.0, 0.0, -Rack.TOP_THICKNESS / 2]),\n quat=eigen.Quaterniond.identity().coeffs,\n ),\n )\n xy_legs = np.array([(x, y) for x in (-1, 1) for y in (-1, 1)]) * (\n (np.array(size[:2])[None, :] - Rack.LEG_THICKNESS) / 2\n )\n legs = [\n shapes.Box(\n size=np.array(\n [\n Rack.LEG_THICKNESS,\n Rack.LEG_THICKNESS,\n size[2] - Rack.TOP_THICKNESS - Rack.LEG_THICKNESS,\n ]\n ),\n mass=mass,\n color=np.array([0.0, 0.0, 0.0, 1.0]),\n pose=math.Pose(\n pos=np.array(\n [\n *xy_leg,\n -(size[2] + Rack.TOP_THICKNESS - Rack.LEG_THICKNESS) / 2,\n ]\n ),\n quat=eigen.Quaterniond.identity().coeffs,\n ),\n )\n for xy_leg in xy_legs\n ]\n stabilizers = [\n shapes.Box(\n size=np.array([size[0], Rack.LEG_THICKNESS, Rack.LEG_THICKNESS]),\n mass=mass,\n color=np.array([0.0, 0.0, 0.0, 1.0]),\n pose=math.Pose(\n pos=np.array([0.0, y_leg, -size[2] + Rack.LEG_THICKNESS / 2]),\n quat=eigen.Quaterniond.identity().coeffs,\n ),\n )\n for y_leg in xy_legs[:2, 1]\n ]\n self._shapes = [top, *legs, *stabilizers]\n body_id = shapes.create_body(\n self.shapes,\n link_parents=[0] * (len(legs) + len(stabilizers)),\n physics_id=physics_id,\n )\n\n super().__init__(\n physics_id=physics_id, body_id=body_id, name=name, is_static=mass == 0.0\n )\n\n self._state.box_size = np.array(size)\n self._bbox = np.array([-0.5 * self.size, 0.5 * self.size])\n self._bbox[0, 2] = -size[2]\n self._bbox[1, 2] = 0\n\n @property\n def size(self) -> np.ndarray:\n return self._state.box_size\n\n @property\n def bbox(self) -> np.ndarray:\n return self._bbox\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return self._shapes" }, { "identifier": "math", "path": "generative_skill_chaining/envs/pybullet/sim/math.py", "snippet": "PYBULLET_STEPS_PER_SEC = 240\nPYBULLET_TIMESTEP = 1 / PYBULLET_STEPS_PER_SEC\nclass Pose:\n def from_eigen(pose: eigen.Isometry3d) -> \"Pose\":\n def to_eigen(self) -> eigen.Isometry3d:\ndef comb(n: int, r: int) -> int:" }, { "identifier": "Robot", "path": "generative_skill_chaining/envs/pybullet/sim/robot.py", "snippet": "class Robot(body.Body):\n \"\"\"User-facing robot interface.\"\"\"\n\n def __init__(\n self,\n physics_id: int,\n step_simulation_fn: Callable[[], None],\n urdf: str,\n arm_class: Union[str, Type[arm.Arm]],\n arm_kwargs: Dict[str, Any],\n gripper_class: Union[str, Type[gripper.Gripper]],\n gripper_kwargs: Dict[str, Any],\n ):\n \"\"\"Loads the robot from a urdf file.\n\n Args:\n 
physics_id: Pybullet physics client id.\n step_simulation_fn: Function to step simulation.\n urdf: Path to urdf.\n arm_class: In the generative_skill_chaining.envs.pybullet namespace.\n arm_kwargs: Arm kwargs from yaml config.\n gripper_class: In the generative_skill_chaining.envs.pybullet namespace.\n gripper_kwargs: Gripper kwargs from yaml config.\n \"\"\"\n body_id = p.loadURDF(\n fileName=urdf,\n useFixedBase=True,\n flags=p.URDF_USE_INERTIA_FROM_FILE\n | p.URDF_MAINTAIN_LINK_ORDER, # | p.URDF_MERGE_FIXED_LINKS\n physicsClientId=physics_id,\n )\n super().__init__(physics_id, body_id)\n\n if isinstance(arm_class, str):\n arm_class = configs.get_class(arm_class, pybullet)\n if isinstance(gripper_class, str):\n gripper_class = configs.get_class(gripper_class, pybullet)\n\n self._arm = arm_class(self.physics_id, self.body_id, **arm_kwargs)\n T_world_to_ee = dyn.cartesian_pose(self.arm.ab).inverse()\n self._gripper = gripper_class(\n self.physics_id, self.body_id, T_world_to_ee, **gripper_kwargs\n )\n\n self.step_simulation = step_simulation_fn\n\n @property\n def arm(self) -> arm.Arm:\n \"\"\"Controllable arm.\"\"\"\n return self._arm\n\n @property\n def gripper(self) -> gripper.Gripper:\n \"\"\"Controllable gripper.\"\"\"\n return self._gripper\n\n @property\n def home_pose(self) -> math.Pose:\n return self.arm.home_pose\n\n def reset(self) -> bool:\n \"\"\"Resets the robot by setting the arm to its home configuration and the gripper to the open position.\n\n This method disables torque control and bypasses simulation.\n \"\"\"\n self.gripper.reset()\n self.clear_load()\n status = self.arm.reset()\n if isinstance(self.arm, real.arm.Arm):\n status = self.goto_configuration(self.arm.q_home)\n return status\n\n def clear_load(self) -> None:\n \"\"\"Resets the end-effector load to the gripper inertia.\"\"\"\n if self.gripper.inertia is not None:\n self.arm.ab.replace_load(self.gripper.inertia)\n else:\n self.arm.ab.clear_load()\n\n def set_load(self, inertia: dyn.SpatialInertiad) -> None:\n \"\"\"Sets the end-effector load to the sum of the given inertia and gripper inertia.\"\"\"\n if self.gripper.inertia is not None:\n inertia = inertia + self.gripper.inertia\n self.arm.ab.replace_load(inertia)\n\n def get_state(self) -> Dict[str, Any]:\n return {\n \"arm\": self.arm.get_state(),\n \"gripper\": self.gripper.get_state(),\n \"load\": copy.deepcopy(self.arm.ab.inertia_load),\n }\n\n def set_state(self, state: Dict[str, Any]) -> None:\n self.arm.set_state(state[\"arm\"])\n self.gripper.set_state(state[\"gripper\"])\n idx_link, load_inertia = next(iter(state[\"load\"].items()))\n self.arm.ab.replace_load(load_inertia, idx_link)\n\n def goto_home(self) -> bool:\n \"\"\"Uses opspace control to go to the home position.\"\"\"\n return self.goto_pose(\n self.home_pose.pos,\n self.home_pose.quat,\n pos_gains=(64, 16),\n ori_gains=(64, 16),\n )\n\n def _is_colliding(\n self, body_id_a: int, body_id_b: int, link_id_a: Optional[int] = None\n ) -> bool:\n kwargs = {}\n if link_id_a is not None:\n kwargs[\"linkIndexA\"] = link_id_a\n contacts = p.getContactPoints(\n bodyA=body_id_a, bodyB=body_id_b, physicsClientId=self.physics_id, **kwargs\n )\n\n if not contacts:\n return False\n\n force = contacts[0][9]\n return force > 0.0\n\n def goto_pose(\n self,\n pos: Optional[np.ndarray] = None,\n quat: Optional[Union[eigen.Quaterniond, np.ndarray]] = None,\n pos_gains: Optional[Union[Tuple[float, float], np.ndarray]] = None,\n ori_gains: Optional[Union[Tuple[float, float], np.ndarray]] = None,\n timeout: 
Optional[float] = None,\n check_collisions: Sequence[int] = [],\n check_collision_freq: int = 10,\n ) -> bool:\n \"\"\"Uses opspace control to go to the desired pose.\n\n This method blocks until the command finishes or times out. A\n ControlException will be raised if the grasp controller is aborted.\n\n Args:\n pos: Optional position. Maintains current position if None.\n quat: Optional quaternion. Maintains current orientation if None.\n pos_gains: (kp, kv) gains or [3 x 2] array of xyz gains.\n ori_gains: (kp, kv) gains or [3 x 2] array of xyz gains.\n timeout: Uses the timeout specified in the yaml arm config if None.\n check_collisions: Raise an exception if the gripper or grasped\n object collides with any of the body_ids in this list.\n check_collision_freq: Iteration interval with which to check\n collisions.\n Returns:\n True if the grasp controller converges to the desired position or\n zero velocity, false if the command times out.\n \"\"\"\n if check_collisions:\n body_ids_a = [self.body_id] * len(self.gripper.finger_links)\n link_ids_a: List[Optional[int]] = list(self.gripper.finger_links)\n grasp_body_id = self.gripper._gripper_state.grasp_body_id\n if grasp_body_id is not None:\n body_ids_a.append(grasp_body_id)\n link_ids_a.append(None)\n\n # Set the pose goal.\n self.arm.set_pose_goal(pos, quat, pos_gains, ori_gains, timeout)\n\n # Simulate until the pose goal is reached.\n status = self.arm.update_torques()\n self.gripper.update_torques()\n iter = 0\n while status == articulated_body.ControlStatus.IN_PROGRESS:\n self.step_simulation()\n status = self.arm.update_torques()\n self.gripper.update_torques()\n iter += 1\n\n if isinstance(self.arm, real.arm.Arm):\n continue\n\n if not check_collisions or iter % check_collision_freq != 0:\n continue\n\n # Terminate early if there are collisions with the gripper fingers\n # or grasped object.\n for body_id_a, link_id_a in zip(body_ids_a, link_ids_a):\n for body_id_b in check_collisions:\n if self._is_colliding(body_id_a, body_id_b, link_id_a):\n raise ControlException(\n f\"Robot.goto_pose({pos}, {quat}): Collision {body_id_a}:{link_id_a}, {body_id_b}\"\n )\n # print(\"Robot.goto_pose:\", pos, quat, status)\n\n if status == articulated_body.ControlStatus.ABORTED:\n raise ControlException(f\"Robot.goto_pose({pos}, {quat}): Singularity\")\n\n return status in (\n articulated_body.ControlStatus.POS_CONVERGED,\n articulated_body.ControlStatus.VEL_CONVERGED,\n )\n\n def goto_configuration(self, q: np.ndarray) -> bool:\n \"\"\"Sets the robot to the desired joint configuration.\n\n Args:\n q: Joint configuration.\n Returns:\n True if the controller converges to the desired position or zero\n velocity, false if the command times out.\n \"\"\"\n # Set the configuration goal.\n self.arm.set_configuration_goal(q)\n\n # Simulate until the pose goal is reached.\n status = self.arm.update_torques()\n self.gripper.update_torques()\n while status == articulated_body.ControlStatus.IN_PROGRESS:\n self.step_simulation()\n status = self.arm.update_torques()\n self.gripper.update_torques()\n\n return status in (\n articulated_body.ControlStatus.POS_CONVERGED,\n articulated_body.ControlStatus.VEL_CONVERGED,\n )\n\n def grasp(\n self,\n command: float,\n pos_gains: Optional[Union[Tuple[float, float], np.ndarray]] = None,\n timeout: Optional[float] = None,\n ) -> bool:\n \"\"\"Sets the gripper to the desired grasp (0.0 open, 1.0 closed).\n\n This method blocks until the command finishes or times out. 
A\n ControlException will be raised if the grasp controller is aborted.\n\n Any existing grasp constraints will be cleared and no new ones will be\n created. Use `Robot.grasp_object()` to create a grasp constraint.\n\n Args:\n command: Desired grasp (range from 0.0 open to 1.0 closed).\n pos_gains: kp gains (only used for sim).\n timeout: Uses the timeout specified in the yaml gripper config if None.\n Returns:\n True if the grasp controller converges to the desired position or\n zero velocity, false if the command times out.\n \"\"\"\n # Clear any existing grasp constraints.\n self.gripper.remove_grasp_constraint()\n self.clear_load()\n\n # Set the new grasp command.\n self.gripper.set_grasp(command, pos_gains, timeout)\n\n # Simulate until the grasp command finishes.\n status = self.gripper.update_torques()\n while status == articulated_body.ControlStatus.IN_PROGRESS:\n self.arm.update_torques()\n self.step_simulation()\n status = self.gripper.update_torques()\n # print(\"Robot.grasp:\", command, status)\n\n if status == articulated_body.ControlStatus.ABORTED:\n raise ControlException(f\"Robot.grasp({command})\")\n\n return status in (\n articulated_body.ControlStatus.POS_CONVERGED,\n articulated_body.ControlStatus.VEL_CONVERGED,\n )\n\n def grasp_object(\n self,\n obj: body.Body,\n pos_gains: Optional[Union[Tuple[float, float], np.ndarray]] = None,\n timeout: Optional[float] = None,\n realistic: bool = True,\n ) -> bool:\n \"\"\"Attempts to grasp an object and attaches the object to the gripper via a pose constraint.\n\n This method blocks until the command finishes or times out. A\n ControlException will be raised if the grasp controller is aborted.\n\n Args:\n command: Desired grasp (range from 0.0 open to 1.0 closed).\n pos_gains: kp gains (only used for sim).\n timeout: Uses the timeout specified in the yaml gripper config if None.\n realistic: If false, creates a pose constraint regardless of whether\n the object is in a secure grasp.\n Returns:\n True if the object is successfully grasped, false otherwise.\n \"\"\"\n if realistic:\n self.grasp(1, pos_gains, timeout)\n\n # Wait for grasped object to settle.\n status = self.gripper.update_torques()\n while (\n status\n in (\n articulated_body.ControlStatus.VEL_CONVERGED,\n articulated_body.ControlStatus.IN_PROGRESS,\n )\n and self.gripper._gripper_state.iter_timeout >= 0\n and (obj.twist() > 0.001).any()\n ):\n self.arm.update_torques()\n status = self.gripper.update_torques()\n self.step_simulation()\n\n # Make sure fingers aren't fully closed.\n if status == articulated_body.ControlStatus.POS_CONVERGED:\n return False\n\n # Lock the object in place with a grasp constraint.\n if not self.gripper.create_grasp_constraint(obj.body_id, realistic):\n return False\n\n # Add object load.\n T_obj_to_world = obj.pose().to_eigen()\n T_ee_to_world = dyn.cartesian_pose(self.arm.ab)\n T_obj_to_ee = T_ee_to_world.inverse() * T_obj_to_world\n self.set_load(obj.inertia * T_obj_to_ee)\n\n return True" } ]
import dataclasses import random import numpy as np import pybullet as p import symbolic from typing import Optional, Dict, List, Sequence, Tuple, Type from ctrlutils import eigen from shapely.geometry import Polygon, LineString from generative_skill_chaining.envs.pybullet.table import primitive_actions, utils from generative_skill_chaining.envs.pybullet.table.objects import Box, Hook, Null, Object, Rack from generative_skill_chaining.envs.pybullet.sim import math from generative_skill_chaining.envs.pybullet.sim.robot import Robot
9,367
dbprint = lambda *args: None # noqa # dbprint = print @dataclasses.dataclass class Predicate: args: List[str] @classmethod def create(cls, proposition: str) -> "Predicate": predicate, args = symbolic.parse_proposition(proposition) predicate_classes = { name.lower(): predicate_class for name, predicate_class in globals().items() } predicate_class = predicate_classes[predicate] return predicate_class(args) def sample( self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"] ) -> bool: """Generates a geometric grounding of a predicate.""" return True def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"] ) -> bool: """Evaluates to True if the geometrically grounded predicate is satisfied.""" return True def get_arg_objects(self, objects: Dict[str, Object]) -> List[Object]: return [objects[arg] for arg in self.args] def __str__(self) -> str: return f"{type(self).__name__.lower()}({', '.join(self.args)})" def __hash__(self) -> int: return hash(str(self)) def __eq__(self, other) -> bool: return str(self) == str(other) class HandleGrasp(Predicate): """Unary predicate enforcing a handle grasp towards the tail end on a hook object.""" pass class UpperHandleGrasp(Predicate): """Unary predicate enforcing a handle grasp towards the head on a hook object.""" pass class Free(Predicate): """Unary predicate enforcing that no top-down occlusions exist on the object.""" DISTANCE_MIN: Dict[Tuple[Type[Object], Type[Object]], float] = {
dbprint = lambda *args: None # noqa # dbprint = print @dataclasses.dataclass class Predicate: args: List[str] @classmethod def create(cls, proposition: str) -> "Predicate": predicate, args = symbolic.parse_proposition(proposition) predicate_classes = { name.lower(): predicate_class for name, predicate_class in globals().items() } predicate_class = predicate_classes[predicate] return predicate_class(args) def sample( self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"] ) -> bool: """Generates a geometric grounding of a predicate.""" return True def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"] ) -> bool: """Evaluates to True if the geometrically grounded predicate is satisfied.""" return True def get_arg_objects(self, objects: Dict[str, Object]) -> List[Object]: return [objects[arg] for arg in self.args] def __str__(self) -> str: return f"{type(self).__name__.lower()}({', '.join(self.args)})" def __hash__(self) -> int: return hash(str(self)) def __eq__(self, other) -> bool: return str(self) == str(other) class HandleGrasp(Predicate): """Unary predicate enforcing a handle grasp towards the tail end on a hook object.""" pass class UpperHandleGrasp(Predicate): """Unary predicate enforcing a handle grasp towards the head on a hook object.""" pass class Free(Predicate): """Unary predicate enforcing that no top-down occlusions exist on the object.""" DISTANCE_MIN: Dict[Tuple[Type[Object], Type[Object]], float] = {
(Box, Box): 0.05,
2
2023-10-16 00:22:40+00:00
12k
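The cropped_code/all_code of the record above centers on Predicate.create, which dispatches a proposition string such as "free(box)" to a predicate class by its lowercased class name. A minimal standalone sketch of that lookup pattern follows; the toy parser and the example classes are assumptions added here for illustration and merely stand in for the repo's symbolic.parse_proposition and its pybullet-backed predicates.

import dataclasses
from typing import List


@dataclasses.dataclass
class Predicate:
    args: List[str]

    @classmethod
    def create(cls, proposition: str) -> "Predicate":
        # Stand-in for symbolic.parse_proposition: "free(box)" -> ("free", ["box"]).
        name, arg_str = proposition.rstrip(")").split("(", 1)
        args = [a.strip() for a in arg_str.split(",") if a.strip()]
        # Same dispatch idea as the record's code: match the predicate name
        # against lowercased class names found in the module globals.
        predicate_classes = {
            cls_name.lower(): cls_obj
            for cls_name, cls_obj in globals().items()
            if isinstance(cls_obj, type) and issubclass(cls_obj, Predicate)
        }
        return predicate_classes[name.lower()](args)


class Free(Predicate):
    """No top-down occlusions exist on the object."""


class HandleGrasp(Predicate):
    """Handle grasp towards the tail end of a hook."""


if __name__ == "__main__":
    pred = Predicate.create("free(box)")
    print(type(pred).__name__, pred.args)  # -> Free ['box']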
akashgreninja/GreSec
backend/venv/lib/python3.10/site-packages/h11/_connection.py
[ { "identifier": "ConnectionClosed", "path": "backend/venv/lib/python3.10/site-packages/h11/_events.py", "snippet": "class ConnectionClosed(Event):\n \"\"\"This event indicates that the sender has closed their outgoing\n connection.\n\n Note that this does not necessarily mean that they can't *receive* further\n data, because TCP connections are composed to two one-way channels which\n can be closed independently. See :ref:`closing` for details.\n\n No fields.\n \"\"\"\n\n pass" }, { "identifier": "Data", "path": "backend/venv/lib/python3.10/site-packages/h11/_events.py", "snippet": "class Data(Event):\n \"\"\"Part of an HTTP message body.\n\n Fields:\n\n .. attribute:: data\n\n A :term:`bytes-like object` containing part of a message body. Or, if\n using the ``combine=False`` argument to :meth:`Connection.send`, then\n any object that your socket writing code knows what to do with, and for\n which calling :func:`len` returns the number of bytes that will be\n written -- see :ref:`sendfile` for details.\n\n .. attribute:: chunk_start\n\n A marker that indicates whether this data object is from the start of a\n chunked transfer encoding chunk. This field is ignored when when a Data\n event is provided to :meth:`Connection.send`: it is only valid on\n events emitted from :meth:`Connection.next_event`. You probably\n shouldn't use this attribute at all; see\n :ref:`chunk-delimiters-are-bad` for details.\n\n .. attribute:: chunk_end\n\n A marker that indicates whether this data object is the last for a\n given chunked transfer encoding chunk. This field is ignored when when\n a Data event is provided to :meth:`Connection.send`: it is only valid\n on events emitted from :meth:`Connection.next_event`. You probably\n shouldn't use this attribute at all; see\n :ref:`chunk-delimiters-are-bad` for details.\n\n \"\"\"\n\n __slots__ = (\"data\", \"chunk_start\", \"chunk_end\")\n\n data: bytes\n chunk_start: bool\n chunk_end: bool\n\n def __init__(\n self, data: bytes, chunk_start: bool = False, chunk_end: bool = False\n ) -> None:\n object.__setattr__(self, \"data\", data)\n object.__setattr__(self, \"chunk_start\", chunk_start)\n object.__setattr__(self, \"chunk_end\", chunk_end)\n\n # This is an unhashable type.\n __hash__ = None # type: ignore" }, { "identifier": "EndOfMessage", "path": "backend/venv/lib/python3.10/site-packages/h11/_events.py", "snippet": "class EndOfMessage(Event):\n \"\"\"The end of an HTTP message.\n\n Fields:\n\n .. attribute:: headers\n\n Default value: ``[]``\n\n Any trailing headers attached to this message, represented as a list of\n (name, value) pairs. 
See :ref:`the header normalization rules\n <headers-format>` for details.\n\n Must be empty unless ``Transfer-Encoding: chunked`` is in use.\n\n \"\"\"\n\n __slots__ = (\"headers\",)\n\n headers: Headers\n\n def __init__(\n self,\n *,\n headers: Union[\n Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]], None\n ] = None,\n _parsed: bool = False,\n ) -> None:\n super().__init__()\n if headers is None:\n headers = Headers([])\n elif not isinstance(headers, Headers):\n headers = normalize_and_validate(headers, _parsed=_parsed)\n\n object.__setattr__(self, \"headers\", headers)\n\n # This is an unhashable type.\n __hash__ = None # type: ignore" }, { "identifier": "Event", "path": "backend/venv/lib/python3.10/site-packages/h11/_events.py", "snippet": "class Event(ABC):\n \"\"\"\n Base class for h11 events.\n \"\"\"\n\n __slots__ = ()" }, { "identifier": "InformationalResponse", "path": "backend/venv/lib/python3.10/site-packages/h11/_events.py", "snippet": "class InformationalResponse(_ResponseBase):\n \"\"\"An HTTP informational response.\n\n Fields:\n\n .. attribute:: status_code\n\n The status code of this response, as an integer. For an\n :class:`InformationalResponse`, this is always in the range [100,\n 200).\n\n .. attribute:: headers\n\n Request headers, represented as a list of (name, value) pairs. See\n :ref:`the header normalization rules <headers-format>` for\n details.\n\n .. attribute:: http_version\n\n The HTTP protocol version, represented as a byte string like\n ``b\"1.1\"``. See :ref:`the HTTP version normalization rules\n <http_version-format>` for details.\n\n .. attribute:: reason\n\n The reason phrase of this response, as a byte string. For example:\n ``b\"OK\"``, or ``b\"Not Found\"``.\n\n \"\"\"\n\n def __post_init__(self) -> None:\n if not (100 <= self.status_code < 200):\n raise LocalProtocolError(\n \"InformationalResponse status_code should be in range \"\n \"[100, 200), not {}\".format(self.status_code)\n )\n\n # This is an unhashable type.\n __hash__ = None # type: ignore" }, { "identifier": "Request", "path": "backend/venv/lib/python3.10/site-packages/h11/_events.py", "snippet": "class Request(Event):\n \"\"\"The beginning of an HTTP request.\n\n Fields:\n\n .. attribute:: method\n\n An HTTP method, e.g. ``b\"GET\"`` or ``b\"POST\"``. Always a byte\n string. :term:`Bytes-like objects <bytes-like object>` and native\n strings containing only ascii characters will be automatically\n converted to byte strings.\n\n .. attribute:: target\n\n The target of an HTTP request, e.g. ``b\"/index.html\"``, or one of the\n more exotic formats described in `RFC 7320, section 5.3\n <https://tools.ietf.org/html/rfc7230#section-5.3>`_. Always a byte\n string. :term:`Bytes-like objects <bytes-like object>` and native\n strings containing only ascii characters will be automatically\n converted to byte strings.\n\n .. attribute:: headers\n\n Request headers, represented as a list of (name, value) pairs. See\n :ref:`the header normalization rules <headers-format>` for details.\n\n .. attribute:: http_version\n\n The HTTP protocol version, represented as a byte string like\n ``b\"1.1\"``. 
See :ref:`the HTTP version normalization rules\n <http_version-format>` for details.\n\n \"\"\"\n\n __slots__ = (\"method\", \"headers\", \"target\", \"http_version\")\n\n method: bytes\n headers: Headers\n target: bytes\n http_version: bytes\n\n def __init__(\n self,\n *,\n method: Union[bytes, str],\n headers: Union[Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]]],\n target: Union[bytes, str],\n http_version: Union[bytes, str] = b\"1.1\",\n _parsed: bool = False,\n ) -> None:\n super().__init__()\n if isinstance(headers, Headers):\n object.__setattr__(self, \"headers\", headers)\n else:\n object.__setattr__(\n self, \"headers\", normalize_and_validate(headers, _parsed=_parsed)\n )\n if not _parsed:\n object.__setattr__(self, \"method\", bytesify(method))\n object.__setattr__(self, \"target\", bytesify(target))\n object.__setattr__(self, \"http_version\", bytesify(http_version))\n else:\n object.__setattr__(self, \"method\", method)\n object.__setattr__(self, \"target\", target)\n object.__setattr__(self, \"http_version\", http_version)\n\n # \"A server MUST respond with a 400 (Bad Request) status code to any\n # HTTP/1.1 request message that lacks a Host header field and to any\n # request message that contains more than one Host header field or a\n # Host header field with an invalid field-value.\"\n # -- https://tools.ietf.org/html/rfc7230#section-5.4\n host_count = 0\n for name, value in self.headers:\n if name == b\"host\":\n host_count += 1\n if self.http_version == b\"1.1\" and host_count == 0:\n raise LocalProtocolError(\"Missing mandatory Host: header\")\n if host_count > 1:\n raise LocalProtocolError(\"Found multiple Host: headers\")\n\n validate(method_re, self.method, \"Illegal method characters\")\n validate(request_target_re, self.target, \"Illegal target characters\")\n\n # This is an unhashable type.\n __hash__ = None # type: ignore" }, { "identifier": "Response", "path": "backend/venv/lib/python3.10/site-packages/h11/_events.py", "snippet": "class Response(_ResponseBase):\n \"\"\"The beginning of an HTTP response.\n\n Fields:\n\n .. attribute:: status_code\n\n The status code of this response, as an integer. For an\n :class:`Response`, this is always in the range [200,\n 1000).\n\n .. attribute:: headers\n\n Request headers, represented as a list of (name, value) pairs. See\n :ref:`the header normalization rules <headers-format>` for details.\n\n .. attribute:: http_version\n\n The HTTP protocol version, represented as a byte string like\n ``b\"1.1\"``. See :ref:`the HTTP version normalization rules\n <http_version-format>` for details.\n\n .. attribute:: reason\n\n The reason phrase of this response, as a byte string. 
For example:\n ``b\"OK\"``, or ``b\"Not Found\"``.\n\n \"\"\"\n\n def __post_init__(self) -> None:\n if not (200 <= self.status_code < 1000):\n raise LocalProtocolError(\n \"Response status_code should be in range [200, 1000), not {}\".format(\n self.status_code\n )\n )\n\n # This is an unhashable type.\n __hash__ = None # type: ignore" }, { "identifier": "get_comma_header", "path": "backend/venv/lib/python3.10/site-packages/h11/_headers.py", "snippet": "def get_comma_header(headers: Headers, name: bytes) -> List[bytes]:\n # Should only be used for headers whose value is a list of\n # comma-separated, case-insensitive values.\n #\n # The header name `name` is expected to be lower-case bytes.\n #\n # Connection: meets these criteria (including cast insensitivity).\n #\n # Content-Length: technically is just a single value (1*DIGIT), but the\n # standard makes reference to implementations that do multiple values, and\n # using this doesn't hurt. Ditto, case insensitivity doesn't things either\n # way.\n #\n # Transfer-Encoding: is more complex (allows for quoted strings), so\n # splitting on , is actually wrong. For example, this is legal:\n #\n # Transfer-Encoding: foo; options=\"1,2\", chunked\n #\n # and should be parsed as\n #\n # foo; options=\"1,2\"\n # chunked\n #\n # but this naive function will parse it as\n #\n # foo; options=\"1\n # 2\"\n # chunked\n #\n # However, this is okay because the only thing we are going to do with\n # any Transfer-Encoding is reject ones that aren't just \"chunked\", so\n # both of these will be treated the same anyway.\n #\n # Expect: the only legal value is the literal string\n # \"100-continue\". Splitting on commas is harmless. Case insensitive.\n #\n out: List[bytes] = []\n for _, found_name, found_raw_value in headers._full_items:\n if found_name == name:\n found_raw_value = found_raw_value.lower()\n for found_split_value in found_raw_value.split(b\",\"):\n found_split_value = found_split_value.strip()\n if found_split_value:\n out.append(found_split_value)\n return out" }, { "identifier": "has_expect_100_continue", "path": "backend/venv/lib/python3.10/site-packages/h11/_headers.py", "snippet": "def has_expect_100_continue(request: \"Request\") -> bool:\n # https://tools.ietf.org/html/rfc7231#section-5.1.1\n # \"A server that receives a 100-continue expectation in an HTTP/1.0 request\n # MUST ignore that expectation.\"\n if request.http_version < b\"1.1\":\n return False\n expect = get_comma_header(request.headers, b\"expect\")\n return b\"100-continue\" in expect" }, { "identifier": "set_comma_header", "path": "backend/venv/lib/python3.10/site-packages/h11/_headers.py", "snippet": "def set_comma_header(headers: Headers, name: bytes, new_values: List[bytes]) -> Headers:\n # The header name `name` is expected to be lower-case bytes.\n #\n # Note that when we store the header we use title casing for the header\n # names, in order to match the conventional HTTP header style.\n #\n # Simply calling `.title()` is a blunt approach, but it's correct\n # here given the cases where we're using `set_comma_header`...\n #\n # Connection, Content-Length, Transfer-Encoding.\n new_headers: List[Tuple[bytes, bytes]] = []\n for found_raw_name, found_name, found_raw_value in headers._full_items:\n if found_name != name:\n new_headers.append((found_raw_name, found_raw_value))\n for new_value in new_values:\n new_headers.append((name.title(), new_value))\n return normalize_and_validate(new_headers)" }, { "identifier": "READERS", "path": 
"backend/venv/lib/python3.10/site-packages/h11/_readers.py", "snippet": "def _obsolete_line_fold(lines: Iterable[bytes]) -> Iterable[bytes]:\ndef _decode_header_lines(\n lines: Iterable[bytes],\n) -> Iterable[Tuple[bytes, bytes]]:\ndef maybe_read_from_IDLE_client(buf: ReceiveBuffer) -> Optional[Request]:\ndef maybe_read_from_SEND_RESPONSE_server(\n buf: ReceiveBuffer,\n) -> Union[InformationalResponse, Response, None]:\n def __init__(self, length: int) -> None:\n def __call__(self, buf: ReceiveBuffer) -> Union[Data, EndOfMessage, None]:\n def read_eof(self) -> NoReturn:\n def __init__(self) -> None:\n def __call__(self, buf: ReceiveBuffer) -> Union[Data, EndOfMessage, None]:\n def read_eof(self) -> NoReturn:\n def __call__(self, buf: ReceiveBuffer) -> Optional[Data]:\n def read_eof(self) -> EndOfMessage:\ndef expect_nothing(buf: ReceiveBuffer) -> None:\nclass ContentLengthReader:\nclass ChunkedReader:\nclass Http10Reader:\nREADERS: ReadersType = {\n (CLIENT, IDLE): maybe_read_from_IDLE_client,\n (SERVER, IDLE): maybe_read_from_SEND_RESPONSE_server,\n (SERVER, SEND_RESPONSE): maybe_read_from_SEND_RESPONSE_server,\n (CLIENT, DONE): expect_nothing,\n (CLIENT, MUST_CLOSE): expect_nothing,\n (CLIENT, CLOSED): expect_nothing,\n (SERVER, DONE): expect_nothing,\n (SERVER, MUST_CLOSE): expect_nothing,\n (SERVER, CLOSED): expect_nothing,\n SEND_BODY: {\n \"chunked\": ChunkedReader,\n \"content-length\": ContentLengthReader,\n \"http/1.0\": Http10Reader,\n },\n}" }, { "identifier": "ReceiveBuffer", "path": "backend/venv/lib/python3.10/site-packages/h11/_receivebuffer.py", "snippet": "class ReceiveBuffer:\n def __init__(self) -> None:\n self._data = bytearray()\n self._next_line_search = 0\n self._multiple_lines_search = 0\n\n def __iadd__(self, byteslike: Union[bytes, bytearray]) -> \"ReceiveBuffer\":\n self._data += byteslike\n return self\n\n def __bool__(self) -> bool:\n return bool(len(self))\n\n def __len__(self) -> int:\n return len(self._data)\n\n # for @property unprocessed_data\n def __bytes__(self) -> bytes:\n return bytes(self._data)\n\n def _extract(self, count: int) -> bytearray:\n # extracting an initial slice of the data buffer and return it\n out = self._data[:count]\n del self._data[:count]\n\n self._next_line_search = 0\n self._multiple_lines_search = 0\n\n return out\n\n def maybe_extract_at_most(self, count: int) -> Optional[bytearray]:\n \"\"\"\n Extract a fixed number of bytes from the buffer.\n \"\"\"\n out = self._data[:count]\n if not out:\n return None\n\n return self._extract(count)\n\n def maybe_extract_next_line(self) -> Optional[bytearray]:\n \"\"\"\n Extract the first line, if it is completed in the buffer.\n \"\"\"\n # Only search in buffer space that we've not already looked at.\n search_start_index = max(0, self._next_line_search - 1)\n partial_idx = self._data.find(b\"\\r\\n\", search_start_index)\n\n if partial_idx == -1:\n self._next_line_search = len(self._data)\n return None\n\n # + 2 is to compensate len(b\"\\r\\n\")\n idx = partial_idx + 2\n\n return self._extract(idx)\n\n def maybe_extract_lines(self) -> Optional[List[bytearray]]:\n \"\"\"\n Extract everything up to the first blank line, and return a list of lines.\n \"\"\"\n # Handle the case where we have an immediate empty line.\n if self._data[:1] == b\"\\n\":\n self._extract(1)\n return []\n\n if self._data[:2] == b\"\\r\\n\":\n self._extract(2)\n return []\n\n # Only search in buffer space that we've not already looked at.\n match = blank_line_regex.search(self._data, self._multiple_lines_search)\n if 
match is None:\n self._multiple_lines_search = max(0, len(self._data) - 2)\n return None\n\n # Truncate the buffer and return it.\n idx = match.span(0)[-1]\n out = self._extract(idx)\n lines = out.split(b\"\\n\")\n\n for line in lines:\n if line.endswith(b\"\\r\"):\n del line[-1]\n\n assert lines[-2] == lines[-1] == b\"\"\n\n del lines[-2:]\n\n return lines\n\n # In theory we should wait until `\\r\\n` before starting to validate\n # incoming data. However it's interesting to detect (very) invalid data\n # early given they might not even contain `\\r\\n` at all (hence only\n # timeout will get rid of them).\n # This is not a 100% effective detection but more of a cheap sanity check\n # allowing for early abort in some useful cases.\n # This is especially interesting when peer is messing up with HTTPS and\n # sent us a TLS stream where we were expecting plain HTTP given all\n # versions of TLS so far start handshake with a 0x16 message type code.\n def is_next_line_obviously_invalid_request_line(self) -> bool:\n try:\n # HTTP header line must not contain non-printable characters\n # and should not start with a space\n return self._data[0] < 0x21\n except IndexError:\n return False" }, { "identifier": "_SWITCH_CONNECT", "path": "backend/venv/lib/python3.10/site-packages/h11/_state.py", "snippet": "class _SWITCH_CONNECT(Sentinel, metaclass=Sentinel):\n pass" }, { "identifier": "_SWITCH_UPGRADE", "path": "backend/venv/lib/python3.10/site-packages/h11/_state.py", "snippet": "class _SWITCH_UPGRADE(Sentinel, metaclass=Sentinel):\n pass" }, { "identifier": "CLIENT", "path": "backend/venv/lib/python3.10/site-packages/h11/_state.py", "snippet": "class CLIENT(Sentinel, metaclass=Sentinel):\n pass" }, { "identifier": "ConnectionState", "path": "backend/venv/lib/python3.10/site-packages/h11/_state.py", "snippet": "class ConnectionState:\n def __init__(self) -> None:\n # Extra bits of state that don't quite fit into the state model.\n\n # If this is False then it enables the automatic DONE -> MUST_CLOSE\n # transition. 
Don't set this directly; call .keep_alive_disabled()\n self.keep_alive = True\n\n # This is a subset of {UPGRADE, CONNECT}, containing the proposals\n # made by the client for switching protocols.\n self.pending_switch_proposals: Set[Type[Sentinel]] = set()\n\n self.states: Dict[Type[Sentinel], Type[Sentinel]] = {CLIENT: IDLE, SERVER: IDLE}\n\n def process_error(self, role: Type[Sentinel]) -> None:\n self.states[role] = ERROR\n self._fire_state_triggered_transitions()\n\n def process_keep_alive_disabled(self) -> None:\n self.keep_alive = False\n self._fire_state_triggered_transitions()\n\n def process_client_switch_proposal(self, switch_event: Type[Sentinel]) -> None:\n self.pending_switch_proposals.add(switch_event)\n self._fire_state_triggered_transitions()\n\n def process_event(\n self,\n role: Type[Sentinel],\n event_type: Type[Event],\n server_switch_event: Optional[Type[Sentinel]] = None,\n ) -> None:\n _event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]] = event_type\n if server_switch_event is not None:\n assert role is SERVER\n if server_switch_event not in self.pending_switch_proposals:\n raise LocalProtocolError(\n \"Received server {} event without a pending proposal\".format(\n server_switch_event\n )\n )\n _event_type = (event_type, server_switch_event)\n if server_switch_event is None and _event_type is Response:\n self.pending_switch_proposals = set()\n self._fire_event_triggered_transitions(role, _event_type)\n # Special case: the server state does get to see Request\n # events.\n if _event_type is Request:\n assert role is CLIENT\n self._fire_event_triggered_transitions(SERVER, (Request, CLIENT))\n self._fire_state_triggered_transitions()\n\n def _fire_event_triggered_transitions(\n self,\n role: Type[Sentinel],\n event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]],\n ) -> None:\n state = self.states[role]\n try:\n new_state = EVENT_TRIGGERED_TRANSITIONS[role][state][event_type]\n except KeyError:\n event_type = cast(Type[Event], event_type)\n raise LocalProtocolError(\n \"can't handle event type {} when role={} and state={}\".format(\n event_type.__name__, role, self.states[role]\n )\n ) from None\n self.states[role] = new_state\n\n def _fire_state_triggered_transitions(self) -> None:\n # We apply these rules repeatedly until converging on a fixed point\n while True:\n start_states = dict(self.states)\n\n # It could happen that both these special-case transitions are\n # enabled at the same time:\n #\n # DONE -> MIGHT_SWITCH_PROTOCOL\n # DONE -> MUST_CLOSE\n #\n # For example, this will always be true of a HTTP/1.0 client\n # requesting CONNECT. If this happens, the protocol switch takes\n # priority. 
From there the client will either go to\n # SWITCHED_PROTOCOL, in which case it's none of our business when\n # they close the connection, or else the server will deny the\n # request, in which case the client will go back to DONE and then\n # from there to MUST_CLOSE.\n if self.pending_switch_proposals:\n if self.states[CLIENT] is DONE:\n self.states[CLIENT] = MIGHT_SWITCH_PROTOCOL\n\n if not self.pending_switch_proposals:\n if self.states[CLIENT] is MIGHT_SWITCH_PROTOCOL:\n self.states[CLIENT] = DONE\n\n if not self.keep_alive:\n for role in (CLIENT, SERVER):\n if self.states[role] is DONE:\n self.states[role] = MUST_CLOSE\n\n # Tabular state-triggered transitions\n joint_state = (self.states[CLIENT], self.states[SERVER])\n changes = STATE_TRIGGERED_TRANSITIONS.get(joint_state, {})\n self.states.update(changes)\n\n if self.states == start_states:\n # Fixed point reached\n return\n\n def start_next_cycle(self) -> None:\n if self.states != {CLIENT: DONE, SERVER: DONE}:\n raise LocalProtocolError(\n \"not in a reusable state. self.states={}\".format(self.states)\n )\n # Can't reach DONE/DONE with any of these active, but still, let's be\n # sure.\n assert self.keep_alive\n assert not self.pending_switch_proposals\n self.states = {CLIENT: IDLE, SERVER: IDLE}" }, { "identifier": "DONE", "path": "backend/venv/lib/python3.10/site-packages/h11/_state.py", "snippet": "class DONE(Sentinel, metaclass=Sentinel):\n pass" }, { "identifier": "ERROR", "path": "backend/venv/lib/python3.10/site-packages/h11/_state.py", "snippet": "class ERROR(Sentinel, metaclass=Sentinel):\n pass" }, { "identifier": "MIGHT_SWITCH_PROTOCOL", "path": "backend/venv/lib/python3.10/site-packages/h11/_state.py", "snippet": "class MIGHT_SWITCH_PROTOCOL(Sentinel, metaclass=Sentinel):\n pass" }, { "identifier": "SEND_BODY", "path": "backend/venv/lib/python3.10/site-packages/h11/_state.py", "snippet": "class SEND_BODY(Sentinel, metaclass=Sentinel):\n pass" }, { "identifier": "SERVER", "path": "backend/venv/lib/python3.10/site-packages/h11/_state.py", "snippet": "class SERVER(Sentinel, metaclass=Sentinel):\n pass" }, { "identifier": "SWITCHED_PROTOCOL", "path": "backend/venv/lib/python3.10/site-packages/h11/_state.py", "snippet": "class SWITCHED_PROTOCOL(Sentinel, metaclass=Sentinel):\n pass" }, { "identifier": "LocalProtocolError", "path": "backend/venv/lib/python3.10/site-packages/h11/_util.py", "snippet": "class LocalProtocolError(ProtocolError):\n def _reraise_as_remote_protocol_error(self) -> NoReturn:\n # After catching a LocalProtocolError, use this method to re-raise it\n # as a RemoteProtocolError. This method must be called from inside an\n # except: block.\n #\n # An easy way to get an equivalent RemoteProtocolError is just to\n # modify 'self' in place.\n self.__class__ = RemoteProtocolError # type: ignore\n # But the re-raising is somewhat non-trivial -- you might think that\n # now that we've modified the in-flight exception object, that just\n # doing 'raise' to re-raise it would be enough. But it turns out that\n # this doesn't work, because Python tracks the exception type\n # (exc_info[0]) separately from the exception object (exc_info[1]),\n # and we only modified the latter. 
So we really do need to re-raise\n # the new type explicitly.\n # On py3, the traceback is part of the exception object, so our\n # in-place modification preserved it and we can just re-raise:\n raise self" }, { "identifier": "RemoteProtocolError", "path": "backend/venv/lib/python3.10/site-packages/h11/_util.py", "snippet": "class RemoteProtocolError(ProtocolError):\n pass" }, { "identifier": "Sentinel", "path": "backend/venv/lib/python3.10/site-packages/h11/_util.py", "snippet": "class Sentinel(type):\n def __new__(\n cls: Type[_T_Sentinel],\n name: str,\n bases: Tuple[type, ...],\n namespace: Dict[str, Any],\n **kwds: Any\n ) -> _T_Sentinel:\n assert bases == (Sentinel,)\n v = super().__new__(cls, name, bases, namespace, **kwds)\n v.__class__ = v # type: ignore\n return v\n\n def __repr__(self) -> str:\n return self.__name__" }, { "identifier": "WRITERS", "path": "backend/venv/lib/python3.10/site-packages/h11/_writers.py", "snippet": "def write_headers(headers: Headers, write: Writer) -> None:\ndef write_request(request: Request, write: Writer) -> None:\ndef write_any_response(\n response: Union[InformationalResponse, Response], write: Writer\n) -> None:\n def __call__(self, event: Event, write: Writer) -> None:\n def send_data(self, data: bytes, write: Writer) -> None:\n def send_eom(self, headers: Headers, write: Writer) -> None:\n def __init__(self, length: int) -> None:\n def send_data(self, data: bytes, write: Writer) -> None:\n def send_eom(self, headers: Headers, write: Writer) -> None:\n def send_data(self, data: bytes, write: Writer) -> None:\n def send_eom(self, headers: Headers, write: Writer) -> None:\n def send_data(self, data: bytes, write: Writer) -> None:\n def send_eom(self, headers: Headers, write: Writer) -> None:\nclass BodyWriter:\nclass ContentLengthWriter(BodyWriter):\nclass ChunkedWriter(BodyWriter):\nclass Http10Writer(BodyWriter):\nWRITERS: WritersType = {\n (CLIENT, IDLE): write_request,\n (SERVER, IDLE): write_any_response,\n (SERVER, SEND_RESPONSE): write_any_response,\n SEND_BODY: {\n \"chunked\": ChunkedWriter,\n \"content-length\": ContentLengthWriter,\n \"http/1.0\": Http10Writer,\n },\n}" } ]
from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Type, Union from ._events import ( ConnectionClosed, Data, EndOfMessage, Event, InformationalResponse, Request, Response, ) from ._headers import get_comma_header, has_expect_100_continue, set_comma_header from ._readers import READERS, ReadersType from ._receivebuffer import ReceiveBuffer from ._state import ( _SWITCH_CONNECT, _SWITCH_UPGRADE, CLIENT, ConnectionState, DONE, ERROR, MIGHT_SWITCH_PROTOCOL, SEND_BODY, SERVER, SWITCHED_PROTOCOL, ) from ._util import ( # Import the internal things we need LocalProtocolError, RemoteProtocolError, Sentinel, ) from ._writers import WRITERS, WritersType
10,206
See :ref:`switching-protocols` for discussion of why you'd want this. """ return (bytes(self._receive_buffer), self._receive_buffer_closed) def receive_data(self, data: bytes) -> None: """Add data to our internal receive buffer. This does not actually do any processing on the data, just stores it. To trigger processing, you have to call :meth:`next_event`. Args: data (:term:`bytes-like object`): The new data that was just received. Special case: If *data* is an empty byte-string like ``b""``, then this indicates that the remote side has closed the connection (end of file). Normally this is convenient, because standard Python APIs like :meth:`file.read` or :meth:`socket.recv` use ``b""`` to indicate end-of-file, while other failures to read are indicated using other mechanisms like raising :exc:`TimeoutError`. When using such an API you can just blindly pass through whatever you get from ``read`` to :meth:`receive_data`, and everything will work. But, if you have an API where reading an empty string is a valid non-EOF condition, then you need to be aware of this and make sure to check for such strings and avoid passing them to :meth:`receive_data`. Returns: Nothing, but after calling this you should call :meth:`next_event` to parse the newly received data. Raises: RuntimeError: Raised if you pass an empty *data*, indicating EOF, and then pass a non-empty *data*, indicating more data that somehow arrived after the EOF. (Calling ``receive_data(b"")`` multiple times is fine, and equivalent to calling it once.) """ if data: if self._receive_buffer_closed: raise RuntimeError("received close, then received more data?") self._receive_buffer += data else: self._receive_buffer_closed = True def _extract_next_receive_event( self, ) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]: state = self.their_state # We don't pause immediately when they enter DONE, because even in # DONE state we can still process a ConnectionClosed() event. But # if we have data in our buffer, then we definitely aren't getting # a ConnectionClosed() immediately and we need to pause. if state is DONE and self._receive_buffer: return PAUSED if state is MIGHT_SWITCH_PROTOCOL or state is SWITCHED_PROTOCOL: return PAUSED assert self._reader is not None event = self._reader(self._receive_buffer) if event is None: if not self._receive_buffer and self._receive_buffer_closed: # In some unusual cases (basically just HTTP/1.0 bodies), EOF # triggers an actual protocol event; in that case, we want to # return that event, and then the state will change and we'll # get called again to generate the actual ConnectionClosed(). if hasattr(self._reader, "read_eof"): event = self._reader.read_eof() # type: ignore[attr-defined] else: event = ConnectionClosed() if event is None: event = NEED_DATA return event # type: ignore[no-any-return] def next_event(self) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]: """Parse the next event out of our receive buffer, update our internal state, and return it. This is a mutating operation -- think of it like calling :func:`next` on an iterator. Returns: : One of three things: 1) An event object -- see :ref:`events`. 2) The special constant :data:`NEED_DATA`, which indicates that you need to read more data from your socket and pass it to :meth:`receive_data` before this method will be able to return any more events. 
3) The special constant :data:`PAUSED`, which indicates that we are not in a state where we can process incoming data (usually because the peer has finished their part of the current request/response cycle, and you have not yet called :meth:`start_next_cycle`). See :ref:`flow-control` for details. Raises: RemoteProtocolError: The peer has misbehaved. You should close the connection (possibly after sending some kind of 4xx response). Once this method returns :class:`ConnectionClosed` once, then all subsequent calls will also return :class:`ConnectionClosed`. If this method raises any exception besides :exc:`RemoteProtocolError` then that's a bug -- if it happens please file a bug report! If this method raises any exception then it also sets :attr:`Connection.their_state` to :data:`ERROR` -- see :ref:`error-handling` for discussion. """ if self.their_state is ERROR:
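Because this record's target file is h11's _connection.py, a short client-side sketch of the public Connection API that the docstrings above describe (receive_data feeds the buffer, next_event drains it) may be useful; the response bytes below are invented purely for illustration.

import h11

conn = h11.Connection(our_role=h11.CLIENT)
# Serialize a request; in real code these bytes would be written to a socket.
wire = conn.send(h11.Request(method="GET", target="/",
                             headers=[("Host", "example.com")]))
wire += conn.send(h11.EndOfMessage())

# Feed (fabricated) response bytes into the receive buffer, then drain events.
conn.receive_data(b"HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nok")
while True:
    event = conn.next_event()
    if event is h11.NEED_DATA:
        break  # would read more bytes from the socket here
    print(event)
    if type(event) is h11.EndOfMessage:
        break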
# This contains the main Connection class. Everything in h11 revolves around # this. # Everything in __all__ gets re-exported as part of the h11 public API. __all__ = ["Connection", "NEED_DATA", "PAUSED"] class NEED_DATA(Sentinel, metaclass=Sentinel): pass class PAUSED(Sentinel, metaclass=Sentinel): pass # If we ever have this much buffered without it making a complete parseable # event, we error out. The only time we really buffer is when reading the # request/response line + headers together, so this is effectively the limit on # the size of that. # # Some precedents for defaults: # - node.js: 80 * 1024 # - tomcat: 8 * 1024 # - IIS: 16 * 1024 # - Apache: <8 KiB per line> DEFAULT_MAX_INCOMPLETE_EVENT_SIZE = 16 * 1024 # RFC 7230's rules for connection lifecycles: # - If either side says they want to close the connection, then the connection # must close. # - HTTP/1.1 defaults to keep-alive unless someone says Connection: close # - HTTP/1.0 defaults to close unless both sides say Connection: keep-alive # (and even this is a mess -- e.g. if you're implementing a proxy then # sending Connection: keep-alive is forbidden). # # We simplify life by simply not supporting keep-alive with HTTP/1.0 peers. So # our rule is: # - If someone says Connection: close, we will close # - If someone uses HTTP/1.0, we will close. def _keep_alive(event: Union[Request, Response]) -> bool: connection = get_comma_header(event.headers, b"connection") if b"close" in connection: return False if getattr(event, "http_version", b"1.1") < b"1.1": return False return True def _body_framing( request_method: bytes, event: Union[Request, Response] ) -> Tuple[str, Union[Tuple[()], Tuple[int]]]: # Called when we enter SEND_BODY to figure out framing information for # this body. # # These are the only two events that can trigger a SEND_BODY state: assert type(event) in (Request, Response) # Returns one of: # # ("content-length", count) # ("chunked", ()) # ("http/1.0", ()) # # which are (lookup key, *args) for constructing body reader/writer # objects. # # Reference: https://tools.ietf.org/html/rfc7230#section-3.3.3 # # Step 1: some responses always have an empty body, regardless of what the # headers say. if type(event) is Response: if ( event.status_code in (204, 304) or request_method == b"HEAD" or (request_method == b"CONNECT" and 200 <= event.status_code < 300) ): return ("content-length", (0,)) # Section 3.3.3 also lists another case -- responses with status_code # < 200. For us these are InformationalResponses, not Responses, so # they can't get into this function in the first place. assert event.status_code >= 200 # Step 2: check for Transfer-Encoding (T-E beats C-L): transfer_encodings = get_comma_header(event.headers, b"transfer-encoding") if transfer_encodings: assert transfer_encodings == [b"chunked"] return ("chunked", ()) # Step 3: check for Content-Length content_lengths = get_comma_header(event.headers, b"content-length") if content_lengths: return ("content-length", (int(content_lengths[0]),)) # Step 4: no applicable headers; fallback/default depends on type if type(event) is Request: return ("content-length", (0,)) else: return ("http/1.0", ()) ################################################################ # # The main Connection class # ################################################################ class Connection: """An object encapsulating the state of an HTTP connection. Args: our_role: If you're implementing a client, pass :data:`h11.CLIENT`. If you're implementing a server, pass :data:`h11.SERVER`. 
max_incomplete_event_size (int): The maximum number of bytes we're willing to buffer of an incomplete event. In practice this mostly sets a limit on the maximum size of the request/response line + headers. If this is exceeded, then :meth:`next_event` will raise :exc:`RemoteProtocolError`. """ def __init__( self, our_role: Type[Sentinel], max_incomplete_event_size: int = DEFAULT_MAX_INCOMPLETE_EVENT_SIZE, ) -> None: self._max_incomplete_event_size = max_incomplete_event_size # State and role tracking if our_role not in (CLIENT, SERVER): raise ValueError("expected CLIENT or SERVER, not {!r}".format(our_role)) self.our_role = our_role self.their_role: Type[Sentinel] if our_role is CLIENT: self.their_role = SERVER else: self.their_role = CLIENT self._cstate = ConnectionState() # Callables for converting data->events or vice-versa given the # current state self._writer = self._get_io_object(self.our_role, None, WRITERS) self._reader = self._get_io_object(self.their_role, None, READERS) # Holds any unprocessed received data self._receive_buffer = ReceiveBuffer() # If this is true, then it indicates that the incoming connection was # closed *after* the end of whatever's in self._receive_buffer: self._receive_buffer_closed = False # Extra bits of state that don't fit into the state machine. # # These two are only used to interpret framing headers for figuring # out how to read/write response bodies. their_http_version is also # made available as a convenient public API. self.their_http_version: Optional[bytes] = None self._request_method: Optional[bytes] = None # This is pure flow-control and doesn't at all affect the set of legal # transitions, so no need to bother ConnectionState with it: self.client_is_waiting_for_100_continue = False @property def states(self) -> Dict[Type[Sentinel], Type[Sentinel]]: """A dictionary like:: {CLIENT: <client state>, SERVER: <server state>} See :ref:`state-machine` for details. """ return dict(self._cstate.states) @property def our_state(self) -> Type[Sentinel]: """The current state of whichever role we are playing. See :ref:`state-machine` for details. """ return self._cstate.states[self.our_role] @property def their_state(self) -> Type[Sentinel]: """The current state of whichever role we are NOT playing. See :ref:`state-machine` for details. """ return self._cstate.states[self.their_role] @property def they_are_waiting_for_100_continue(self) -> bool: return self.their_role is CLIENT and self.client_is_waiting_for_100_continue def start_next_cycle(self) -> None: """Attempt to reset our connection state for a new request/response cycle. If both client and server are in :data:`DONE` state, then resets them both to :data:`IDLE` state in preparation for a new request/response cycle on this same connection. Otherwise, raises a :exc:`LocalProtocolError`. See :ref:`keepalive-and-pipelining`. 
""" old_states = dict(self._cstate.states) self._cstate.start_next_cycle() self._request_method = None # self.their_http_version gets left alone, since it presumably lasts # beyond a single request/response cycle assert not self.client_is_waiting_for_100_continue self._respond_to_state_changes(old_states) def _process_error(self, role: Type[Sentinel]) -> None: old_states = dict(self._cstate.states) self._cstate.process_error(role) self._respond_to_state_changes(old_states) def _server_switch_event(self, event: Event) -> Optional[Type[Sentinel]]: if type(event) is InformationalResponse and event.status_code == 101: return _SWITCH_UPGRADE if type(event) is Response: if ( _SWITCH_CONNECT in self._cstate.pending_switch_proposals and 200 <= event.status_code < 300 ): return _SWITCH_CONNECT return None # All events go through here def _process_event(self, role: Type[Sentinel], event: Event) -> None: # First, pass the event through the state machine to make sure it # succeeds. old_states = dict(self._cstate.states) if role is CLIENT and type(event) is Request: if event.method == b"CONNECT": self._cstate.process_client_switch_proposal(_SWITCH_CONNECT) if get_comma_header(event.headers, b"upgrade"): self._cstate.process_client_switch_proposal(_SWITCH_UPGRADE) server_switch_event = None if role is SERVER: server_switch_event = self._server_switch_event(event) self._cstate.process_event(role, type(event), server_switch_event) # Then perform the updates triggered by it. if type(event) is Request: self._request_method = event.method if role is self.their_role and type(event) in ( Request, Response, InformationalResponse, ): event = cast(Union[Request, Response, InformationalResponse], event) self.their_http_version = event.http_version # Keep alive handling # # RFC 7230 doesn't really say what one should do if Connection: close # shows up on a 1xx InformationalResponse. I think the idea is that # this is not supposed to happen. In any case, if it does happen, we # ignore it. if type(event) in (Request, Response) and not _keep_alive( cast(Union[Request, Response], event) ): self._cstate.process_keep_alive_disabled() # 100-continue if type(event) is Request and has_expect_100_continue(event): self.client_is_waiting_for_100_continue = True if type(event) in (InformationalResponse, Response): self.client_is_waiting_for_100_continue = False if role is CLIENT and type(event) in (Data, EndOfMessage): self.client_is_waiting_for_100_continue = False self._respond_to_state_changes(old_states, event) def _get_io_object( self, role: Type[Sentinel], event: Optional[Event], io_dict: Union[ReadersType, WritersType], ) -> Optional[Callable[..., Any]]: # event may be None; it's only used when entering SEND_BODY state = self._cstate.states[role] if state is SEND_BODY: # Special case: the io_dict has a dict of reader/writer factories # that depend on the request/response framing. framing_type, args = _body_framing( cast(bytes, self._request_method), cast(Union[Request, Response], event) ) return io_dict[SEND_BODY][framing_type](*args) # type: ignore[index] else: # General case: the io_dict just has the appropriate reader/writer # for this state return io_dict.get((role, state)) # type: ignore[return-value] # This must be called after any action that might have caused # self._cstate.states to change. 
def _respond_to_state_changes( self, old_states: Dict[Type[Sentinel], Type[Sentinel]], event: Optional[Event] = None, ) -> None: # Update reader/writer if self.our_state != old_states[self.our_role]: self._writer = self._get_io_object(self.our_role, event, WRITERS) if self.their_state != old_states[self.their_role]: self._reader = self._get_io_object(self.their_role, event, READERS) @property def trailing_data(self) -> Tuple[bytes, bool]: """Data that has been received, but not yet processed, represented as a tuple with two elements, where the first is a byte-string containing the unprocessed data itself, and the second is a bool that is True if the receive connection was closed. See :ref:`switching-protocols` for discussion of why you'd want this. """ return (bytes(self._receive_buffer), self._receive_buffer_closed) def receive_data(self, data: bytes) -> None: """Add data to our internal receive buffer. This does not actually do any processing on the data, just stores it. To trigger processing, you have to call :meth:`next_event`. Args: data (:term:`bytes-like object`): The new data that was just received. Special case: If *data* is an empty byte-string like ``b""``, then this indicates that the remote side has closed the connection (end of file). Normally this is convenient, because standard Python APIs like :meth:`file.read` or :meth:`socket.recv` use ``b""`` to indicate end-of-file, while other failures to read are indicated using other mechanisms like raising :exc:`TimeoutError`. When using such an API you can just blindly pass through whatever you get from ``read`` to :meth:`receive_data`, and everything will work. But, if you have an API where reading an empty string is a valid non-EOF condition, then you need to be aware of this and make sure to check for such strings and avoid passing them to :meth:`receive_data`. Returns: Nothing, but after calling this you should call :meth:`next_event` to parse the newly received data. Raises: RuntimeError: Raised if you pass an empty *data*, indicating EOF, and then pass a non-empty *data*, indicating more data that somehow arrived after the EOF. (Calling ``receive_data(b"")`` multiple times is fine, and equivalent to calling it once.) """ if data: if self._receive_buffer_closed: raise RuntimeError("received close, then received more data?") self._receive_buffer += data else: self._receive_buffer_closed = True def _extract_next_receive_event( self, ) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]: state = self.their_state # We don't pause immediately when they enter DONE, because even in # DONE state we can still process a ConnectionClosed() event. But # if we have data in our buffer, then we definitely aren't getting # a ConnectionClosed() immediately and we need to pause. if state is DONE and self._receive_buffer: return PAUSED if state is MIGHT_SWITCH_PROTOCOL or state is SWITCHED_PROTOCOL: return PAUSED assert self._reader is not None event = self._reader(self._receive_buffer) if event is None: if not self._receive_buffer and self._receive_buffer_closed: # In some unusual cases (basically just HTTP/1.0 bodies), EOF # triggers an actual protocol event; in that case, we want to # return that event, and then the state will change and we'll # get called again to generate the actual ConnectionClosed(). 
if hasattr(self._reader, "read_eof"): event = self._reader.read_eof() # type: ignore[attr-defined] else: event = ConnectionClosed() if event is None: event = NEED_DATA return event # type: ignore[no-any-return] def next_event(self) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]: """Parse the next event out of our receive buffer, update our internal state, and return it. This is a mutating operation -- think of it like calling :func:`next` on an iterator. Returns: : One of three things: 1) An event object -- see :ref:`events`. 2) The special constant :data:`NEED_DATA`, which indicates that you need to read more data from your socket and pass it to :meth:`receive_data` before this method will be able to return any more events. 3) The special constant :data:`PAUSED`, which indicates that we are not in a state where we can process incoming data (usually because the peer has finished their part of the current request/response cycle, and you have not yet called :meth:`start_next_cycle`). See :ref:`flow-control` for details. Raises: RemoteProtocolError: The peer has misbehaved. You should close the connection (possibly after sending some kind of 4xx response). Once this method returns :class:`ConnectionClosed` once, then all subsequent calls will also return :class:`ConnectionClosed`. If this method raises any exception besides :exc:`RemoteProtocolError` then that's a bug -- if it happens please file a bug report! If this method raises any exception then it also sets :attr:`Connection.their_state` to :data:`ERROR` -- see :ref:`error-handling` for discussion. """ if self.their_state is ERROR:
raise RemoteProtocolError("Can't receive data when peer state is ERROR")
23
2023-10-23 18:09:28+00:00
12k
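The h11 Connection code in this record is driven through the receive_data / next_event loop described in its docstrings. Below is a minimal, hedged sketch of that loop; the raw request bytes are an illustrative assumption and not part of the record, and only calls that appear in the code above are used.

# Sketch: feed bytes into a server-side Connection and drain parsed events.
import h11

conn = h11.Connection(our_role=h11.SERVER)

# Pretend these bytes just arrived from a socket read.
conn.receive_data(b"GET / HTTP/1.1\r\nHost: example.com\r\nConnection: close\r\n\r\n")

while True:
    event = conn.next_event()
    if event is h11.NEED_DATA:
        # A real server would read more bytes from the socket here and
        # call conn.receive_data() again before retrying next_event().
        break
    if type(event) is h11.ConnectionClosed:
        break
    print(type(event).__name__)  # Request, then EndOfMessage for this input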
f0uriest/quadax
tests/test_adaptive.py
[ { "identifier": "romberg", "path": "quadax/romberg.py", "snippet": "def romberg(\n fun,\n interval,\n args=(),\n full_output=False,\n epsabs=1.4e-8,\n epsrel=1.4e-8,\n divmax=20,\n norm=jnp.inf,\n):\n \"\"\"Romberg integration of a callable function or method.\n\n Returns the integral of `fun` (a function of one variable) over `interval`.\n\n Good for non-smooth or piecewise smooth integrands.\n\n Not recommended for infinite intervals, or functions with singularities.\n\n Parameters\n ----------\n fun : callable\n Function to integrate, should have a signature of the form\n ``fun(x, *args)`` -> float, Array. Should be JAX transformable.\n interval : array-like\n Lower and upper limits of integration. Use np.inf to denote infinite intervals.\n args : tuple\n additional arguments passed to fun\n full_output : bool, optional\n If True, return the full state of the integrator. See below for more\n information.\n epsabs, epsrel : float\n Absolute and relative tolerances. If I1 and I2 are two\n successive approximations to the integral, algorithm terminates\n when abs(I1-I2) < max(epsabs, epsrel*|I2|)\n divmax : int, optional\n Maximum order of extrapolation. Default is 20.\n Total number of function evaluations will be at\n most 2**divmax + 1\n norm : int, callable\n Norm to use for measuring error for vector valued integrands. No effect if the\n integrand is scalar valued. If an int, uses p-norm of the given order, otherwise\n should be callable.\n\n Returns\n -------\n y : float, Array\n Approximation to the integral\n info : QuadratureInfo\n Named tuple with the following fields:\n\n * err : (float) Estimate of the error in the approximation.\n * neval : (int) Total number of function evaluations.\n * status : (int) Flag indicating reason for termination. status of 0 means\n normal termination, any other value indicates a possible error. A human\n readable message can be obtained by ``print(quadax.STATUS[status])``\n * info : (dict or None) Other information returned by the algorithm.\n Only present if ``full_output`` is True. Contains the following:\n\n * table : (ndarray, size(dixmax+1, divmax+1, ...)) Estimate of the integral\n from each level of discretization and each step of extrapolation.\n\n Notes\n -----\n Due to limitations on dynamically sized arrays in JAX, this algorithm is fully\n sequential and does not vectorize integrand evaluations, so may not be the most\n efficient on GPU/TPU.\n\n Also, it is currently only forward mode differentiable.\n\n \"\"\"\n errorif(\n len(interval) != 2,\n NotImplementedError,\n \"Romberg integration with breakpoints not supported\",\n )\n _norm = norm if callable(norm) else lambda x: jnp.linalg.norm(x.flatten(), ord=norm)\n # map a, b -> [-1, 1]\n fun, interval = map_interval(fun, interval)\n vfunc = wrap_func(fun, args)\n a, b = interval\n f = jax.eval_shape(vfunc, (a + b / 2))\n\n result = jnp.zeros((divmax + 1, divmax + 1, *f.shape), f.dtype)\n result = result.at[0, 0].set(vfunc(a) + vfunc(b))\n neval = 2\n err = jnp.inf\n state = (result, 1, neval, err)\n\n def ncond(state):\n result, n, neval, err = state\n return (n < divmax + 1) & (\n err > jnp.maximum(epsabs, epsrel * _norm(result[n, n]))\n )\n\n def nloop(state):\n # loop over outer number of subdivisions\n result, n, neval, err = state\n h = (b - a) / 2**n\n s = jnp.zeros(f.shape, f.dtype)\n\n def sloop(i, s):\n # loop to evaluate fun. 
Can't be vectorized due to different number\n # of evals per nloop step\n s += vfunc(a + h * (2 * i - 1))\n return s\n\n result = result.at[n, 0].set(\n 0.5 * result[n - 1, 0]\n + h * jax.lax.fori_loop(1, (2**n) // 2 + 1, sloop, s)\n )\n neval += (2**n) // 2\n\n def mloop(m, result):\n # richardson extrapolation\n temp = 1 / (4.0**m - 1.0) * (result[n, m - 1] - result[n - 1, m - 1])\n result = result.at[n, m].set(result[n, m - 1] + temp)\n return result\n\n result = jax.lax.fori_loop(1, n + 1, mloop, result)\n err = _norm(result[n, n] - result[n - 1, n - 1])\n return result, n + 1, neval, err\n\n result, n, neval, err = bounded_while_loop(ncond, nloop, state, divmax + 1)\n\n y = result[n - 1, n - 1]\n status = 2 * (err > jnp.maximum(epsabs, epsrel * _norm(y)))\n info = result if full_output else None\n out = QuadratureInfo(err, neval, status, info)\n return y, out" }, { "identifier": "rombergts", "path": "quadax/romberg.py", "snippet": "def rombergts(\n fun,\n interval,\n args=(),\n full_output=False,\n epsabs=1.4e-8,\n epsrel=1.4e-8,\n divmax=20,\n norm=jnp.inf,\n):\n \"\"\"Romberg integration with tanh-sinh (aka double exponential) transformation.\n\n Returns the integral of `fun` (a function of one variable) over `interval`.\n\n Performs well for functions with singularities at the endpoints or integration\n over infinite intervals. May be slightly less efficient than ``quadgk`` or\n ``quadcc`` for smooth integrands.\n\n Parameters\n ----------\n fun : callable\n Function to integrate, should have a signature of the form\n ``fun(x, *args)`` -> float, Array. Should be JAX transformable.\n interval : array-like\n Lower and upper limits of integration. Use np.inf to denote infinite intervals.\n args : tuple\n additional arguments passed to fun\n full_output : bool, optional\n If True, return the full state of the integrator. See below for more\n information.\n epsabs, epsrel : float\n Absolute and relative tolerances. If I1 and I2 are two\n successive approximations to the integral, algorithm terminates\n when abs(I1-I2) < max(epsabs, epsrel*|I2|)\n divmax : int, optional\n Maximum order of extrapolation. Default is 20.\n Total number of function evaluations will be at\n most 2**divmax + 1\n norm : int, callable\n Norm to use for measuring error for vector valued integrands. No effect if the\n integrand is scalar valued. If an int, uses p-norm of the given order, otherwise\n should be callable.\n\n\n Returns\n -------\n y : float, Array\n Approximation to the integral\n info : QuadratureInfo\n Named tuple with the following fields:\n\n * err : (float) Estimate of the error in the approximation.\n * neval : (int) Total number of function evaluations.\n * status : (int) Flag indicating reason for termination. status of 0 means\n normal termination, any other value indicates a possible error. A human\n readable message can be obtained by ``print(quadax.STATUS[status])``\n * info : (dict or None) Other information returned by the algorithm.\n Only present if ``full_output`` is True. 
Contains the following:\n\n * table : (ndarray, size(dixmax+1, divmax+1, ...)) Estimate of the integral\n from each level of discretization and each step of extrapolation.\n\n Notes\n -----\n Due to limitations on dynamically sized arrays in JAX, this algorithm is fully\n sequential and does not vectorize integrand evaluations, so may not be the most\n efficient on GPU/TPU.\n\n Also, it is currently only forward mode differentiable.\n\n \"\"\"\n fun, interval = tanhsinh_transform(fun, interval)\n return romberg(fun, interval, args, full_output, epsabs, epsrel, divmax, norm)" }, { "identifier": "quadcc", "path": "quadax/adaptive.py", "snippet": "def quadcc(\n fun,\n interval,\n args=(),\n full_output=False,\n epsabs=1.4e-8,\n epsrel=1.4e-8,\n max_ninter=50,\n order=32,\n norm=jnp.inf,\n):\n \"\"\"Global adaptive quadrature using Clenshaw-Curtis rule.\n\n Integrate fun from `interval[0]` to `interval[-1]` using a h-adaptive scheme with\n error estimate. Breakpoints can be specified in `interval` where integration\n difficulty may occur.\n\n A good general purpose integrator for most reasonably well behaved functions over\n finite or infinite intervals.\n\n Parameters\n ----------\n fun : callable\n Function to integrate, should have a signature of the form\n ``fun(x, *args)`` -> float, Array. Should be JAX transformable.\n interval : array-like\n Lower and upper limits of integration with possible breakpoints. Use np.inf to\n denote infinite intervals.\n args : tuple, optional\n Extra arguments passed to fun.\n full_output : bool, optional\n If True, return the full state of the integrator. See below for more\n information.\n epsabs, epsrel : float, optional\n Absolute and relative error tolerance. Default is 1.4e-8. Algorithm tries to\n obtain an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``\n where ``i`` = integral of `fun` over `interval`, and ``result`` is the\n numerical approximation.\n max_ninter : int, optional\n An upper bound on the number of sub-intervals used in the adaptive\n algorithm.\n order : {8, 16, 32, 64, 128, 256}\n Order of local integration rule.\n norm : int, callable\n Norm to use for measuring error for vector valued integrands. No effect if the\n integrand is scalar valued. If an int, uses p-norm of the given order, otherwise\n should be callable.\n\n Returns\n -------\n y : float, Array\n The integral of fun from `a` to `b`.\n info : QuadratureInfo\n Named tuple with the following fields:\n\n * err : (float) Estimate of the error in the approximation.\n * neval : (int) Total number of function evaluations.\n * status : (int) Flag indicating reason for termination. status of 0 means\n normal termination, any other value indicates a possible error. A human\n readable message can be obtained by ``print(quadax.STATUS[status])``\n * info : (dict or None) Other information returned by the algorithm.\n Only present if ``full_output`` is True. 
Contains the following:\n\n * 'ninter' : (int) The number, K, of sub-intervals produced in the\n subdivision process.\n * 'a_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the left end points of the (remapped) sub-intervals\n in the partition of the integration range.\n * 'b_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the right end points of the (remapped) sub-intervals.\n * 'r_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the integral approximations on the sub-intervals.\n * 'e_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the moduli of the absolute error estimates on the\n sub-intervals.\n\n Notes\n -----\n Adaptive algorithms are inherently somewhat sequential, so perfect parallelism\n is generally not achievable. The local quadrature rule vmaps integrand evaluation at\n ``order`` points, so using higher order methods will generally be more efficient on\n GPU/TPU.\n\n \"\"\"\n y, info = adaptive_quadrature(\n fixed_quadcc,\n fun,\n interval,\n args,\n full_output,\n epsabs,\n epsrel,\n max_ninter,\n n=order,\n norm=norm,\n )\n info = QuadratureInfo(info.err, info.neval * order, info.status, info.info)\n return y, info" }, { "identifier": "quadgk", "path": "quadax/adaptive.py", "snippet": "def quadgk(\n fun,\n interval,\n args=(),\n full_output=False,\n epsabs=1.4e-8,\n epsrel=1.4e-8,\n max_ninter=50,\n order=21,\n norm=jnp.inf,\n):\n \"\"\"Global adaptive quadrature using Gauss-Konrod rule.\n\n Integrate fun from `interval[0]` to `interval[-1]` using a h-adaptive scheme with\n error estimate. Breakpoints can be specified in `interval` where integration\n difficulty may occur.\n\n Basically the same as ``scipy.integrate.quad`` but without extrapolation. A good\n general purpose integrator for most reasonably well behaved functions over finite\n or infinite intervals.\n\n Parameters\n ----------\n fun : callable\n Function to integrate, should have a signature of the form\n ``fun(x, *args)`` -> float, Array. Should be JAX transformable.\n interval : array-like\n Lower and upper limits of integration with possible breakpoints. Use np.inf to\n denote infinite intervals.\n args : tuple, optional\n Extra arguments passed to fun.\n full_output : bool, optional\n If True, return the full state of the integrator. See below for more\n information.\n epsabs, epsrel : float, optional\n Absolute and relative error tolerance. Default is 1.4e-8. Algorithm tries to\n obtain an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``\n where ``i`` = integral of `fun` over `interval`, and ``result`` is the\n numerical approximation.\n max_ninter : int, optional\n An upper bound on the number of sub-intervals used in the adaptive\n algorithm.\n order : {15, 21, 31, 41, 51, 61}\n Order of local integration rule.\n norm : int, callable\n Norm to use for measuring error for vector valued integrands. No effect if the\n integrand is scalar valued. If an int, uses p-norm of the given order, otherwise\n should be callable.\n\n Returns\n -------\n y : float, Array\n The integral of fun from `a` to `b`.\n info : QuadratureInfo\n Named tuple with the following fields:\n\n * err : (float) Estimate of the error in the approximation.\n * neval : (int) Total number of function evaluations.\n * status : (int) Flag indicating reason for termination. status of 0 means\n normal termination, any other value indicates a possible error. 
A human\n readable message can be obtained by ``print(quadax.STATUS[status])``\n * info : (dict or None) Other information returned by the algorithm.\n Only present if ``full_output`` is True. Contains the following:\n\n * 'ninter' : (int) The number, K, of sub-intervals produced in the\n subdivision process.\n * 'a_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the left end points of the (remapped) sub-intervals\n in the partition of the integration range.\n * 'b_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the right end points of the (remapped) sub-intervals.\n * 'r_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the integral approximations on the sub-intervals.\n * 'e_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the moduli of the absolute error estimates on the\n sub-intervals.\n\n Notes\n -----\n Adaptive algorithms are inherently somewhat sequential, so perfect parallelism\n is generally not achievable. The local quadrature rule vmaps integrand evaluation at\n ``order`` points, so using higher order methods will generally be more efficient on\n GPU/TPU.\n\n \"\"\"\n y, info = adaptive_quadrature(\n fixed_quadgk,\n fun,\n interval,\n args,\n full_output,\n epsabs,\n epsrel,\n max_ninter,\n n=order,\n norm=norm,\n )\n info = QuadratureInfo(info.err, info.neval * order, info.status, info.info)\n return y, info" }, { "identifier": "quadts", "path": "quadax/adaptive.py", "snippet": "def quadts(\n fun,\n interval,\n args=(),\n full_output=False,\n epsabs=1.4e-8,\n epsrel=1.4e-8,\n max_ninter=50,\n order=61,\n norm=jnp.inf,\n):\n \"\"\"Global adaptive quadrature using trapezoidal tanh-sinh rule.\n\n Integrate fun from `interval[0]` to `interval[-1]` using a h-adaptive scheme with\n error estimate. Breakpoints can be specified in `interval` where integration\n difficulty may occur.\n\n Especially good for integrands with singular behavior at an endpoint.\n\n Parameters\n ----------\n fun : callable\n Function to integrate, should have a signature of the form\n ``fun(x, *args)`` -> float, Array. Should be JAX transformable.\n interval : array-like\n Lower and upper limits of integration with possible breakpoints. Use np.inf to\n denote infinite intervals.\n args : tuple, optional\n Extra arguments passed to fun.\n full_output : bool, optional\n If True, return the full state of the integrator. See below for more\n information.\n epsabs, epsrel : float, optional\n Absolute and relative error tolerance. Default is 1.4e-8. Algorithm tries to\n obtain an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``\n where ``i`` = integral of `fun` over `interval`, and ``result`` is the\n numerical approximation.\n max_ninter : int, optional\n An upper bound on the number of sub-intervals used in the adaptive\n algorithm.\n order : {41, 61, 81, 101}\n Order of local integration rule.\n norm : int, callable\n Norm to use for measuring error for vector valued integrands. No effect if the\n integrand is scalar valued. If an int, uses p-norm of the given order, otherwise\n should be callable.\n\n Returns\n -------\n y : float, Array\n The integral of fun from `a` to `b`.\n info : QuadratureInfo\n Named tuple with the following fields:\n\n * err : (float) Estimate of the error in the approximation.\n * neval : (int) Total number of function evaluations.\n * status : (int) Flag indicating reason for termination. 
status of 0 means\n normal termination, any other value indicates a possible error. A human\n readable message can be obtained by ``print(quadax.STATUS[status])``\n * info : (dict or None) Other information returned by the algorithm.\n Only present if ``full_output`` is True. Contains the following:\n\n * 'ninter' : (int) The number, K, of sub-intervals produced in the\n subdivision process.\n * 'a_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the left end points of the (remapped) sub-intervals\n in the partition of the integration range.\n * 'b_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the right end points of the (remapped) sub-intervals.\n * 'r_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the integral approximations on the sub-intervals.\n * 'e_arr' : (ndarray) rank-1 array of length max_ninter, the first K\n elements of which are the moduli of the absolute error estimates on the\n sub-intervals.\n\n Notes\n -----\n Adaptive algorithms are inherently somewhat sequential, so perfect parallelism\n is generally not achievable. The local quadrature rule vmaps integrand evaluation at\n ``order`` points, so using higher order methods will generally be more efficient on\n GPU/TPU.\n\n \"\"\"\n y, info = adaptive_quadrature(\n fixed_quadts,\n fun,\n interval,\n args,\n full_output,\n epsabs,\n epsrel,\n max_ninter,\n n=order,\n norm=norm,\n )\n info = QuadratureInfo(info.err, info.neval * order, info.status, info.info)\n return y, info" } ]
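The context snippets above document a shared calling convention for the quadax integrators: pass a JAX-transformable integrand and an interval, and receive (y, info) where info carries err, neval, and status. The following is a minimal usage sketch under those documented signatures; the integrand, t * log(1 + t) on [0, 1] with exact value 1/4, is the first test problem appearing later in this record.

# Sketch: adaptive Gauss-Kronrod quadrature via the documented quadgk signature.
import jax.numpy as jnp
from quadax import quadgk

fun = lambda t: t * jnp.log(1 + t)  # exact integral over [0, 1] is 1/4
y, info = quadgk(fun, [0, 1], epsabs=1e-10, epsrel=1e-10)

print(y)            # approximately 0.25
print(info.err)     # error estimate
print(info.neval)   # total number of integrand evaluations
print(info.status)  # 0 indicates normal termination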
import jax.numpy as jnp
import numpy as np
import pytest
import scipy
from jax.config import config as jax_config

from quadax import quadcc, quadgk, quadts, romberg, rombergts
8,290
class TestRombergTS: """Tests for tanh-sinh quadrature with adaptive refinement.""" def _base(self, i, tol, fudge=1, **kwargs): prob = example_problems[i] y, info = rombergts( prob["fun"], prob["interval"], epsabs=tol, epsrel=tol, **kwargs ) if info.status == 0: assert info.err < max(tol, tol * np.max(np.abs(y))) np.testing.assert_allclose( y, prob["val"], rtol=fudge * tol, atol=fudge * tol, err_msg=f"problem {i}, tol={tol}", ) def test_prob0(self): """Test for example problem #0.""" self._base(0, 1e-4) self._base(0, 1e-8) self._base(0, 1e-12) def test_prob1(self): """Test for example problem #1.""" self._base(1, 1e-4) self._base(1, 1e-8) self._base(1, 1e-12) def test_prob2(self): """Test for example problem #2.""" self._base(2, 1e-4) self._base(2, 1e-8) self._base(2, 1e-12) def test_prob3(self): """Test for example problem #3.""" self._base(3, 1e-4) self._base(3, 1e-8) self._base(3, 1e-12) def test_prob4(self): """Test for example problem #4.""" self._base(4, 1e-4) self._base(4, 1e-8) self._base(4, 1e-12) def test_prob5(self): """Test for example problem #5.""" self._base(5, 1e-4) self._base(5, 1e-8) self._base(5, 1e-12) def test_prob6(self): """Test for example problem #6.""" self._base(6, 1e-4) self._base(6, 1e-8, fudge=10) self._base(6, 1e-12, divmax=22, fudge=1e5) def test_prob7(self): """Test for example problem #7.""" self._base(7, 1e-4) self._base(7, 1e-8) self._base(7, 1e-12) def test_prob8(self): """Test for example problem #8.""" self._base(8, 1e-4) self._base(8, 1e-8) self._base(8, 1e-12) def test_prob9(self): """Test for example problem #9.""" self._base(9, 1e-4) self._base(9, 1e-8, fudge=10) self._base(9, 1e-12, fudge=1e5) def test_prob10(self): """Test for example problem #10.""" self._base(10, 1e-4) self._base(10, 1e-8) self._base(10, 1e-12) def test_prob11(self): """Test for example problem #11.""" self._base(11, 1e-4) self._base(11, 1e-8, fudge=10) self._base(11, 1e-12, fudge=1e5) def test_prob12(self): """Test for example problem #12.""" self._base(12, 1e-4) self._base(12, 1e-8) self._base(12, 1e-12) def test_prob13(self): """Test for example problem #13.""" self._base(13, 1e-4) self._base(13, 1e-8) self._base(13, 1e-12) def test_prob14(self): """Test for example problem #14.""" self._base(14, 1e-4) self._base(14, 1e-8) self._base(14, 1e-12) def test_prob15(self): """Test for example problem #15.""" self._base(14, 1e-4) self._base(14, 1e-8) self._base(14, 1e-12) class TestRomberg: """Tests for Romberg's method (only for well behaved integrands).""" def _base(self, i, tol, fudge=1, **kwargs): prob = example_problems[i]
"""Tests for adaptive quadrature routines.""" jax_config.update("jax_enable_x64", True) example_problems = [ # problem 0 {"fun": lambda t: t * jnp.log(1 + t), "interval": [0, 1], "val": 1 / 4}, # problem 1 { "fun": lambda t: t**2 * jnp.arctan(t), "interval": [0, 1], "val": (jnp.pi - 2 + 2 * jnp.log(2)) / 12, }, # problem 2 { "fun": lambda t: jnp.exp(t) * jnp.cos(t), "interval": [0, jnp.pi / 2], "val": (jnp.exp(jnp.pi / 2) - 1) / 2, }, # problem 3 { "fun": lambda t: jnp.arctan(jnp.sqrt(2 + t**2)) / ((1 + t**2) * jnp.sqrt(2 + t**2)), "interval": [0, 1], "val": 5 * jnp.pi**2 / 96, }, # problem 4 {"fun": lambda t: jnp.sqrt(t) * jnp.log(t), "interval": [0, 1], "val": -4 / 9}, # problem 5 {"fun": lambda t: jnp.sqrt(1 - t**2), "interval": [0, 1], "val": jnp.pi / 4}, # problem 6 { "fun": lambda t: jnp.sqrt(t) / jnp.sqrt(1 - t**2), "interval": [0, 1], "val": 2 * jnp.sqrt(jnp.pi) * scipy.special.gamma(3 / 4) / scipy.special.gamma(1 / 4), }, # problem 7 {"fun": lambda t: jnp.log(t) ** 2, "interval": [0, 1], "val": 2}, # problem 8 { "fun": lambda t: jnp.log(jnp.cos(t)), "interval": [0, jnp.pi / 2], "val": -jnp.pi * jnp.log(2) / 2, }, # problem 9 { "fun": lambda t: jnp.sqrt(jnp.tan(t)), "interval": [0, jnp.pi / 2], "val": jnp.pi * jnp.sqrt(2) / 2, }, # problem 10 {"fun": lambda t: 1 / (1 + t**2), "interval": [0, jnp.inf], "val": jnp.pi / 2}, # problem 11 { "fun": lambda t: jnp.exp(-t) / jnp.sqrt(t), "interval": [0, jnp.inf], "val": jnp.sqrt(jnp.pi), }, # problem 12 { "fun": lambda t: jnp.exp(-(t**2) / 2), "interval": [-jnp.inf, jnp.inf], "val": jnp.sqrt(2 * jnp.pi), }, # problem 13 {"fun": lambda t: jnp.exp(-t) * jnp.cos(t), "interval": [0, jnp.inf], "val": 1 / 2}, # problem 14 - vector valued integrand made of up problems 0 and 1 { "fun": lambda t: jnp.array([t * jnp.log(1 + t), t**2 * jnp.arctan(t)]), "interval": [0, 1], "val": jnp.array([1 / 4, (jnp.pi - 2 + 2 * jnp.log(2)) / 12]), }, # problem 15 - intergral with breakpoints { "fun": lambda t: jnp.log((t - 1) ** 2), "interval": [0, 1, 2], "val": -4, }, ] class TestQuadGK: """Tests for Gauss-Konrod quadrature.""" def _base(self, i, tol, fudge=1, **kwargs): prob = example_problems[i] status = kwargs.pop("status", 0) y, info = quadgk( prob["fun"], prob["interval"], epsabs=tol, epsrel=tol, **kwargs, ) assert info.status == status if status == 0: assert info.err < max(tol, tol * np.max(np.abs(y))) np.testing.assert_allclose( y, prob["val"], rtol=fudge * tol, atol=fudge * tol, err_msg=f"problem {i}, tol={tol}", ) def test_prob0(self): """Test for example problem #0.""" self._base(0, 1e-4, order=21) self._base(0, 1e-8, order=21) self._base(0, 1e-12, order=21) def test_prob1(self): """Test for example problem #1.""" self._base(1, 1e-4, order=31) self._base(1, 1e-8, order=31) self._base(1, 1e-12, order=31) def test_prob2(self): """Test for example problem #2.""" self._base(2, 1e-4, order=41) self._base(2, 1e-8, order=41) self._base(2, 1e-12, order=41) def test_prob3(self): """Test for example problem #3.""" self._base(3, 1e-4, order=51) self._base(3, 1e-8, order=51) self._base(3, 1e-12, order=51) def test_prob4(self): """Test for example problem #4.""" self._base(4, 1e-4, order=61) self._base(4, 1e-8, order=61) self._base(4, 1e-12, order=61) def test_prob5(self): """Test for example problem #5.""" self._base(5, 1e-4, order=21) self._base(5, 1e-8, order=21) self._base(5, 1e-12, order=21) def test_prob6(self): """Test for example problem #6.""" self._base(6, 1e-4, order=15) self._base(6, 1e-8, 100, order=15) self._base(6, 1e-12, 1e5, order=15, max_ninter=100, 
status=8) def test_prob7(self): """Test for example problem #7.""" self._base(7, 1e-4, order=61) self._base(7, 1e-8, order=61) self._base(7, 1e-12, order=61, status=4) def test_prob8(self): """Test for example problem #8.""" self._base(8, 1e-4, order=51) self._base(8, 1e-8, order=51) self._base(8, 1e-12, order=51, status=4) def test_prob9(self): """Test for example problem #9.""" self._base(9, 1e-4, order=15) self._base(9, 1e-8, 100, order=15) self._base(9, 1e-12, 1e4, order=15, max_ninter=100, status=8) def test_prob10(self): """Test for example problem #10.""" self._base(10, 1e-4, order=15) self._base(10, 1e-8, order=15) self._base(10, 1e-12, order=15) def test_prob11(self): """Test for example problem #11.""" self._base(11, 1e-4, order=21) self._base(11, 1e-8, 100, order=21) self._base(11, 1e-12, 1e4, order=21, status=8, max_ninter=100) def test_prob12(self): """Test for example problem #12.""" self._base(12, 1e-4, order=15) self._base(12, 1e-8, order=15) self._base(12, 1e-12, order=15) def test_prob13(self): """Test for example problem #13.""" self._base(13, 1e-4, order=31) self._base(13, 1e-8, order=31) self._base(13, 1e-12, order=31) def test_prob14(self): """Test for example problem #14.""" self._base(14, 1e-4) self._base(14, 1e-8) self._base(14, 1e-12) def test_prob15(self): """Test for example problem #15.""" self._base(14, 1e-4) self._base(14, 1e-8) self._base(14, 1e-12) class TestQuadCC: """Tests for Clenshaw-Curtis quadrature.""" def _base(self, i, tol, fudge=1, **kwargs): prob = example_problems[i] status = kwargs.pop("status", 0) y, info = quadcc( prob["fun"], prob["interval"], epsabs=tol, epsrel=tol, **kwargs, ) assert info.status == status if status == 0: assert info.err < max(tol, tol * np.max(np.abs(y))) np.testing.assert_allclose( y, prob["val"], rtol=fudge * tol, atol=fudge * tol, err_msg=f"problem {i}, tol={tol}", ) def test_prob0(self): """Test for example problem #0.""" self._base(0, 1e-4, order=32) self._base(0, 1e-8, order=32) self._base(0, 1e-12, order=32) def test_prob1(self): """Test for example problem #1.""" self._base(1, 1e-4, order=64) self._base(1, 1e-8, order=64) self._base(1, 1e-12, order=64) def test_prob2(self): """Test for example problem #2.""" self._base(2, 1e-4, order=128) self._base(2, 1e-8, order=128) self._base(2, 1e-12, order=128) def test_prob3(self): """Test for example problem #3.""" self._base(3, 1e-4, order=256) self._base(3, 1e-8, order=256) self._base(3, 1e-12, order=256) def test_prob4(self): """Test for example problem #4.""" self._base(4, 1e-4, order=8) self._base(4, 1e-8, order=8) self._base(4, 1e-12, order=8, max_ninter=100) def test_prob5(self): """Test for example problem #5.""" self._base(5, 1e-4, order=16) self._base(5, 1e-8, order=16) self._base(5, 1e-12, order=16) def test_prob6(self): """Test for example problem #6.""" self._base(6, 1e-4) self._base(6, 1e-8, 100) self._base(6, 1e-12, 1e5, max_ninter=100, status=8) def test_prob7(self): """Test for example problem #7.""" self._base(7, 1e-4) self._base(7, 1e-8, 10) self._base(7, 1e-12, status=8) def test_prob8(self): """Test for example problem #8.""" self._base(8, 1e-4) self._base(8, 1e-8) self._base(8, 1e-12, status=8) def test_prob9(self): """Test for example problem #9.""" self._base(9, 1e-4) self._base(9, 1e-8, max_ninter=100, status=8) self._base(9, 1e-12, 1e4, max_ninter=100, status=8) def test_prob10(self): """Test for example problem #10.""" self._base(10, 1e-4) self._base(10, 1e-8) self._base(10, 1e-12, 10) def test_prob11(self): """Test for example problem #11.""" 
self._base(11, 1e-4) self._base(11, 1e-8, 100) self._base(11, 1e-12, 1e4, status=8) def test_prob12(self): """Test for example problem #12.""" self._base(12, 1e-4) self._base(12, 1e-8) self._base(12, 1e-12) def test_prob13(self): """Test for example problem #13.""" self._base(13, 1e-4) self._base(13, 1e-8) self._base(13, 1e-12) def test_prob14(self): """Test for example problem #14.""" self._base(14, 1e-4) self._base(14, 1e-8) self._base(14, 1e-12) def test_prob15(self): """Test for example problem #15.""" self._base(14, 1e-4) self._base(14, 1e-8) self._base(14, 1e-12) class TestQuadTS: """Tests for adaptive tanh-sinh quadrature.""" def _base(self, i, tol, fudge=1, **kwargs): prob = example_problems[i] status = kwargs.pop("status", 0) y, info = quadts( prob["fun"], prob["interval"], epsabs=tol, epsrel=tol, **kwargs, ) assert info.status == status if status == 0: assert info.err < max(tol, tol * np.max(np.abs(y))) np.testing.assert_allclose( y, prob["val"], rtol=fudge * tol, atol=fudge * tol, err_msg=f"problem {i}, tol={tol}", ) def test_prob0(self): """Test for example problem #0.""" self._base(0, 1e-4) self._base(0, 1e-8) self._base(0, 1e-12) def test_prob1(self): """Test for example problem #1.""" self._base(1, 1e-4) self._base(1, 1e-8) self._base(1, 1e-12) def test_prob2(self): """Test for example problem #2.""" self._base(2, 1e-4, order=41) self._base(2, 1e-8, order=41) self._base(2, 1e-12, order=41) def test_prob3(self): """Test for example problem #3.""" self._base(3, 1e-4, order=61) self._base(3, 1e-8, order=61) self._base(3, 1e-12, order=61) def test_prob4(self): """Test for example problem #4.""" self._base(4, 1e-4, order=81) self._base(4, 1e-8, order=81) self._base(4, 1e-12, order=81) def test_prob5(self): """Test for example problem #5.""" self._base(5, 1e-4, order=101) self._base(5, 1e-8, order=101) self._base(5, 1e-12, order=101) def test_prob6(self): """Test for example problem #6.""" self._base(6, 1e-4) self._base(6, 1e-8) self._base(6, 1e-12, 1e4) def test_prob7(self): """Test for example problem #7.""" self._base(7, 1e-4) self._base(7, 1e-8) self._base(7, 1e-12) def test_prob8(self): """Test for example problem #8.""" self._base(8, 1e-4) self._base(8, 1e-8) self._base(8, 1e-12) def test_prob9(self): """Test for example problem #9.""" self._base(9, 1e-4) self._base(9, 1e-8, 10) self._base(9, 1e-12, 1e4) def test_prob10(self): """Test for example problem #10.""" self._base(10, 1e-4) self._base(10, 1e-8) self._base(10, 1e-12) def test_prob11(self): """Test for example problem #11.""" self._base(11, 1e-4) self._base(11, 1e-8) self._base(11, 1e-12, 1e4) def test_prob12(self): """Test for example problem #12.""" self._base(12, 1e-4) self._base(12, 1e-8) self._base(12, 1e-12) def test_prob13(self): """Test for example problem #13.""" self._base(13, 1e-4) self._base(13, 1e-8) self._base(13, 1e-12) def test_prob14(self): """Test for example problem #14.""" self._base(14, 1e-4) self._base(14, 1e-8) self._base(14, 1e-12) def test_prob15(self): """Test for example problem #15.""" self._base(14, 1e-4) self._base(14, 1e-8) self._base(14, 1e-12) class TestRombergTS: """Tests for tanh-sinh quadrature with adaptive refinement.""" def _base(self, i, tol, fudge=1, **kwargs): prob = example_problems[i] y, info = rombergts( prob["fun"], prob["interval"], epsabs=tol, epsrel=tol, **kwargs ) if info.status == 0: assert info.err < max(tol, tol * np.max(np.abs(y))) np.testing.assert_allclose( y, prob["val"], rtol=fudge * tol, atol=fudge * tol, err_msg=f"problem {i}, tol={tol}", ) def 
test_prob0(self): """Test for example problem #0.""" self._base(0, 1e-4) self._base(0, 1e-8) self._base(0, 1e-12) def test_prob1(self): """Test for example problem #1.""" self._base(1, 1e-4) self._base(1, 1e-8) self._base(1, 1e-12) def test_prob2(self): """Test for example problem #2.""" self._base(2, 1e-4) self._base(2, 1e-8) self._base(2, 1e-12) def test_prob3(self): """Test for example problem #3.""" self._base(3, 1e-4) self._base(3, 1e-8) self._base(3, 1e-12) def test_prob4(self): """Test for example problem #4.""" self._base(4, 1e-4) self._base(4, 1e-8) self._base(4, 1e-12) def test_prob5(self): """Test for example problem #5.""" self._base(5, 1e-4) self._base(5, 1e-8) self._base(5, 1e-12) def test_prob6(self): """Test for example problem #6.""" self._base(6, 1e-4) self._base(6, 1e-8, fudge=10) self._base(6, 1e-12, divmax=22, fudge=1e5) def test_prob7(self): """Test for example problem #7.""" self._base(7, 1e-4) self._base(7, 1e-8) self._base(7, 1e-12) def test_prob8(self): """Test for example problem #8.""" self._base(8, 1e-4) self._base(8, 1e-8) self._base(8, 1e-12) def test_prob9(self): """Test for example problem #9.""" self._base(9, 1e-4) self._base(9, 1e-8, fudge=10) self._base(9, 1e-12, fudge=1e5) def test_prob10(self): """Test for example problem #10.""" self._base(10, 1e-4) self._base(10, 1e-8) self._base(10, 1e-12) def test_prob11(self): """Test for example problem #11.""" self._base(11, 1e-4) self._base(11, 1e-8, fudge=10) self._base(11, 1e-12, fudge=1e5) def test_prob12(self): """Test for example problem #12.""" self._base(12, 1e-4) self._base(12, 1e-8) self._base(12, 1e-12) def test_prob13(self): """Test for example problem #13.""" self._base(13, 1e-4) self._base(13, 1e-8) self._base(13, 1e-12) def test_prob14(self): """Test for example problem #14.""" self._base(14, 1e-4) self._base(14, 1e-8) self._base(14, 1e-12) def test_prob15(self): """Test for example problem #15.""" self._base(14, 1e-4) self._base(14, 1e-8) self._base(14, 1e-12) class TestRomberg: """Tests for Romberg's method (only for well behaved integrands).""" def _base(self, i, tol, fudge=1, **kwargs): prob = example_problems[i]
y, info = romberg(
0
2023-10-24 04:44:34+00:00
12k
zju3dv/nr_in_a_room
models_neurecon/neus_multi_rendering.py
[ { "identifier": "NeuS", "path": "models_neurecon/neus.py", "snippet": "class NeuS(nn.Module):\n def __init__(\n self,\n variance_init=0.05,\n speed_factor=1.0,\n input_ch=3,\n input_obj_ch=0,\n input_light_ch=0,\n input_appearance_ch=0,\n W_geo_feat=-1,\n use_outside_nerf=False,\n obj_bounding_radius=1.0,\n surface_cfg=dict(),\n radiance_cfg=dict(),\n ):\n super().__init__()\n\n self.ln_s = nn.Parameter(\n data=torch.Tensor([-np.log(variance_init) / speed_factor]),\n requires_grad=True,\n )\n self.speed_factor = speed_factor\n\n # ------- surface network\n self.implicit_surface = ImplicitSurface(\n W_geo_feat=W_geo_feat,\n input_ch=input_ch,\n input_obj_ch=input_obj_ch,\n obj_bounding_size=obj_bounding_radius,\n **surface_cfg,\n )\n\n # ------- radiance network\n if W_geo_feat < 0:\n W_geo_feat = self.implicit_surface.W\n self.radiance_net = RadianceNet(\n W_geo_feat=W_geo_feat,\n input_light_ch=input_light_ch,\n input_appearance_ch=input_appearance_ch,\n **radiance_cfg,\n )\n\n # -------- outside nerf++\n if use_outside_nerf:\n self.nerf_outside = NeRF(\n input_ch=4, multires=10, multires_view=4, use_view_dirs=True\n )\n\n def forward_radiance(\n self,\n x: torch.Tensor,\n obj_code: torch.Tensor,\n light_code: torch.Tensor,\n view_dirs: torch.Tensor,\n appearance_code: torch.Tensor = None,\n ):\n _, nablas, geometry_feature = self.implicit_surface.forward_with_nablas(\n x, obj_code\n )\n radiance = self.radiance_net.forward(\n x,\n light_code,\n view_dirs,\n nablas,\n geometry_feature,\n appearance_code=appearance_code,\n )\n return radiance\n\n def forward_s(self):\n return torch.exp(self.ln_s * self.speed_factor)\n\n def forward(\n self,\n x: torch.Tensor,\n obj_code: torch.Tensor,\n light_code: torch.Tensor,\n view_dirs: torch.Tensor,\n appearance_code: torch.Tensor = None,\n ):\n sdf, nablas, geometry_feature = self.implicit_surface.forward_with_nablas(\n x, obj_code\n )\n radiances = self.radiance_net.forward(\n x,\n light_code,\n view_dirs,\n nablas,\n geometry_feature,\n appearance_code=appearance_code,\n )\n return radiances, sdf, nablas" }, { "identifier": "volume_render", "path": "models_neurecon/neus.py", "snippet": "def volume_render(\n rays_o,\n rays_d,\n model: NeuS,\n obj_code=None,\n light_code=None,\n appearance_code=None,\n obj_bounding_radius=1.0,\n batched=False,\n batched_info={},\n # render algorithm config\n calc_normal=False,\n use_view_dirs=True,\n rayschunk=65536,\n netchunk=1048576,\n white_bkgd=False,\n near_bypass: Optional[torch.Tensor] = None,\n far_bypass: Optional[torch.Tensor] = None,\n # render function config\n detailed_output=True,\n show_progress=False,\n # sampling related\n perturb=False, # config whether do stratified sampling\n fixed_s_recp=1 / 64.0,\n N_samples=64,\n N_importance=64,\n N_outside=0, # whether to use outside nerf\n # upsample related\n upsample_algo=\"official_solution\",\n N_nograd_samples=2048,\n N_upsample_iters=4,\n skip_accumulation=False, # skip accumulation and directly output opacity and radiance\n **dummy_kwargs # just place holder\n):\n \"\"\"\n input:\n rays_o: [(B,) N_rays, 3]\n rays_d: [(B,) N_rays, 3] NOTE: not normalized. 
contains info about ratio of len(this ray)/len(principle ray)\n \"\"\"\n # we add obj_code, which may break the batched\n assert batched == False\n device = rays_o.device\n if batched:\n DIM_BATCHIFY = 1\n B = rays_d.shape[0] # batch_size\n flat_vec_shape = [B, -1, 3]\n else:\n DIM_BATCHIFY = 0\n flat_vec_shape = [-1, 3]\n\n rays_o = torch.reshape(rays_o, flat_vec_shape).float()\n rays_d = torch.reshape(rays_d, flat_vec_shape).float()\n # NOTE: already normalized\n rays_d = F.normalize(rays_d, dim=-1)\n\n batchify_query = functools.partial(\n train_util.batchify_query, chunk=netchunk, dim_batchify=DIM_BATCHIFY\n )\n\n # ---------------\n # Render a ray chunk\n # ---------------\n def render_rayschunk(\n rays_o: torch.Tensor,\n rays_d: torch.Tensor,\n near: torch.Tensor,\n far: torch.Tensor,\n obj_code: torch.Tensor = None,\n light_code: torch.Tensor = None,\n appearance_code: torch.Tensor = None,\n ):\n # rays_o: [(B), N_rays, 3]\n # rays_d: [(B), N_rays, 3]\n\n # [(B), N_rays] x 2\n # near, far = rend_util.near_far_from_sphere(rays_o, rays_d, r=obj_bounding_radius)\n # if near_bypass is not None:\n # near = near_bypass * torch.ones_like(near).to(device)\n # if far_bypass is not None:\n # far = far_bypass * torch.ones_like(far).to(device)\n\n if use_view_dirs:\n view_dirs = rays_d\n else:\n view_dirs = None\n\n prefix_batch = [B] if batched else []\n N_rays = rays_o.shape[-2]\n\n # ---------------\n # Sample points on the rays\n # ---------------\n\n # ---------------\n # Coarse Points\n\n # [(B), N_rays, N_samples]\n # d_coarse = torch.linspace(near, far, N_samples).float().to(device)\n # d_coarse = d_coarse.view([*[1]*len(prefix_batch), 1, N_samples]).repeat([*prefix_batch, N_rays, 1])\n _t = torch.linspace(0, 1, N_samples).float().to(device)\n d_coarse = near * (1 - _t) + far * _t\n\n if obj_code is not None:\n obj_code = obj_code.unsqueeze(1) # [N_rays, 1, N_obj_ch]\n if light_code is not None:\n light_code = light_code.unsqueeze(1) # [N_rays, 1, N_light_ch]\n if appearance_code is not None:\n appearance_code = appearance_code.unsqueeze(1) # [N_rays, 1, N_light_ch]\n\n # ---------------\n # Up Sampling\n with torch.no_grad():\n if upsample_algo == \"official_solution\":\n _d = d_coarse\n # [(B), N_rays, N_sample, 3]\n # N_rays, N_obj_ch = obj_code.shape\n # obj_code = obj_code.view(N_rays, 1, N_obj_ch)\n\n _sdf = batchify_query(\n model.implicit_surface.forward,\n rays_o.unsqueeze(-2) + _d.unsqueeze(-1) * rays_d.unsqueeze(-2),\n None if obj_code is None else obj_code.expand(-1, _d.shape[1], -1),\n )\n for i in range(N_upsample_iters):\n prev_sdf, next_sdf = _sdf[..., :-1], _sdf[..., 1:]\n prev_z_vals, next_z_vals = _d[..., :-1], _d[..., 1:]\n mid_sdf = (prev_sdf + next_sdf) * 0.5\n dot_val = (next_sdf - prev_sdf) / (next_z_vals - prev_z_vals + 1e-5)\n prev_dot_val = torch.cat(\n [\n torch.zeros_like(dot_val[..., :1], device=device),\n dot_val[..., :-1],\n ],\n dim=-1,\n ) # jianfei: prev_slope, right shifted\n dot_val = torch.stack(\n [prev_dot_val, dot_val], dim=-1\n ) # jianfei: concat prev_slope with slope\n dot_val, _ = torch.min(\n dot_val, dim=-1, keepdim=False\n ) # jianfei: find the minimum of prev_slope and current slope. (forward diff vs. backward diff., or the prev segment's slope vs. 
this segment's slope)\n dot_val = dot_val.clamp(-10.0, 0.0)\n\n dist = next_z_vals - prev_z_vals\n prev_esti_sdf = mid_sdf - dot_val * dist * 0.5\n next_esti_sdf = mid_sdf + dot_val * dist * 0.5\n\n prev_cdf = cdf_Phi_s(prev_esti_sdf, 64 * (2**i))\n next_cdf = cdf_Phi_s(next_esti_sdf, 64 * (2**i))\n alpha = (prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5)\n _w = alpha_to_w(alpha)\n d_fine = rend_util.sample_pdf(\n _d, _w, N_importance // N_upsample_iters, det=not perturb\n )\n _d = torch.cat([_d, d_fine], dim=-1)\n sdf_fine = batchify_query(\n model.implicit_surface.forward,\n rays_o.unsqueeze(-2)\n + d_fine.unsqueeze(-1) * rays_d.unsqueeze(-2),\n None\n if obj_code is None\n else obj_code.expand(-1, d_fine.shape[1], -1),\n )\n _sdf = torch.cat([_sdf, sdf_fine], dim=-1)\n _d, d_sort_indices = torch.sort(_d, dim=-1)\n _sdf = torch.gather(_sdf, DIM_BATCHIFY + 1, d_sort_indices)\n d_all = _d\n else:\n raise NotImplementedError\n\n # ------------------\n # Calculate Points\n # [(B), N_rays, N_samples+N_importance, 3]\n pts = rays_o[..., None, :] + rays_d[..., None, :] * d_all[..., :, None]\n # [(B), N_rays, N_pts-1, 3]\n # pts_mid = 0.5 * (pts[..., 1:, :] + pts[..., :-1, :])\n d_mid = 0.5 * (d_all[..., 1:] + d_all[..., :-1])\n pts_mid = rays_o[..., None, :] + rays_d[..., None, :] * d_mid[..., :, None]\n\n # ------------------\n # Inside Scene\n # ------------------\n # sdf, nablas, _ = model.implicit_surface.forward_with_nablas(pts)\n sdf, nablas, _ = batchify_query(\n model.implicit_surface.forward_with_nablas,\n pts,\n None if obj_code is None else obj_code.expand(-1, pts.shape[1], -1),\n )\n # [(B), N_ryas, N_pts], [(B), N_ryas, N_pts-1]\n cdf, opacity_alpha = sdf_to_alpha(sdf, model.forward_s())\n # radiances = model.forward_radiance(pts_mid, view_dirs_mid)\n _, N_sample_mid, _ = pts_mid.shape\n radiances = batchify_query(\n model.forward_radiance,\n pts_mid,\n None if obj_code is None else obj_code.expand(-1, pts_mid.shape[1], -1),\n None if light_code is None else light_code.expand(-1, pts_mid.shape[1], -1),\n view_dirs.unsqueeze(-2).expand_as(pts_mid) if use_view_dirs else None,\n None\n if appearance_code is None\n else appearance_code.expand(-1, pts_mid.shape[1], -1),\n )\n\n # ------------------\n # Outside Scene\n # ------------------\n if N_outside > 0:\n assert False, \"obj_code not implemented\"\n _t = torch.linspace(0, 1, N_outside + 2)[..., 1:-1].float().to(device)\n d_vals_out = far / torch.flip(_t, dims=[-1])\n if perturb:\n _mids = 0.5 * (d_vals_out[..., 1:] + d_vals_out[..., :-1])\n _upper = torch.cat([_mids, d_vals_out[..., -1:]], -1)\n _lower = torch.cat([d_vals_out[..., :1], _mids], -1)\n _t_rand = torch.rand(_upper.shape).float().to(device)\n d_vals_out = _lower + (_upper - _lower) * _t_rand\n\n d_vals_out = torch.cat([d_mid, d_vals_out], dim=-1) # already sorted\n pts_out = (\n rays_o[..., None, :] + rays_d[..., None, :] * d_vals_out[..., :, None]\n )\n r = pts_out.norm(dim=-1, keepdim=True)\n x_out = torch.cat([pts_out / r, 1.0 / r], dim=-1)\n views_out = (\n view_dirs.unsqueeze(-2).expand_as(x_out[..., :3])\n if use_view_dirs\n else None\n )\n\n sigma_out, radiance_out = batchify_query(\n model.nerf_outside.forward, x_out, views_out\n )\n dists = d_vals_out[..., 1:] - d_vals_out[..., :-1]\n dists = torch.cat(\n [dists, 1e10 * torch.ones(dists[..., :1].shape).to(device)], dim=-1\n )\n alpha_out = 1 - torch.exp(\n -F.softplus(sigma_out) * dists\n ) # use softplus instead of relu as NeuS's official repo\n\n # --------------\n # Ray Integration\n # --------------\n # 
[(B), N_rays, N_pts-1]\n if N_outside > 0:\n assert False, \"obj_code not implemented\"\n N_pts_1 = d_mid.shape[-1]\n # [(B), N_ryas, N_pts-1]\n mask_inside = pts_mid.norm(dim=-1) <= obj_bounding_radius\n # [(B), N_ryas, N_pts-1]\n alpha_in = (\n opacity_alpha * mask_inside.float()\n + alpha_out[..., :N_pts_1] * (~mask_inside).float()\n )\n # [(B), N_ryas, N_pts-1 + N_outside]\n opacity_alpha = torch.cat([alpha_in, alpha_out[..., N_pts_1:]], dim=-1)\n\n # [(B), N_ryas, N_pts-1, 3]\n radiance_in = (\n radiances * mask_inside.float()[..., None]\n + radiance_out[..., :N_pts_1, :] * (~mask_inside).float()[..., None]\n )\n # [(B), N_ryas, N_pts-1 + N_outside, 3]\n radiances = torch.cat([radiance_in, radiance_out[..., N_pts_1:, :]], dim=-2)\n d_final = d_vals_out\n else:\n d_final = d_mid\n\n if skip_accumulation:\n return {\n \"z_vals\": d_final,\n \"opacity\": opacity_alpha,\n \"radiances\": radiances,\n }\n\n # [(B), N_ryas, N_pts-1 + N_outside]\n visibility_weights = alpha_to_w(opacity_alpha)\n # [(B), N_rays]\n rgb_map = torch.sum(visibility_weights[..., None] * radiances, -2)\n # depth_map = torch.sum(visibility_weights * d_mid, -1)\n # NOTE: to get the correct depth map, the sum of weights must be 1!\n depth_map = torch.sum(\n visibility_weights\n / (visibility_weights.sum(-1, keepdim=True) + 1e-10)\n * d_final,\n -1,\n )\n acc_map = torch.sum(visibility_weights, -1)\n\n if white_bkgd:\n rgb_map = rgb_map + (1.0 - acc_map[..., None])\n\n ret_i = OrderedDict(\n [\n (\"rgb\", rgb_map), # [(B), N_rays, 3]\n (\"depth_volume\", depth_map), # [(B), N_rays]\n # ('depth_surface', d_pred_out), # [(B), N_rays]\n (\"mask_volume\", acc_map), # [(B), N_rays]\n ]\n )\n\n if calc_normal:\n normals_map = F.normalize(nablas, dim=-1)\n N_pts = min(visibility_weights.shape[-1], normals_map.shape[-2])\n normals_map = (\n normals_map[..., :N_pts, :] * visibility_weights[..., :N_pts, None]\n ).sum(dim=-2)\n ret_i[\"normals_volume\"] = normals_map\n\n if detailed_output:\n ret_i[\"implicit_nablas\"] = nablas\n ret_i[\"implicit_surface\"] = sdf\n ret_i[\"radiance\"] = radiances\n ret_i[\"alpha\"] = opacity_alpha\n ret_i[\"cdf\"] = cdf\n ret_i[\"visibility_weights\"] = visibility_weights\n ret_i[\"d_final\"] = d_final\n if N_outside > 0:\n assert False, \"obj_code not implemented\"\n ret_i[\"sigma_out\"] = sigma_out\n ret_i[\"radiance_out\"] = radiance_out\n\n return ret_i\n\n ret = {}\n for i in tqdm(\n range(0, rays_o.shape[DIM_BATCHIFY], rayschunk), disable=not show_progress\n ):\n if obj_code is not None:\n obj_code_chunk = (\n obj_code[:, i : i + rayschunk]\n if batched\n else obj_code[i : i + rayschunk]\n )\n else:\n obj_code_chunk = None\n if light_code is not None:\n light_code_chunk = (\n light_code[:, i : i + rayschunk]\n if batched\n else light_code[i : i + rayschunk]\n )\n else:\n light_code_chunk = None\n\n if appearance_code is not None:\n appearance_code_chunk = (\n appearance_code[:, i : i + rayschunk]\n if batched\n else appearance_code[i : i + rayschunk]\n )\n else:\n appearance_code_chunk = None\n\n ret_i = render_rayschunk(\n rays_o=rays_o[:, i : i + rayschunk]\n if batched\n else rays_o[i : i + rayschunk],\n rays_d=rays_d[:, i : i + rayschunk]\n if batched\n else rays_d[i : i + rayschunk],\n near=near_bypass[:, i : i + rayschunk]\n if batched\n else near_bypass[i : i + rayschunk],\n far=far_bypass[:, i : i + rayschunk]\n if batched\n else far_bypass[i : i + rayschunk],\n obj_code=obj_code_chunk,\n light_code=light_code_chunk,\n appearance_code=appearance_code_chunk,\n )\n for k, v in 
ret_i.items():\n if k not in ret:\n ret[k] = []\n ret[k].append(v)\n for k, v in ret.items():\n ret[k] = torch.cat(v, DIM_BATCHIFY)\n\n if skip_accumulation:\n return ret\n\n return ret[\"rgb\"], ret[\"depth_volume\"], ret" }, { "identifier": "ImplicitSurface", "path": "models_neurecon/base.py", "snippet": "class ImplicitSurface(nn.Module):\n def __init__(\n self,\n W=256,\n D=8,\n skips=[4],\n W_geo_feat=256,\n input_ch=3,\n input_obj_ch=0,\n radius_init=1.0,\n radius_init_inside_out=1.0,\n obj_bounding_size=2.0,\n geometric_init=True,\n inside_out=False,\n embed_multires=6,\n weight_norm=True,\n use_siren=False,\n ):\n \"\"\"\n W_geo_feat: to set whether to use nerf-like geometry feature or IDR-like geometry feature.\n set to -1: nerf-like, the output feature is the second to last level's feature of the geometry network.\n set to >0: IDR-like ,the output feature is the last part of the geometry network's output.\n \"\"\"\n super().__init__()\n # occ_net_list = [\n # nn.Sequential(\n # nn.Linear(input_ch, W),\n # nn.Softplus(),\n # )\n # ] + [\n # nn.Sequential(\n # nn.Linear(W, W),\n # nn.Softplus()\n # ) for _ in range(D-2)\n # ] + [\n # nn.Linear(W, 1)\n # ]\n self.radius_init = radius_init\n self.radius_init_inside_out = radius_init_inside_out\n self.register_buffer(\n \"obj_bounding_size\", torch.tensor([obj_bounding_size]).float()\n )\n self.geometric_init = geometric_init\n self.D = D\n self.W = W\n self.W_geo_feat = W_geo_feat\n if use_siren:\n assert len(skips) == 0, \"do not use skips for siren\"\n self.register_buffer(\n \"is_pretrained\", torch.tensor([False], dtype=torch.bool)\n )\n self.skips = skips\n self.use_siren = use_siren\n self.embed_fn, input_ch = get_embedder(embed_multires)\n input_ch += input_obj_ch\n self.input_obj_ch = input_obj_ch\n\n surface_fc_layers = []\n # NOTE: as in IDR/NeuS, the network's has D+1 layers\n for l in range(D + 1):\n # decide out_dim\n if l == D:\n if W_geo_feat > 0:\n out_dim = 1 + W_geo_feat\n else:\n out_dim = 1\n elif (l + 1) in self.skips:\n out_dim = (\n W - input_ch\n ) # recude output dim before the skips layers, as in IDR / NeuS\n else:\n out_dim = W\n\n # decide in_dim\n if l == 0:\n in_dim = input_ch\n else:\n in_dim = W\n\n if l != D:\n if use_siren:\n layer = SirenLayer(in_dim, out_dim, is_first=(l == 0))\n else:\n # NOTE: beta=100 is important! 
Otherwise, the initial output would all be > 10, and there is not initial sphere.\n layer = DenseLayer(\n in_dim, out_dim, activation=nn.Softplus(beta=100)\n )\n else:\n layer = nn.Linear(in_dim, out_dim)\n\n # if true preform preform geometric initialization\n if geometric_init and not use_siren:\n # --------------\n # sphere init, as in SAL / IDR.\n # --------------\n if l == D:\n if inside_out:\n nn.init.normal_(\n layer.weight,\n mean=-np.sqrt(np.pi) / np.sqrt(in_dim),\n std=0.0001,\n )\n nn.init.constant_(layer.bias, radius_init_inside_out)\n else:\n nn.init.normal_(\n layer.weight,\n mean=np.sqrt(np.pi) / np.sqrt(in_dim),\n std=0.0001,\n )\n nn.init.constant_(layer.bias, -radius_init)\n elif embed_multires > 0 and l == 0:\n torch.nn.init.constant_(layer.bias, 0.0)\n torch.nn.init.constant_(\n layer.weight[:, 3:], 0.0\n ) # let the initial weights for octaves to be 0.\n torch.nn.init.normal_(\n layer.weight[:, :3], 0.0, np.sqrt(2) / np.sqrt(out_dim)\n )\n elif embed_multires > 0 and l in self.skips:\n torch.nn.init.constant_(layer.bias, 0.0)\n torch.nn.init.normal_(\n layer.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim)\n )\n torch.nn.init.constant_(\n layer.weight[:, -(input_ch - 3) :], 0.0\n ) # NOTE: this contrains the concat order to be [h, x_embed]\n else:\n nn.init.constant_(layer.bias, 0.0)\n nn.init.normal_(layer.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))\n\n if weight_norm:\n layer = nn.utils.weight_norm(layer)\n\n surface_fc_layers.append(layer)\n\n self.surface_fc_layers = nn.ModuleList(surface_fc_layers)\n\n def pretrain_hook(self, configs={}):\n configs[\"target_radius\"] = self.radius_init\n # TODO: more flexible, bbox-like scene bound.\n configs[\"obj_bounding_size\"] = self.obj_bounding_size.item()\n if self.geometric_init and self.use_siren and not self.is_pretrained:\n pretrain_siren_sdf(self, **configs)\n self.is_pretrained = ~self.is_pretrained\n return True\n return False\n\n def forward(self, x: torch.Tensor, obj_code: torch.Tensor = None, return_h=False):\n if self.input_obj_ch > 0:\n x = self.embed_fn(x)\n x = torch.cat([x, obj_code], -1)\n else:\n # x: [1, N_rays, 3]\n x = self.embed_fn(x)\n # assert obj_code is None\n\n h = x\n for i in range(self.D):\n if i in self.skips:\n # NOTE: concat order can not change! there are special operations taken in intialization.\n h = torch.cat([h, x], dim=-1) / np.sqrt(2)\n h = self.surface_fc_layers[i](h)\n\n out = self.surface_fc_layers[-1](h)\n\n if self.W_geo_feat > 0:\n h = out[..., 1:]\n out = out[..., :1].squeeze(-1)\n else:\n out = out.squeeze(-1)\n if return_h:\n return out, h\n else:\n return out\n\n def forward_with_nablas(\n self,\n x: torch.Tensor,\n obj_code: torch.Tensor = None,\n has_grad_bypass: bool = None,\n ):\n has_grad = (\n torch.is_grad_enabled() if has_grad_bypass is None else has_grad_bypass\n )\n # force enabling grad for normal calculation\n with torch.enable_grad():\n x = x.requires_grad_(True)\n implicit_surface_val, h = self.forward(x, obj_code=obj_code, return_h=True)\n nabla = autograd.grad(\n implicit_surface_val,\n x,\n torch.ones_like(implicit_surface_val, device=x.device),\n create_graph=has_grad,\n retain_graph=has_grad,\n only_inputs=True,\n )[0]\n if not has_grad:\n implicit_surface_val = implicit_surface_val.detach()\n nabla = nabla.detach()\n h = h.detach()\n return implicit_surface_val, nabla, h" } ]
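This closes the context snippets for the first record (NeuS-style volume rendering and the ImplicitSurface SDF network). For orientation, the sketch below reimplements the alpha-compositing step that the snippet's `alpha_to_w` / `visibility_weights` lines rely on; it is a minimal sketch inferred from the `alphas_shifted` / `torch.cumprod` pattern visible in the record, and the function name `alpha_to_visibility_weights` is illustrative, not the repository's.

import torch

def alpha_to_visibility_weights(alphas: torch.Tensor) -> torch.Tensor:
    """Convert per-sample alphas [..., N_samples] into compositing weights.

    weight_i = alpha_i * prod_{j < i} (1 - alpha_j): each sample's opacity
    scaled by the transmittance accumulated before it along the ray.
    """
    shifted = torch.cat(
        [torch.ones_like(alphas[..., :1]), 1.0 - alphas + 1e-10], dim=-1
    )
    transmittance = torch.cumprod(shifted[..., :-1], dim=-1)
    return alphas * transmittance

Summing these weights over the sample dimension gives the accumulated opacity (`acc_map`) used for the white-background correction in the record.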
import ipdb
import torch
import sys
import os
import copy
from typing import List, Dict, Any
from einops import rearrange, reduce, repeat
from models_neurecon.neus import NeuS, volume_render
from models_neurecon.base import ImplicitSurface
9,660
render_mask=False, # only works for NeuS refine_edge_obj_ids=[], # chunk=4096, chunk=99999999, # chunk should be controlled outside extra_dict: Dict[str, Any] = {}, render_kwargs: Dict[str, Any] = {}, ): assert len(rays_list) == len(obj_instance_ids) if render_mask: assert use_sphere_tracing, "render_mask only support sphere_tracing mode" results = {} if use_sphere_tracing: chunk = 99999999 # sphere_tracing allows larger chunk size else: # hit_test_only works only for sphere tracing mode hit_test_only = False rgbs_list = [] alphas_list = [] z_vals_list = [] for i in range(len(rays_list)): # hack to suppress zero points # zero_mask = z_vals[:, -1] == 0 # xyz_fine[zero_mask] = 0 obj_id = obj_instance_ids[i] if len(refine_edge_obj_ids) > 0: if obj_id in refine_edge_obj_ids: refine_edge = True else: refine_edge = False rays = rays_list[i] N_rays = rays.shape[0] obj_code = extra_dict[f"embedding_inst_{obj_id}"].view(N_rays, -1) light_code = ( extra_dict[f"embedding_light_{obj_id}"].view(N_rays, -1) if f"embedding_light_{obj_id}" in extra_dict else None ) appearance_code = ( extra_dict[f"embedding_appearance_{obj_id}"].view(N_rays, -1) if f"embedding_appearance_{obj_id}" in extra_dict else None ) model = models[f"neus_{obj_id}"] rays_o = rays[:, 0:3].view(N_rays, 3) rays_d = rays[:, 3:6].view(N_rays, 3) near_bypass = rays[:, 6].view(N_rays, 1) far_bypass = rays[:, 7].view(N_rays, 1) zero_mask = (far_bypass != 0).squeeze() device = rays_o.device dtype = rays_o.dtype rays_o = rays_o[zero_mask] rays_d = rays_d[zero_mask] near_bypass = near_bypass[zero_mask] far_bypass = far_bypass[zero_mask] obj_code = obj_code[zero_mask] light_code = None if light_code is None else light_code[zero_mask] appearance_code = ( None if appearance_code is None else appearance_code[zero_mask] ) if rays_o.shape[0] > 0: # if have valid rays to render if use_sphere_tracing: render_res = sphere_tracing_rendering( model=model, rays_o=rays_o, rays_d=rays_d, near=near_bypass, far=far_bypass, obj_code=obj_code, light_code=light_code, appearance_code=appearance_code, hit_test_only=hit_test_only, need_normal=need_normal, refine_edge=False if obj_id == 0 else refine_edge, # do not refine edge for background chunk=chunk, ) z_vals = render_res["z_vals"] alphas = render_res["alphas"] if not hit_test_only: rgbs = render_res["rgbs"] if need_normal: results[f"normals_{obj_id}"] = render_res["normals"] else: if ( safe_region_volume_rendering ): # we first use sphere tracing to get the exact distance with torch.no_grad(): # acceletate with sphere tracing render_res_sphere = sphere_tracing_rendering( model=model, rays_o=rays_o, rays_d=rays_d, near=near_bypass, far=far_bypass, obj_code=obj_code, light_code=light_code, appearance_code=appearance_code, refine_edge=False, hit_test_only=True, need_normal=need_normal, chunk=chunk, ) # get exact depth to the surface depth = render_res_sphere["z_vals"].view(-1, 1) # set near/far near the surface near_bypass = torch.clamp_min(depth - 0.1, 0.0) far_bypass = torch.clamp_min(depth + 0.05, 0.0) render_kwargs = copy.deepcopy(render_kwargs) # with the correct surface, we can render with little sampling points render_kwargs["N_samples"] = 8 render_kwargs["N_importance"] = 16
sys.path.append(os.getcwd()) # noqa def volume_rendering_multi_neus( results, typ, z_vals_list, rgbs_list, alphas_list, noise_std, white_back, obj_ids_list=None, ): N_objs = len(z_vals_list) # order via z_vals z_vals = torch.cat(z_vals_list, 1) # (N_rays, N_samples*N_objs) rgbs = torch.cat(rgbs_list, 1) # (N_rays, N_samples*N_objs, 3) alphas = torch.cat(alphas_list, 1) # (N_rays, N_samples*N_objs) z_vals, idx_sorted = torch.sort(z_vals, -1) for i in range(3): rgbs[:, :, i] = torch.gather(rgbs[:, :, i].clone(), dim=1, index=idx_sorted) alphas = torch.gather(alphas, dim=1, index=idx_sorted) # record object ids for recovering weights of each object after sorting if obj_ids_list != None: obj_ids = torch.cat(obj_ids_list, -1) results[f"obj_ids_{typ}"] = torch.gather(obj_ids, dim=1, index=idx_sorted) alphas_shifted = torch.cat( [torch.ones_like(alphas[:, :1]), 1 - alphas + 1e-10], -1 ) # [1, 1-a1, 1-a2, ...] weights = alphas * torch.cumprod(alphas_shifted[:, :-1], -1) # (N_rays, N_samples_) weights_sum = reduce( weights, "n1 n2 -> n1", "sum" ) # (N_rays), the accumulated opacity along the rays # equals "1 - (1-a1)(1-a2)...(1-an)" mathematically # results[f"weights_{typ}"] = weights results[f"opacity_{typ}"] = weights_sum # results[f"z_vals_{typ}"] = z_vals rgb_map = reduce( rearrange(weights, "n1 n2 -> n1 n2 1") * rgbs, "n1 n2 c -> n1 c", "sum" ) depth_map = reduce(weights * z_vals, "n1 n2 -> n1", "sum") if white_back: rgb_map = rgb_map + 1 - weights_sum.unsqueeze(-1) results[f"rgb_{typ}"] = rgb_map results[f"depth_{typ}"] = depth_map # adopt from neurecon/ray_casting.py def sphere_tracing_surface_points( implicit_surface: ImplicitSurface, rays_o: torch.Tensor, rays_d: torch.Tensor, # function config obj_code: torch.Tensor, near: torch.Tensor, far: torch.Tensor, # algorithm config # stop_sdf_th: float = 0.0, # N_iters: int = 20, N_iters: int = 50, near_surface_th: float = 0.0, sdf_eps: float = 5e-3, ): """ rays_o, rays_d: torch.Tensor [N_rays, 3] obj_code: torch.Tensor [N_rays, N_channel] near: torch.Tensor [N_rays] far: torch.Tensor [N_rays] near_surface_th: also set the output mask to false when hit point not near the surface """ device = rays_o.device if isinstance(near, float): d_preds = torch.ones([*rays_o.shape[:-1]], device=device) * near else: d_preds = near mask = torch.ones_like(d_preds, dtype=torch.bool, device=device) N_rays = d_preds.shape[0] for _ in range(N_iters): pts = rays_o + rays_d * d_preds[..., :, None] surface_val = implicit_surface.forward(pts, obj_code) # surface_val = surface_val - stop_sdf_th # d_preds[mask] += surface_val[mask] d_preds = d_preds + surface_val * mask.float() mask[d_preds > far] = False mask[d_preds < 0] = False # mark unfinished mask_unfinish = surface_val.abs() > sdf_eps mask_unfinish[~mask] = False if mask_unfinish.sum() == 0: # print(_) break pts = rays_o + rays_d * d_preds[..., :, None] if near_surface_th != 0: mask = torch.logical_and(mask, surface_val.abs() < near_surface_th) return d_preds, pts, mask, surface_val def sphere_tracing_rendering( model: NeuS, rays_o: torch.Tensor, rays_d: torch.Tensor, near: torch.Tensor, far: torch.Tensor, obj_code: torch.Tensor, light_code: torch.Tensor, appearance_code: torch.Tensor, hit_test_only: bool, need_normal: bool, refine_edge: bool, chunk: int, ): d_pred_chunk = [] rgb_chunk = [] normal_chunk = [] alpha_chunk = [] # pt_pred_chunk = [] # mask_chunk = [] B = rays_o.shape[0] for i in range(0, B, chunk): d_pred, pt_pred, mask, last_sdf = sphere_tracing_surface_points( implicit_surface=model.implicit_surface, 
rays_o=rays_o[i : i + chunk], rays_d=rays_d[i : i + chunk], near=near[i : i + chunk].squeeze(1), far=far[i : i + chunk].squeeze(1), obj_code=obj_code[i : i + chunk], near_surface_th=0.05 if hit_test_only else 0, ) d_pred_chunk += [d_pred] alpha = torch.zeros_like(d_pred) alpha[mask] = 1 alpha_chunk += [alpha] if not hit_test_only: rgb, sdf, nablas = model.forward( pt_pred, obj_code[i : i + chunk], None if light_code is None else light_code[i : i + chunk], rays_d[i : i + chunk], None if appearance_code is None else appearance_code[i : i + chunk], ) rgb_chunk += [rgb] if need_normal or refine_edge: _, normal, _ = model.implicit_surface.forward_with_nablas( pt_pred, obj_code[i : i + chunk] ) normal_chunk += [normal] if refine_edge: # compute cos_angle of hit ray and surface normal # for edges near to the perpendicular, we dim the alpha normal_reg = torch.nn.functional.normalize(normal, dim=1) cos_angle = -(rays_d[i : i + chunk] * normal_reg).sum(-1) # do not affect other visible part that far from perpendicular mask_merged = torch.logical_and(mask, cos_angle < 0) alpha[mask_merged] = 0 # just set to 0 is enough # alpha[mask] = torch.relu(torch.tanh(cos_angle[mask] * 2)) alpha_chunk[-1] = alpha d_pred = torch.cat(d_pred_chunk, 0) alpha = torch.cat(alpha_chunk, 0) ret_res = { "alphas": alpha.unsqueeze(1), "z_vals": d_pred.unsqueeze(1), } if not hit_test_only: ret_res["rgbs"] = torch.cat(rgb_chunk, 0).unsqueeze(1) if need_normal: ret_res["normals"] = torch.cat(normal_chunk, 0).unsqueeze(1) return ret_res def render_rays_multi_neus( room_optimizer, models: Dict[str, NeuS], rays_list: List[torch.Tensor], obj_instance_ids: List[int], noise_std=0, white_back=False, use_sphere_tracing=True, refine_edge=False, safe_region_volume_rendering=True, hit_test_only=False, # only works for NeuS need_normal=False, # only works for NeuS render_mask=False, # only works for NeuS refine_edge_obj_ids=[], # chunk=4096, chunk=99999999, # chunk should be controlled outside extra_dict: Dict[str, Any] = {}, render_kwargs: Dict[str, Any] = {}, ): assert len(rays_list) == len(obj_instance_ids) if render_mask: assert use_sphere_tracing, "render_mask only support sphere_tracing mode" results = {} if use_sphere_tracing: chunk = 99999999 # sphere_tracing allows larger chunk size else: # hit_test_only works only for sphere tracing mode hit_test_only = False rgbs_list = [] alphas_list = [] z_vals_list = [] for i in range(len(rays_list)): # hack to suppress zero points # zero_mask = z_vals[:, -1] == 0 # xyz_fine[zero_mask] = 0 obj_id = obj_instance_ids[i] if len(refine_edge_obj_ids) > 0: if obj_id in refine_edge_obj_ids: refine_edge = True else: refine_edge = False rays = rays_list[i] N_rays = rays.shape[0] obj_code = extra_dict[f"embedding_inst_{obj_id}"].view(N_rays, -1) light_code = ( extra_dict[f"embedding_light_{obj_id}"].view(N_rays, -1) if f"embedding_light_{obj_id}" in extra_dict else None ) appearance_code = ( extra_dict[f"embedding_appearance_{obj_id}"].view(N_rays, -1) if f"embedding_appearance_{obj_id}" in extra_dict else None ) model = models[f"neus_{obj_id}"] rays_o = rays[:, 0:3].view(N_rays, 3) rays_d = rays[:, 3:6].view(N_rays, 3) near_bypass = rays[:, 6].view(N_rays, 1) far_bypass = rays[:, 7].view(N_rays, 1) zero_mask = (far_bypass != 0).squeeze() device = rays_o.device dtype = rays_o.dtype rays_o = rays_o[zero_mask] rays_d = rays_d[zero_mask] near_bypass = near_bypass[zero_mask] far_bypass = far_bypass[zero_mask] obj_code = obj_code[zero_mask] light_code = None if light_code is None else light_code[zero_mask] 
appearance_code = ( None if appearance_code is None else appearance_code[zero_mask] ) if rays_o.shape[0] > 0: # if have valid rays to render if use_sphere_tracing: render_res = sphere_tracing_rendering( model=model, rays_o=rays_o, rays_d=rays_d, near=near_bypass, far=far_bypass, obj_code=obj_code, light_code=light_code, appearance_code=appearance_code, hit_test_only=hit_test_only, need_normal=need_normal, refine_edge=False if obj_id == 0 else refine_edge, # do not refine edge for background chunk=chunk, ) z_vals = render_res["z_vals"] alphas = render_res["alphas"] if not hit_test_only: rgbs = render_res["rgbs"] if need_normal: results[f"normals_{obj_id}"] = render_res["normals"] else: if ( safe_region_volume_rendering ): # we first use sphere tracing to get the exact distance with torch.no_grad(): # acceletate with sphere tracing render_res_sphere = sphere_tracing_rendering( model=model, rays_o=rays_o, rays_d=rays_d, near=near_bypass, far=far_bypass, obj_code=obj_code, light_code=light_code, appearance_code=appearance_code, refine_edge=False, hit_test_only=True, need_normal=need_normal, chunk=chunk, ) # get exact depth to the surface depth = render_res_sphere["z_vals"].view(-1, 1) # set near/far near the surface near_bypass = torch.clamp_min(depth - 0.1, 0.0) far_bypass = torch.clamp_min(depth + 0.05, 0.0) render_kwargs = copy.deepcopy(render_kwargs) # with the correct surface, we can render with little sampling points render_kwargs["N_samples"] = 8 render_kwargs["N_importance"] = 16
render_res = volume_render(
1
2023-10-15 08:41:29+00:00
12k
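End of the first record. As a point of reference, the following is a condensed sketch of the sphere-tracing loop that the record's `sphere_tracing_surface_points` implements: each ray is advanced by the signed distance returned by the implicit surface until it converges to the zero level set or leaves the [near, far] interval. It is a simplified paraphrase for illustration, not the record's exact code; `sdf_fn` stands in for `implicit_surface.forward`.

import torch

def sphere_trace(sdf_fn, rays_o, rays_d, near, far, n_iters=50, eps=5e-3):
    # rays_o, rays_d: [N_rays, 3]; near, far: [N_rays]
    d = near.clone()
    mask = torch.ones_like(d, dtype=torch.bool)   # rays still counted as hits
    for _ in range(n_iters):
        pts = rays_o + rays_d * d[..., None]      # current marching points
        sdf = sdf_fn(pts)                         # signed distance per ray
        d = d + sdf * mask.float()                # advance by the SDF value
        mask = mask & (d <= far) & (d >= 0)       # drop rays that left the range
        if ((sdf.abs() > eps) & mask).sum() == 0: # all surviving rays converged
            break
    pts = rays_o + rays_d * d[..., None]
    return d, pts, mask

In the record, the converged depth and hit mask feed directly into the per-ray alpha (`alpha[mask] = 1`) and, under `safe_region_volume_rendering`, into the narrowed near/far bounds that allow volume rendering with very few samples.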
chenxn2020/GOSE
GOSEfinetune/models/layoutlmv2/modeling_layoutlmv2.py
[ { "identifier": "ReOutput", "path": "GOSEfinetune/utils.py", "snippet": "class ReOutput(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n entities: Optional[Dict] = None\n relations: Optional[Dict] = None\n pred_relations: Optional[Dict] = None" }, { "identifier": "LayoutLMv2Config", "path": "GOSEfinetune/models/layoutlmv2/configuration_layoutlmv2.py", "snippet": "class LayoutLMv2Config(LayoutLMConfig):\n model_type = \"layoutlmv2\"\n\n def __init__(\n self,\n vocab_size=30522,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02,\n layer_norm_eps=1e-12,\n pad_token_id=0,\n gradient_checkpointing=False,\n max_2d_position_embeddings=1024,\n max_rel_pos=128,\n rel_pos_bins=32,\n fast_qkv=True,\n max_rel_2d_pos=256,\n rel_2d_pos_bins=64,\n convert_sync_batchnorm=True,\n image_feature_pool_shape=[7, 7, 256],\n coordinate_size=128,\n shape_size=128,\n has_relative_attention_bias=True,\n has_spatial_attention_bias=True,\n has_visual_segment_embedding=False,\n **kwargs\n ):\n super().__init__(\n vocab_size=vocab_size,\n hidden_size=hidden_size,\n num_hidden_layers=num_hidden_layers,\n num_attention_heads=num_attention_heads,\n intermediate_size=intermediate_size,\n hidden_act=hidden_act,\n hidden_dropout_prob=hidden_dropout_prob,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n max_position_embeddings=max_position_embeddings,\n type_vocab_size=type_vocab_size,\n initializer_range=initializer_range,\n layer_norm_eps=layer_norm_eps,\n pad_token_id=pad_token_id,\n gradient_checkpointing=gradient_checkpointing,\n **kwargs,\n )\n self.max_2d_position_embeddings = max_2d_position_embeddings\n self.max_rel_pos = max_rel_pos\n self.rel_pos_bins = rel_pos_bins\n self.fast_qkv = fast_qkv\n self.max_rel_2d_pos = max_rel_2d_pos\n self.rel_2d_pos_bins = rel_2d_pos_bins\n self.convert_sync_batchnorm = convert_sync_batchnorm\n self.image_feature_pool_shape = image_feature_pool_shape\n self.coordinate_size = coordinate_size\n self.shape_size = shape_size\n self.has_relative_attention_bias = has_relative_attention_bias\n self.has_spatial_attention_bias = has_spatial_attention_bias\n self.has_visual_segment_embedding = has_visual_segment_embedding" }, { "identifier": "add_layoutlmv2_config", "path": "GOSEfinetune/models/layoutlmv2/detectron2_config.py", "snippet": "def add_layoutlmv2_config(cfg):\n _C = cfg\n # -----------------------------------------------------------------------------\n # Config definition\n # -----------------------------------------------------------------------------\n _C.MODEL.MASK_ON = True\n\n # When using pre-trained models in Detectron1 or any MSRA models,\n # std has been absorbed into its conv1 weights, so the std needs to be set 1.\n # Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)\n _C.MODEL.PIXEL_STD = [57.375, 57.120, 58.395]\n\n # ---------------------------------------------------------------------------- #\n # Backbone options\n # ---------------------------------------------------------------------------- #\n _C.MODEL.BACKBONE.NAME = \"build_resnet_fpn_backbone\"\n\n # ---------------------------------------------------------------------------- #\n # FPN options\n # 
---------------------------------------------------------------------------- #\n # Names of the input feature maps to be used by FPN\n # They must have contiguous power of 2 strides\n # e.g., [\"res2\", \"res3\", \"res4\", \"res5\"]\n _C.MODEL.FPN.IN_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n\n # ---------------------------------------------------------------------------- #\n # Anchor generator options\n # ---------------------------------------------------------------------------- #\n # Anchor sizes (i.e. sqrt of area) in absolute pixels w.r.t. the network input.\n # Format: list[list[float]]. SIZES[i] specifies the list of sizes\n # to use for IN_FEATURES[i]; len(SIZES) == len(IN_FEATURES) must be true,\n # or len(SIZES) == 1 is true and size list SIZES[0] is used for all\n # IN_FEATURES.\n _C.MODEL.ANCHOR_GENERATOR.SIZES = [[32], [64], [128], [256], [512]]\n\n # ---------------------------------------------------------------------------- #\n # RPN options\n # ---------------------------------------------------------------------------- #\n # Names of the input feature maps to be used by RPN\n # e.g., [\"p2\", \"p3\", \"p4\", \"p5\", \"p6\"] for FPN\n _C.MODEL.RPN.IN_FEATURES = [\"p2\", \"p3\", \"p4\", \"p5\", \"p6\"]\n # Number of top scoring RPN proposals to keep before applying NMS\n # When FPN is used, this is *per FPN level* (not total)\n _C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 2000\n _C.MODEL.RPN.PRE_NMS_TOPK_TEST = 1000\n # Number of top scoring RPN proposals to keep after applying NMS\n # When FPN is used, this limit is applied per level and then again to the union\n # of proposals from all levels\n # NOTE: When FPN is used, the meaning of this config is different from Detectron1.\n # It means per-batch topk in Detectron1, but per-image topk here.\n # See the \"find_top_rpn_proposals\" function for details.\n _C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 1000\n _C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000\n\n # ---------------------------------------------------------------------------- #\n # ROI HEADS options\n # ---------------------------------------------------------------------------- #\n _C.MODEL.ROI_HEADS.NAME = \"StandardROIHeads\"\n # Number of foreground classes\n _C.MODEL.ROI_HEADS.NUM_CLASSES = 5\n # Names of the input feature maps to be used by ROI heads\n # Currently all heads (box, mask, ...) 
use the same input feature map list\n # e.g., [\"p2\", \"p3\", \"p4\", \"p5\"] is commonly used for FPN\n _C.MODEL.ROI_HEADS.IN_FEATURES = [\"p2\", \"p3\", \"p4\", \"p5\"]\n\n # ---------------------------------------------------------------------------- #\n # Box Head\n # ---------------------------------------------------------------------------- #\n # C4 don't use head name option\n # Options for non-C4 models: FastRCNNConvFCHead,\n _C.MODEL.ROI_BOX_HEAD.NAME = \"FastRCNNConvFCHead\"\n _C.MODEL.ROI_BOX_HEAD.NUM_FC = 2\n _C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14\n\n # ---------------------------------------------------------------------------- #\n # Mask Head\n # ---------------------------------------------------------------------------- #\n _C.MODEL.ROI_MASK_HEAD.NAME = \"MaskRCNNConvUpsampleHead\"\n _C.MODEL.ROI_MASK_HEAD.NUM_CONV = 4 # The number of convs in the mask head\n _C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 7\n\n # ---------------------------------------------------------------------------- #\n # ResNe[X]t options (ResNets = {ResNet, ResNeXt}\n # Note that parts of a resnet may be used for both the backbone and the head\n # These options apply to both\n # ---------------------------------------------------------------------------- #\n _C.MODEL.RESNETS.DEPTH = 101\n _C.MODEL.RESNETS.SIZES = [[32], [64], [128], [256], [512]]\n _C.MODEL.RESNETS.ASPECT_RATIOS = [[0.5, 1.0, 2.0]]\n _C.MODEL.RESNETS.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"] # res4 for C4 backbone, res2..5 for FPN backbone\n\n # Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt\n _C.MODEL.RESNETS.NUM_GROUPS = 32\n\n # Baseline width of each group.\n # Scaling this parameters will scale the width of all bottleneck layers.\n _C.MODEL.RESNETS.WIDTH_PER_GROUP = 8\n\n # Place the stride 2 conv on the 1x1 filter\n # Use True only for the original MSRA ResNet; use False for C2 and Torch models\n _C.MODEL.RESNETS.STRIDE_IN_1X1 = False" }, { "identifier": "GOSE", "path": "GOSEfinetune/modules/decoders/gose.py", "snippet": "class GOSE(nn.Module):\n def __init__(self, args):\n super().__init__()\n #(rounds,num_heads)\n # self.rounds = 4\n self.args = args\n self.rounds = args.rounds+1\n self.norm = False\n if args.backbone_name == 'lilt':\n self.hidden_size = 960\n elif args.backbone_name == 'xlm':\n self.hidden_size = 768\n self.hidden_dropout_prob = 0.5\n #默认only-mean pooling\n self.pooling_mode = args.pooling_mode\n self.use_gam = args.use_gam\n self.loss_fct = CrossEntropyLoss()\n self.use_prefix = args.use_prefix\n #---对global-attention使用稀疏注意力\n self.use_global_mask = args.use_global_mask\n #--------\n self.use_gate = args.use_gate\n print(f\"**********************************Backbone: {args.backbone_name}****************************\")\n print(f\"**********************************Use_GAM: {self.use_gam}************************************\")\n print(f\"**********************************Use_Prefix: {self.use_prefix}********************************\")\n print(f\"**********************************Use_Gate: {self.use_gate}************************************\")\n # print(f\"**********************************Use_Global_Mask: {self.use_global_mask}**********************\")\n print(f\"**********************************Pooling_Mode: {self.pooling_mode}****************************\")\n print(f\"**********************************Iterative_Rounds: {self.rounds-1}****************************\")\n print(f\"**************************************************************\")\n 
print(f\"**********************************No_Iteration: {self.args.no_it}********************************\")\n print(f\"**********************************No_Global: {self.args.no_global}********************************\")\n print(f\"**********************************Window_size: {self.args.window_size}********************************\")\n # self.mode = 'only-mean'\n # self.mode = 'only-max'\n # self.mode = 'attn-max'\n\n\n \n self.dropout = nn.Dropout(self.hidden_dropout_prob)\n self.elu=nn.ELU()\n self.biaffine = BiaffineAttention(self.hidden_size//2 , 2)\n self.ffn = nn.Linear(2, self.hidden_size//2)\n self.ffn_key = nn.Linear(self.hidden_size, self.hidden_size//2)\n self.ffn_value = nn.Linear(self.hidden_size, self.hidden_size//2)\n\n # attention config\n self.dim = self.hidden_size //2\n self.num_heads = 1\n self.num_tokens = 8 # max_len = 8\n self.window_size = args.window_size # 8 # window_size * S = H \n self.qkv_bias = False\n self.drop = 0\n self.attn_drop = 0\n self.drop_path = 0\n self.max_len = args.max_len #64\n self.norm1 = nn.LayerNorm(self.dim)\n self.norm2 = nn.LayerNorm(self.dim)\n self.global_token_num = args.global_token_num\n print(f\"**********************************Global_token: {self.global_token_num}****************************\")\n self.global_token = nn.Parameter(torch.zeros(1, self.global_token_num, self.hidden_size //2))\n self.attn = Attention(self.dim,num_heads=self.num_heads, num_tokens=self.num_tokens, \n window_size=self.window_size,qkv_bias=self.qkv_bias, \n attn_drop=self.attn_drop, proj_drop=self.drop, args=args)\n\n self.cnt = 0\n self.loss_fcn = CrossEntropyLoss()\n self.normal = True\n self.dummy_vec = nn.Parameter(torch.Tensor(1, self.hidden_size//2))\n nn.init.normal_(self.dummy_vec)\n #----gate\n self.gru = GRU(self.hidden_size//2) \n #---layout-prefix-tuning\n self.axis_dis_fn = nn.Linear(1, self.hidden_size//12)\n self.axis_angle_fn = nn.Linear(1, self.hidden_size//12)\n \n def create_global_mask(self):\n global_mask = torch.zeros(self.global_token_num, self.max_len, self.max_len).cuda()\n step = self.num_tokens\n for idx in range(self.global_token_num):\n row_ids = idx // self.num_tokens\n column_ids = idx % self.num_tokens\n row_start = row_ids * step\n column_start = column_ids * step\n global_mask[idx, row_start:row_start+self.num_tokens,:] = 1\n global_mask[idx, :, column_start:column_start+self.num_tokens] = 1\n return global_mask\n \n def get_entities_kv_index_list(self, entities):\n\n M = self.max_len\n entities_label = entities['label']\n\n entities_key_index = [index for index,label in enumerate(entities_label) if label == 1 ]\n entities_value_index = [index for index,label in enumerate(entities_label) if label == 2 ] \n key_num, value_num = len(entities_key_index),len(entities_value_index)\n '''\n in re.py\n if len(all_possible_relations) == 0:\n all_possible_relations = set([(0, 1)])\n '''\n if key_num * value_num == 0:\n #print(\"all_possible_relations == 0\")\n entities_key_index = [0]\n entities_value_index = [1]\n if key_num > M :\n entities_key_index = entities_key_index[:M]\n self.normal = False\n if value_num > M :\n entities_value_index = entities_value_index[:M]\n self.normal = False\n\n return entities_key_index, entities_value_index\n\n \n def forward(self, hidden_state, entities,relations, bbox):\n #if self.cnt == 30: set the num + 1 which failed\n # from IPython import embed;embed()\n self.cnt += 1\n B ,_ ,H = hidden_state.shape\n M = self.max_len\n device = hidden_state.device\n\n loss = 0\n all_pred_relations = 
[]\n\n # B len(entities)\n # entities_label = torch.stack([torch.tensor(dict['label']) for dict in entities],dim=0)\n # padding to max_len M 64\n \n key_repr_list = []\n value_repr_list = []\n key_mask_list = []\n value_mask_list = []\n key_bbox_list, value_bbox_list = [], []\n for b in range(B):\n #key_repr ~ N,H -> 64,H/2\n #value_repr ~ M,H -> 64,H/2\n if len(entities[b][\"start\"]) <= 2:\n entities[b] = {\"end\": [1, 1], \"label\": [0, 0], \"start\": [0, 0]}\n \n entities_key_index, entities_value_index = self.get_entities_kv_index_list(entities[b])\n entities_first_token_index = torch.tensor(entities[b]['start'])\n \n entities_key_first_token_index = entities_first_token_index[entities_key_index]\n entities_value_first_token_index = entities_first_token_index[entities_value_index]\n key_repr = hidden_state[b][entities_key_first_token_index,:]\n value_repr = hidden_state[b][entities_value_first_token_index,:]\n \n key_num,value_num = key_repr.shape[0],value_repr.shape[0]\n # padding key_repr key_num,H -> max_len,H\n # generate mask shape like max_len,H\n \n key_mask_list.append(torch.tensor([[1.]] * key_num + [[0.]] * (M - key_num),device=device).repeat(1,H//2))\n value_mask_list.append(torch.tensor([[1.]] * value_num + [[0.]] * (M - value_num),device=device).repeat(1,H//2))\n # padding key_repr key_num,H -> max_len,H\n key_repr_list.append(F.pad(key_repr,(0, 0, 0, M - key_num)))\n value_repr_list.append(F.pad(value_repr,(0, 0, 0, M - value_num)))\n #----得到kv实体的bbox\n key_bbox = bbox[b][entities_key_first_token_index]\n value_bbox = bbox[b][entities_value_first_token_index]\n key_bbox_list.append(F.pad(key_bbox,(0, 0, 0, M - key_num)))\n value_bbox_list.append(F.pad(value_bbox,(0, 0, 0, M - value_num)))\n\n # batch max_len hidden_size\n key_repr = torch.stack(key_repr_list,dim=0) \n key_mask = torch.stack(key_mask_list,dim=0)\n \n value_repr = torch.stack(value_repr_list,dim=0)\n value_mask = torch.stack(value_mask_list,dim=0)\n \n\n #key_mask * value_mask -> table_mask B,M,H * B,M,H -> B M M H\n table_mask = key_mask.unsqueeze(2).repeat(1,1,M,1)\\\n *value_mask.unsqueeze(1).repeat(1,M,1,1)\n #---global_mask\n if self.use_global_mask:\n self.global_mask = self.create_global_mask()\n global_mask = self.global_mask.unsqueeze(0).repeat(B,1,1,1) #shape[bsz,global_token_num,M,M]\n # global_mask = global_mask.view(B, self.global_token_num, -1)\n else:\n global_mask = None\n \n \n key_mask = key_mask[:,:,0].bool()\n value_mask = value_mask[:,:,0].bool()\n key_ffn = self.ffn_key(key_repr)\n value_ffn = self.ffn_value(value_repr)\n \n if self.norm == True:\n key_ffn = self.norm1(key_repr)\n value_ffn = self.norm1(value_repr)\n global_token = self.global_token.expand(B, -1, -1)\n key_bbox = torch.stack(key_bbox_list, dim=0) \n value_bbox = torch.stack(value_bbox_list, dim=0) \n layout_repr = self.calc_layout(key_bbox, value_bbox)\n layout_repr = layout_repr * table_mask\n layout_repr = layout_repr.view(B,M*M,H//2)\n for i in range(self.rounds):\n '''\n method 1 with biaffine \n \n table_mask.shape B M M H/2 -> B M M H (M=64)\n table_logits.shape B M M H/2 -> B M M 2\n B M M 2 -> B M M H\n attention input B (64+1)*64 384\n table input 64 * 64 \n window_size 8\n token_num 64/8 * 64/8 = 64\n '''\n #key_ffn = self.ffn_key(key_repr)\n #value_ffn = self.ffn_value(value_repr)\n #key_ffn = self.ffn_key(key_ffn)\n #value_ffn = self.ffn_value(value_ffn)\n \n table_logits = self.biaffine(key_ffn.unsqueeze(2).repeat(1,1,M,1),\n value_ffn.unsqueeze(1).repeat(1,M,1,1))\n if i < self.rounds-1:\n table_logits = 
self.ffn(table_logits) * table_mask\n \n if self.use_gam:\n table_logits = table_logits.view(B,M*M,H//2)\n \n table_logits = torch.cat((global_token, table_logits), dim=1)\n if self.use_prefix:\n table_logits = self.attn(table_logits, M, M, table_mask, global_mask, layout_prefix=layout_repr, key_num=key_num, value_num=value_num)\n else:\n table_logits = self.attn(table_logits, M, M, table_mask, global_mask, layout_prefix=None)\n global_token_new = table_logits[:,:self.global_token_num,:]\n global_token = global_token + global_token_new\n table_logits = table_logits[:,self.global_token_num:,:]\n table_logits = table_logits.view(B,M,M,H//2)\n table_logits = table_logits * table_mask\n key_new, value_new = self.get_new_repr(table_logits, key_mask, value_mask)\n if self.norm == True:\n key_new = self.norm2(key_new)\n value_new = self.norm2(value_new)\n if self.use_gate:\n key_ffn = self.gru(key_ffn,key_new)\n value_ffn = self.gru(value_ffn,value_new)\n \n elif self.args.no_it:\n key_ffn = key_new\n value_ffn = value_new\n elif self.args.use_add:\n key_ffn = key_ffn + key_new\n value_ffn = value_ffn + value_new \n else:\n table_logits = table_logits * table_mask[:,:,:,:2]\n\n # table_logits M N 2\n # table_logits.unsqueeze(0)\n # batch_table_logits = table_logits if batch_table_logits == None else torch.cat((batch_table_logits,table_logits),dim=0)\n\n loss = self.get_loss(table_logits,entities,relations,key_mask,value_mask)\n all_pred_relations = self.get_predicted_relations(table_logits,entities,key_mask,value_mask, bbox)\n return loss,all_pred_relations\n \n def calc_layout(self, head_bbox, tail_bbox):\n bsz, num, _ = head_bbox.shape\n head_bbox = head_bbox.unsqueeze(2).repeat(1,1,num,1)\n tail_bbox = tail_bbox.unsqueeze(1).repeat(1,num,1,1)\n \n #-----中心点坐标特征\n head_bbox_center = torch.div(torch.cat(((head_bbox[:,:,:,0]+head_bbox[:,:,:,2]).view(-1,1), (head_bbox[:,:,:,1]+head_bbox[:,:,:,3]).view(-1,1)),dim=1), 2)\n tail_bbox_center = torch.div(torch.cat(((tail_bbox[:,:,:,0]+tail_bbox[:,:,:,2]).view(-1,1), (tail_bbox[:,:,:,1]+tail_bbox[:,:,:,3]).view(-1,1)),dim=1), 2)\n head_tail_center_dis, hea_tail_center_angle = self.axis_features(head_bbox_center, tail_bbox_center)\n head_tail_center_dis_feature = self.axis_dis_fn(head_tail_center_dis)\n head_tail_center_angle_feature = self.axis_angle_fn(hea_tail_center_angle)\n #-----左上点坐标特征\n head_bbox_left_top = torch.cat((head_bbox[:,:,:, 0].view(-1,1), head_bbox[:,:,:, 1].view(-1,1)), dim=1)\n tail_bbox_left_top = torch.cat((tail_bbox[:,:,:, 0].view(-1,1), tail_bbox[:,:,:, 1].view(-1,1)), dim=1)\n head_tail_lt_dis, hea_tail_lt_angle = self.axis_features(head_bbox_left_top, tail_bbox_left_top)\n head_tail_lt_dis_feature = self.axis_dis_fn(head_tail_lt_dis)\n hea_tail_lt_angle_feature = self.axis_angle_fn(hea_tail_lt_angle)\n #-----右下点坐标特征\n head_bbox_right_down = torch.cat((head_bbox[:,:,:, 2].view(-1,1), head_bbox[:,:,:, 3].view(-1,1)), dim=1)\n tail_bbox_right_down = torch.cat((tail_bbox[:,:,:, 2].view(-1,1), tail_bbox[:,:,:, 3].view(-1,1)), dim=1)\n head_tail_rd_dis, hea_tail_rd_angle = self.axis_features(head_bbox_right_down, tail_bbox_right_down)\n head_tail_rd_dis_feature = self.axis_dis_fn(head_tail_rd_dis)\n hea_tail_rd_angle_feature = self.axis_angle_fn(hea_tail_rd_angle)\n layout_repr = torch.cat(\n (head_tail_center_dis_feature, head_tail_center_angle_feature\n , head_tail_lt_dis_feature, hea_tail_lt_angle_feature\n , head_tail_rd_dis_feature, hea_tail_rd_angle_feature\n ),\n dim=-1\n )\n layout_repr = layout_repr.view(bsz, num, num, -1) 
\n return layout_repr\n \n \n \n def axis_features(self, tmp_bbox_1, tmp_bbox_2):\n tmp_bbox_distance = torch.pow(torch.sum(torch.pow(tmp_bbox_1 - tmp_bbox_2, 2), dim=1), 0.5) #欧氏距离\n tmp_bbox_distance = tmp_bbox_distance.view(-1, 1)\n ##########计算角度\n head_tail_x = tmp_bbox_1[:, 0] - tmp_bbox_2[:, 0]\n head_tail_y = tmp_bbox_1[:, 1] - tmp_bbox_2[:, 1]\n tmp_bbox_angle = torch.div(torch.atan2(head_tail_y, head_tail_x), 3.1416) #正切的角度\n tmp_bbox_angle = tmp_bbox_angle.view(-1, 1)\n return torch.div(tmp_bbox_distance, 1000), tmp_bbox_angle\n\n \n \n \n def get_new_repr(self, table_logits, key_mask, value_mask):\n key_repr_list = []\n value_repr_list = []\n bs,_,_,_ = table_logits.shape\n for b in range(bs):\n logit = table_logits[b][key_mask[b]]\n logit = logit[:,value_mask[b]]\n key_num, value_num, _ = logit.shape\n if self.pooling_mode == 'max':\n key_repr = logit.max(dim=1).values \n value_repr = logit.max(dim=0).values \n else:\n key_repr = logit.mean(dim=1)\n value_repr = logit.mean(dim=0)\n key_repr_list.append(F.pad(key_repr,(0, 0, 0, self.max_len - key_num)))\n value_repr_list.append(F.pad(value_repr,(0, 0, 0, self.max_len - value_num)))\n key_new = torch.stack(key_repr_list,dim=0) \n value_new = torch.stack(value_repr_list,dim=0)\n return key_new, value_new\n \n def get_predicted_relations(self, logists,entities,key_mask,value_mask,bbox):\n all_pred_relations = []\n #logits.shape B,M,N,2\n #here is one batch so no dim B\n B,N,M,_=logists.shape\n for b in range(B):\n\n pred_relations = []\n logist = logists[b][key_mask[b]]\n logist = logist[:,value_mask[b]]\n N,M,_ = logist.shape\n \n #---index指的是序列中的第几个实体\n entities_key_index, entities_value_index = self.get_entities_kv_index_list(entities[b])\n \n # if len(entities_key_index) > 64 or len(entities_value_index) > 64:\n # from IPython import embed;embed();exit()\n \n for index in range(M*N):\n key = index // M\n value = index % M\n pred_label = logist[key][value].argmax(-1)\n\n if pred_label == 0:\n continue\n \n rel = {}\n rel[\"head_id\"] = entities_key_index[key]\n rel[\"head\"] = (entities[b][\"start\"][rel[\"head_id\"]], entities[b][\"end\"][rel[\"head_id\"]])\n rel[\"head_type\"] = entities[b][\"label\"][rel[\"head_id\"]]\n\n rel[\"tail_id\"] = entities_value_index[value]\n rel[\"tail\"] = (entities[b][\"start\"][rel[\"tail_id\"]], entities[b][\"end\"][rel[\"tail_id\"]])\n rel[\"tail_type\"] = entities[b][\"label\"][rel[\"tail_id\"]]\n rel[\"type\"] = 1\n key_bbox_left_top = bbox[b][entities[b][\"start\"][rel[\"head_id\"]]].tolist()[:2]\n value_bbox_left_top = bbox[b][entities[b][\"start\"][rel[\"tail_id\"]]].tolist()[:2]\n rel[\"link\"] = (tuple(key_bbox_left_top), tuple(value_bbox_left_top))\n #--------\n pred_relations.append(rel)\n all_pred_relations.append(pred_relations)\n \n return all_pred_relations\n \n \n def get_loss(self,logists,entities,relations,key_mask,value_mask):\n #mask B M M H\n device = logists.device\n loss = 0\n B = key_mask.shape[0]\n all_logits = []\n all_labels = []\n for b in range(B):\n # 64,64 -> N,M\n logist = logists[b][key_mask[b]]\n logist = logist[:,value_mask[b]]\n N,M,_ = logist.shape\n\n\n entities_key_index, entities_value_index = self.get_entities_kv_index_list(entities[b])\n \n entities_key_list = relations[b]['head']\n entities_value_list = relations[b]['tail']\n\n labels = torch.zeros(N*M).to(device).view(N,M)\n \n for i in range(len(entities_key_list)):\n try:\n key = entities_key_index.index(entities_key_list[i])\n value = entities_value_index.index(entities_value_list[i])\n 
labels[key][value] = 1\n except:\n continue\n \n \n labels = labels.view(-1).to(dtype=torch.long)\n logist = logist.view(N*M,-1).to(dtype=torch.float)\n all_logits.append(logist)\n all_labels.append(labels)\n all_logits = torch.cat(all_logits, 0)\n all_labels = torch.cat(all_labels, 0)\n loss = self.loss_fcn(all_logits+1e-10, all_labels)\n if (torch.isnan(loss).sum().item() > 0):\n loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)\n \n return loss" } ]
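This closes the context snippets for the GOSE decoder. The decoder scores every key/value entity pair with `BiaffineAttention(hidden_size // 2, 2)`, whose definition is not included in this record; the sketch below uses the common bilinear-plus-linear formulation and is only an assumption about what that module computes, with `BiaffineScorer` as an illustrative name.

import torch
from torch import nn

class BiaffineScorer(nn.Module):
    # Assumed form: score(h, t) = h^T U t + W [h; t] + b
    def __init__(self, in_dim: int, out_dim: int):
        super().__init__()
        self.bilinear = nn.Bilinear(in_dim, in_dim, out_dim, bias=False)
        self.linear = nn.Linear(2 * in_dim, out_dim)

    def forward(self, head: torch.Tensor, tail: torch.Tensor) -> torch.Tensor:
        # head, tail: [..., in_dim] with matching shapes; output: [..., out_dim]
        return self.bilinear(head, tail) + self.linear(torch.cat([head, tail], dim=-1))

In the record, the two inputs are the key representations repeated along dim 2 and the value representations repeated along dim 1, producing the B x M x M x 2 relation table that the iterative rounds refine.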
import math
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import detectron2
from torch import nn
from torch.nn import CrossEntropyLoss
from detectron2.modeling import META_ARCH_REGISTRY
from transformers import PreTrainedModel
from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    TokenClassifierOutput,
)
from transformers.modeling_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMIntermediate as LayoutLMv2Intermediate
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMOutput as LayoutLMv2Output
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMPooler as LayoutLMv2Pooler
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMSelfOutput as LayoutLMv2SelfOutput
from transformers.utils import logging
from ...utils import ReOutput
from .configuration_layoutlmv2 import LayoutLMv2Config
from .detectron2_config import add_layoutlmv2_config
from ...modules.decoders.gose import GOSE
10,215
) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class LayoutLMv2PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LayoutLMv2Config pretrained_model_archive_map = LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST base_model_prefix = "layoutlmv2" _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, LayoutLMv2LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def my_convert_sync_batchnorm(module, process_group=None): # same as `nn.modules.SyncBatchNorm.convert_sync_batchnorm` but allowing converting from `detectron2.layers.FrozenBatchNorm2d` if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): return nn.modules.SyncBatchNorm.convert_sync_batchnorm(module, process_group) module_output = module if isinstance(module, detectron2.layers.FrozenBatchNorm2d): module_output = torch.nn.SyncBatchNorm( num_features=module.num_features, eps=module.eps, affine=True, track_running_stats=True, process_group=process_group, ) module_output.weight = torch.nn.Parameter(module.weight) module_output.bias = torch.nn.Parameter(module.bias) module_output.running_mean = module.running_mean module_output.running_var = module.running_var module_output.num_batches_tracked = torch.tensor(0, dtype=torch.long, device=module.running_mean.device) for name, child in module.named_children(): module_output.add_module(name, my_convert_sync_batchnorm(child, process_group)) del module return module_output class VisualBackbone(nn.Module): def __init__(self, config): super().__init__() self.cfg = detectron2.config.get_cfg()
# coding=utf-8 logger = logging.get_logger(__name__) LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = [ "layoutlmv2-base-uncased", "layoutlmv2-large-uncased", ] LayoutLMv2LayerNorm = torch.nn.LayerNorm class LayoutLMv2Embeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super(LayoutLMv2Embeddings, self).__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size) self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size) self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = LayoutLMv2LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) def _cal_spatial_position_embeddings(self, bbox): try: left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0]) upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1]) right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2]) lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3]) except IndexError as e: raise IndexError("The :obj:`bbox`coordinate values should be within 0-1000 range.") from e h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1]) w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0]) spatial_position_embeddings = torch.cat( [ left_position_embeddings, upper_position_embeddings, right_position_embeddings, lower_position_embeddings, h_position_embeddings, w_position_embeddings, ], dim=-1, ) return spatial_position_embeddings class LayoutLMv2SelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.fast_qkv = config.fast_qkv self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.has_relative_attention_bias = config.has_relative_attention_bias self.has_spatial_attention_bias = config.has_spatial_attention_bias if config.fast_qkv: self.qkv_linear = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=False) self.q_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size)) self.v_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size)) else: self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def 
compute_qkv(self, hidden_states): if self.fast_qkv: qkv = self.qkv_linear(hidden_states) q, k, v = torch.chunk(qkv, 3, dim=-1) if q.ndimension() == self.q_bias.ndimension(): q = q + self.q_bias v = v + self.v_bias else: _sz = (1,) * (q.ndimension() - 1) + (-1,) q = q + self.q_bias.view(*_sz) v = v + self.v_bias.view(*_sz) else: q = self.query(hidden_states) k = self.key(hidden_states) v = self.value(hidden_states) return q, k, v def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, rel_pos=None, rel_2d_pos=None, ): q, k, v = self.compute_qkv(hidden_states) # (B, L, H*D) -> (B, H, L, D) query_layer = self.transpose_for_scores(q) key_layer = self.transpose_for_scores(k) value_layer = self.transpose_for_scores(v) query_layer = query_layer / math.sqrt(self.attention_head_size) # [BSZ, NAT, L, L] attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.has_relative_attention_bias: attention_scores += rel_pos if self.has_spatial_attention_bias: attention_scores += rel_2d_pos attention_scores = attention_scores.float().masked_fill_(attention_mask.to(torch.bool), float("-inf")) attention_probs = F.softmax(attention_scores, dim=-1, dtype=torch.float32).type_as(value_layer) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class LayoutLMv2Attention(nn.Module): def __init__(self, config): super().__init__() self.self = LayoutLMv2SelfAttention(config) self.output = LayoutLMv2SelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, rel_pos=None, rel_2d_pos=None, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class LayoutLMv2Layer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = 
LayoutLMv2Attention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added" self.crossattention = LayoutLMv2Attention(config) self.intermediate = LayoutLMv2Intermediate(config) self.output = LayoutLMv2Output(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, rel_pos=None, rel_2d_pos=None, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: assert hasattr( self, "crossattention" ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`" # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): ret = 0 if bidirectional: num_buckets //= 2 ret += (relative_position > 0).long() * num_buckets n = torch.abs(relative_position) else: n = torch.max(-relative_position, torch.zeros_like(relative_position)) # now n is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = n < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance val_if_large = max_exact + ( torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) ret += 
torch.where(is_small, n, val_if_large) return ret class LayoutLMv2Encoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([LayoutLMv2Layer(config) for _ in range(config.num_hidden_layers)]) self.has_relative_attention_bias = config.has_relative_attention_bias self.has_spatial_attention_bias = config.has_spatial_attention_bias if self.has_relative_attention_bias: self.rel_pos_bins = config.rel_pos_bins self.max_rel_pos = config.max_rel_pos self.rel_pos_onehot_size = config.rel_pos_bins self.rel_pos_bias = nn.Linear(self.rel_pos_onehot_size, config.num_attention_heads, bias=False) if self.has_spatial_attention_bias: self.max_rel_2d_pos = config.max_rel_2d_pos self.rel_2d_pos_bins = config.rel_2d_pos_bins self.rel_2d_pos_onehot_size = config.rel_2d_pos_bins self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False) self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False) def _cal_1d_pos_emb(self, hidden_states, position_ids): rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1) rel_pos = relative_position_bucket( rel_pos_mat, num_buckets=self.rel_pos_bins, max_distance=self.max_rel_pos, ) rel_pos = F.one_hot(rel_pos, num_classes=self.rel_pos_onehot_size).type_as(hidden_states) rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2) rel_pos = rel_pos.contiguous() return rel_pos def _cal_2d_pos_emb(self, hidden_states, bbox): position_coord_x = bbox[:, :, 0] position_coord_y = bbox[:, :, 3] rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1) rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1) rel_pos_x = relative_position_bucket( rel_pos_x_2d_mat, num_buckets=self.rel_2d_pos_bins, max_distance=self.max_rel_2d_pos, ) rel_pos_y = relative_position_bucket( rel_pos_y_2d_mat, num_buckets=self.rel_2d_pos_bins, max_distance=self.max_rel_2d_pos, ) rel_pos_x = F.one_hot(rel_pos_x, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states) rel_pos_y = F.one_hot(rel_pos_y, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states) rel_pos_x = self.rel_pos_x_bias(rel_pos_x).permute(0, 3, 1, 2) rel_pos_y = self.rel_pos_y_bias(rel_pos_y).permute(0, 3, 1, 2) rel_pos_x = rel_pos_x.contiguous() rel_pos_y = rel_pos_y.contiguous() rel_2d_pos = rel_pos_x + rel_pos_y return rel_2d_pos def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, bbox=None, position_ids=None, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None rel_pos = self._cal_1d_pos_emb(hidden_states, position_ids) if self.has_relative_attention_bias else None rel_2d_pos = self._cal_2d_pos_emb(hidden_states, bbox) if self.has_spatial_attention_bias else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: logger.warn( "`use_cache=True` 
is incompatible with `config.gradient_checkpointing=True`. Setting " "`use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class LayoutLMv2PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LayoutLMv2Config pretrained_model_archive_map = LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST base_model_prefix = "layoutlmv2" _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, LayoutLMv2LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def my_convert_sync_batchnorm(module, process_group=None): # same as `nn.modules.SyncBatchNorm.convert_sync_batchnorm` but allowing converting from `detectron2.layers.FrozenBatchNorm2d` if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): return nn.modules.SyncBatchNorm.convert_sync_batchnorm(module, process_group) module_output = module if isinstance(module, detectron2.layers.FrozenBatchNorm2d): module_output = torch.nn.SyncBatchNorm( num_features=module.num_features, eps=module.eps, affine=True, track_running_stats=True, process_group=process_group, ) module_output.weight = torch.nn.Parameter(module.weight) module_output.bias = torch.nn.Parameter(module.bias) module_output.running_mean = module.running_mean module_output.running_var = module.running_var module_output.num_batches_tracked = torch.tensor(0, dtype=torch.long, device=module.running_mean.device) for name, child in module.named_children(): module_output.add_module(name, my_convert_sync_batchnorm(child, process_group)) del module return module_output class VisualBackbone(nn.Module): def __init__(self, 
config): super().__init__() self.cfg = detectron2.config.get_cfg()
add_layoutlmv2_config(self.cfg)
2
2023-10-19 14:36:32+00:00
12k
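The encoder code in the record above calls `relative_position_bucket` to map signed token-index differences to a small set of bucket ids before `rel_pos_bias` turns them into per-head attention biases; only the tail of that helper (`torch.where(is_small, n, val_if_large)`) is visible in the snippet. Below is a minimal, hedged sketch of the bidirectional T5-style bucketing it appears to use; the function signature and default values are assumptions, not copied from the record.

```python
import math
import torch

def relative_position_bucket(relative_position, num_buckets=32, max_distance=128):
    # Bidirectional variant: the sign of the offset selects the upper or lower half of the buckets.
    ret = 0
    num_buckets //= 2
    ret += (relative_position > 0).long() * num_buckets
    n = torch.abs(relative_position)

    # Small offsets get one bucket each; larger offsets are binned logarithmically
    # up to max_distance, then clamped into the last bucket.
    max_exact = num_buckets // 2
    is_small = n < max_exact
    val_if_large = max_exact + (
        torch.log(n.float() / max_exact)
        / math.log(max_distance / max_exact)
        * (num_buckets - max_exact)
    ).long()
    val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))

    ret += torch.where(is_small, n, val_if_large)  # matches the tail shown in the snippet
    return ret
```

In `_cal_1d_pos_emb` the resulting bucket ids are one-hot encoded and projected by `rel_pos_bias` into one bias value per attention head; `_cal_2d_pos_emb` applies the same bucketing separately to the x and y box coordinates.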
mklissa/dceo
dopamine/discrete_domains/run_experiment.py
[ { "identifier": "dqn_agent", "path": "dopamine/agents/dqn/dqn_agent.py", "snippet": "NATURE_DQN_OBSERVATION_SHAPE = atari_lib.NATURE_DQN_OBSERVATION_SHAPE\nNATURE_DQN_DTYPE = atari_lib.NATURE_DQN_DTYPE\nNATURE_DQN_STACK_SIZE = atari_lib.NATURE_DQN_STACK_SIZE\ndef linearly_decaying_epsilon(decay_period, step, warmup_steps, epsilon):\ndef identity_epsilon(unused_decay_period, unused_step, unused_warmup_steps,\n epsilon):\n def __init__(self,\n sess,\n num_actions,\n observation_shape=atari_lib.NATURE_DQN_OBSERVATION_SHAPE,\n observation_dtype=atari_lib.NATURE_DQN_DTYPE,\n stack_size=atari_lib.NATURE_DQN_STACK_SIZE,\n network=atari_lib.NatureDQNNetwork,\n gamma=0.99,\n update_horizon=1,\n min_replay_history=20000,\n update_period=4,\n target_update_period=8000,\n epsilon_fn=linearly_decaying_epsilon,\n epsilon_train=0.01,\n epsilon_eval=0.001,\n epsilon_decay_period=250000,\n tf_device='/cpu:*',\n eval_mode=False,\n use_staging=False,\n max_tf_checkpoints_to_keep=4,\n optimizer=tf.compat.v1.train.RMSPropOptimizer(\n learning_rate=0.00025,\n decay=0.95,\n momentum=0.0,\n epsilon=0.00001,\n centered=True),\n summary_writer=None,\n summary_writing_frequency=500,\n allow_partial_reload=False):\n def _create_network(self, name):\n def _build_networks(self):\n def _build_replay_buffer(self, use_staging):\n def _build_target_q_op(self):\n def _build_train_op(self):\n def _build_sync_op(self):\n def begin_episode(self, observation):\n def step(self, reward, observation):\n def end_episode(self, reward):\n def _select_action(self):\n def _train_step(self):\n def _record_observation(self, observation):\n def _store_transition(self, last_observation, action, reward, is_terminal):\n def _reset_state(self):\n def bundle_and_checkpoint(self, checkpoint_dir, iteration_number):\n def unbundle(self, checkpoint_dir, iteration_number, bundle_dictionary):\nclass DQNAgent(object):" }, { "identifier": "implicit_quantile_agent", "path": "dopamine/agents/implicit_quantile/implicit_quantile_agent.py", "snippet": "class ImplicitQuantileAgent(rainbow_agent.RainbowAgent):\n def __init__(self,\n sess,\n num_actions,\n network=atari_lib.ImplicitQuantileNetwork,\n kappa=1.0,\n num_tau_samples=32,\n num_tau_prime_samples=32,\n num_quantile_samples=32,\n quantile_embedding_dim=64,\n double_dqn=False,\n summary_writer=None,\n summary_writing_frequency=500):\n def _create_network(self, name):\n def _build_networks(self):\n def _build_target_quantile_values_op(self):\n def _build_train_op(self):" }, { "identifier": "rainbow_agent", "path": "dopamine/agents/rainbow/rainbow_agent.py", "snippet": "class RainbowAgent(dqn_agent.DQNAgent):\n def __init__(self,\n sess,\n num_actions,\n observation_shape=dqn_agent.NATURE_DQN_OBSERVATION_SHAPE,\n observation_dtype=dqn_agent.NATURE_DQN_DTYPE,\n stack_size=dqn_agent.NATURE_DQN_STACK_SIZE,\n network=atari_lib.RainbowNetwork,\n num_atoms=51,\n vmin=None,\n vmax=10.,\n gamma=0.99,\n update_horizon=1,\n min_replay_history=20000,\n update_period=4,\n target_update_period=8000,\n epsilon_fn=dqn_agent.linearly_decaying_epsilon,\n epsilon_train=0.01,\n epsilon_eval=0.001,\n epsilon_decay_period=250000,\n replay_scheme='prioritized',\n tf_device='/cpu:*',\n use_staging=False,\n optimizer=tf.compat.v1.train.AdamOptimizer(\n learning_rate=0.00025, epsilon=0.0003125),\n summary_writer=None,\n summary_writing_frequency=500):\n def _create_network(self, name):\n def _build_replay_buffer(self, use_staging):\n def _build_target_distribution(self):\n def _build_train_op(self):\n def 
_store_transition(self,\n last_observation,\n action,\n reward,\n is_terminal,\n priority=None):\ndef project_distribution(supports, weights, target_support,\n validate_args=False):" }, { "identifier": "atari_lib", "path": "dopamine/discrete_domains/atari_lib.py", "snippet": "NATURE_DQN_OBSERVATION_SHAPE = (84, 84) # Size of downscaled Atari 2600 frame.\nNATURE_DQN_DTYPE = tf.uint8 # DType of Atari 2600 observations.\nNATURE_DQN_STACK_SIZE = 4 # Number of frames in the state stack.\ndef create_atari_environment(game_name=None, sticky_actions=True):\ndef maybe_transform_variable_names(variables, legacy_checkpoint_load=False):\n def __init__(self, num_actions, name=None):\n def call(self, state):\n def __init__(self, num_actions, num_atoms, support, name=None):\n def kernel_initializer():\n def call(self, state):\n def __init__(self, num_actions, quantile_embedding_dim, name=None):\n def kernel_initializer():\n def call(self, state, num_quantiles):\n def __init__(self, environment, frame_skip=4, terminal_on_life_loss=False,\n screen_size=84):\n def observation_space(self):\n def action_space(self):\n def reward_range(self):\n def metadata(self):\n def close(self):\n def reset(self):\n def render(self, mode):\n def step(self, action):\n def _fetch_grayscale_observation(self, output):\n def _pool_and_resize(self):\nclass NatureDQNNetwork(tf.keras.Model):\nclass RainbowNetwork(tf.keras.Model):\nclass ImplicitQuantileNetwork(tf.keras.Model):\nclass AtariPreprocessing(object):" }, { "identifier": "checkpointer", "path": "dopamine/discrete_domains/checkpointer.py", "snippet": "def get_latest_checkpoint_number(base_directory,\n override_number=None,\n sentinel_file_identifier='checkpoint'):\n def extract_iteration(x):\n def __init__(self, base_directory, checkpoint_file_prefix='ckpt',\n sentinel_file_identifier='checkpoint', checkpoint_frequency=1,\n checkpoint_duration=4,\n keep_every=None):\n def _generate_filename(self, file_prefix, iteration_number):\n def _save_data_to_file(self, data, filename):\n def save_checkpoint(self, iteration_number, data):\n def _clean_up_old_checkpoints(self, iteration_number):\n def _load_data_from_file(self, filename):\n def load_checkpoint(self, iteration_number):\nclass Checkpointer(object):" }, { "identifier": "iteration_statistics", "path": "dopamine/discrete_domains/iteration_statistics.py", "snippet": "class IterationStatistics(object):\n def __init__(self):\n def append(self, data_pairs):" }, { "identifier": "logger", "path": "dopamine/discrete_domains/logger.py", "snippet": "class Logger(object):\n def __init__(self, logging_dir, logs_duration=4):\n def __setitem__(self, key, value):\n def _generate_filename(self, filename_prefix, iteration_number):\n def log_to_file(self, filename_prefix, iteration_number):\n def is_logging_enabled(self):" }, { "identifier": "dqn_agent", "path": "dopamine/jax/agents/dqn/dqn_agent.py", "snippet": "NATURE_DQN_OBSERVATION_SHAPE = dqn_agent.NATURE_DQN_OBSERVATION_SHAPE\nNATURE_DQN_DTYPE = jnp.uint8\nNATURE_DQN_STACK_SIZE = dqn_agent.NATURE_DQN_STACK_SIZE\ndef create_optimizer(name='adam', learning_rate=6.25e-5, beta1=0.9, beta2=0.999,\n eps=1.5e-4, centered=False):\ndef train(network_def, online_params, target_params, optimizer, optimizer_state,\n states, actions, next_states, rewards, terminals, cumulative_gamma,\n loss_type='mse'):\n def loss_fn(params, target):\n def q_online(state):\n def q_target(state):\ndef target_q(target_network, next_states, rewards, terminals, cumulative_gamma):\ndef 
linearly_decaying_epsilon(decay_period, step, warmup_steps, epsilon):\ndef select_action(network_def, params, state, rng, num_actions, eval_mode,\n epsilon_eval, epsilon_train, epsilon_decay_period,\n training_steps, min_replay_history, epsilon_fn):\n def __init__(self,\n num_actions,\n observation_shape=NATURE_DQN_OBSERVATION_SHAPE,\n observation_dtype=NATURE_DQN_DTYPE,\n stack_size=NATURE_DQN_STACK_SIZE,\n network=networks.NatureDQNNetwork,\n gamma=0.99,\n update_horizon=1,\n min_replay_history=20000,\n update_period=4,\n target_update_period=8000,\n epsilon_fn=linearly_decaying_epsilon,\n epsilon_train=0.01,\n epsilon_eval=0.001,\n epsilon_decay_period=250000,\n eval_mode=False,\n optimizer='adam',\n summary_writer=None,\n summary_writing_frequency=500,\n allow_partial_reload=False,\n seed=None,\n loss_type='mse',\n preprocess_fn=None,\n collector_allowlist=('tensorboard',)):\n def _build_networks_and_optimizer(self):\n def _build_replay_buffer(self):\n def _sample_from_replay_buffer(self):\n def _sync_weights(self):\n def _reset_state(self):\n def _record_observation(self, observation):\n def begin_episode(self, observation):\n def step(self, reward, observation):\n def end_episode(self, reward, terminal=True):\n def _train_step(self):\n def _store_transition(self,\n last_observation,\n action,\n reward,\n is_terminal,\n *args,\n priority=None,\n episode_end=False):\n def bundle_and_checkpoint(self, checkpoint_dir, iteration_number):\n def unbundle(self, checkpoint_dir, iteration_number, bundle_dictionary):\n def set_collector_dispatcher(self, collector_dispatcher):\nclass JaxDQNAgent(object):" }, { "identifier": "full_rainbow_agent", "path": "dopamine/jax/agents/full_rainbow/full_rainbow_agent.py", "snippet": "def zero_epsilon(unused_decay_period, unused_step, unused_warmup_steps,\n unused_epsilon):\ndef select_action(network_def, params, state, rng, num_actions, eval_mode,\n epsilon_eval, epsilon_train, epsilon_decay_period,\n training_steps, min_replay_history, epsilon_fn, support):\ndef get_logits(model, states, rng):\ndef get_q_values(model, states, rng):\ndef train(network_def, online_params, target_params, optimizer, optimizer_state,\n states, actions, next_states, rewards, terminals, loss_weights,\n support, cumulative_gamma, double_dqn, distributional, mse_loss, rng):\n def q_online(state, key):\n def q_target(state, key):\n def loss_fn(params, target, loss_multipliers):\n def q_online(state, key):\ndef target_output(model, target_network, next_states, rewards, terminals,\n support, cumulative_gamma, double_dqn, distributional, rng):\n def __init__(self,\n num_actions,\n noisy=True,\n dueling=True,\n double_dqn=True,\n distributional=True,\n mse_loss=False,\n num_updates_per_train_step=1,\n network=networks.FullRainbowNetwork,\n num_atoms=51,\n vmax=10.,\n vmin=None,\n epsilon_fn=dqn_agent.linearly_decaying_epsilon,\n replay_scheme='prioritized',\n summary_writer=None,\n seed=None,\n preprocess_fn=None):\n def _build_networks_and_optimizer(self):\n def _build_replay_buffer(self):\n def _training_step_update(self):\n def _store_transition(self,\n last_observation,\n action,\n reward,\n is_terminal,\n *args,\n priority=None,\n episode_end=False):\n def _train_step(self):\n def begin_episode(self, observation):\n def step(self, reward, observation):\nclass JaxFullRainbowAgent(dqn_agent.JaxDQNAgent):" }, { "identifier": "full_rainbow_dceo", "path": "dopamine/jax/agents/full_rainbow/full_rainbow_dceo.py", "snippet": "class Option:\nclass 
JaxFullRainbowAgentDCEO(dqn_agent.JaxDQNAgent):\n def __init__(self, online_params, target_network_params, optimizer_state):\ndef zero_epsilon(unused_decay_period, unused_step, unused_warmup_steps,\n unused_epsilon):\ndef act(network_def, params, state, rng, \n num_actions, eval_mode, support, epsilon):\ndef select_action(network_def, params, state, rng, num_actions, eval_mode,\n epsilon_eval, epsilon_train, epsilon_decay_period,\n training_steps, min_replay_history, epsilon_fn, support,\n option_term, option_prob, dur, cur_opt, num_options, options):\ndef get_logits(model, states, rng):\ndef get_q_values(model, states, rng):\ndef train(network_def, online_params, target_params, optimizer, optimizer_state,\n states, actions, next_states, rewards, terminals, loss_weights,\n support, cumulative_gamma, double_dqn, distributional, mse_loss, rng):\n def q_online(state, key):\n def q_target(state, key):\n def loss_fn(params, target, loss_multipliers):\n def q_online(state, key):\ndef target_output(model, target_network, next_states, rewards, terminals,\n support, cumulative_gamma, double_dqn, distributional, rng):\n def __init__(self,\n num_actions,\n noisy=False,\n dueling=True,\n double_dqn=True,\n distributional=True,\n mse_loss=False,\n num_updates_per_train_step=1,\n network=networks.FullRainbowNetwork,\n rep_network=networks.NatureDQNNetwork,\n num_atoms=51,\n vmax=10.,\n vmin=None,\n epsilon_fn=dqn_agent.linearly_decaying_epsilon,\n replay_scheme='prioritized',\n summary_writer=None,\n seed=None,\n preprocess_fn=None,\n num_options=0,\n option_prob=0.0,\n option_duration=10,\n rep_dim=10,\n log_transform=True):\n def get_rep(params, state):\n def neg_loss_fn(phi_u, phi_v):\n def train_rep(rep_params, optimizer, optimizer_state, states):\n def loss_fn(params):\n def rep_online(state):\n def _build_networks_and_optimizer(self):\n def _build_replay_buffer(self):\n def _training_step_update(self):\n def get_loss_weights(self,):\n def _sync_option_weights(self, option):\n def _rep_sample_from_replay_buffer(self,):\n def _store_transition(self,\n last_observation,\n action,\n reward,\n is_terminal,\n *args,\n priority=None,\n episode_end=False):\n def _train_step(self):\n def begin_episode(self, observation):\n def step(self, reward, observation):" }, { "identifier": "implicit_quantile_agent", "path": "dopamine/jax/agents/implicit_quantile/implicit_quantile_agent.py", "snippet": "def target_quantile_values(network_def, online_params, target_params,\n next_states, rewards, terminals,\n num_tau_prime_samples, num_quantile_samples,\n cumulative_gamma, double_dqn, rng):\ndef train(network_def, online_params, target_params, optimizer, optimizer_state,\n states, actions, next_states, rewards, terminals, num_tau_samples,\n num_tau_prime_samples, num_quantile_samples, cumulative_gamma,\n double_dqn, kappa, rng):\n def loss_fn(params, rng_input, target_quantile_vals):\n def online(state, key):\ndef select_action(network_def, params, state, rng, num_quantile_samples,\n num_actions, eval_mode, epsilon_eval, epsilon_train,\n epsilon_decay_period, training_steps, min_replay_history,\n epsilon_fn):\n def __init__(self,\n num_actions,\n observation_shape=dqn_agent.NATURE_DQN_OBSERVATION_SHAPE,\n observation_dtype=dqn_agent.NATURE_DQN_DTYPE,\n stack_size=dqn_agent.NATURE_DQN_STACK_SIZE,\n network=networks.ImplicitQuantileNetwork,\n kappa=1.0,\n num_tau_samples=32,\n num_tau_prime_samples=32,\n num_quantile_samples=32,\n quantile_embedding_dim=64,\n double_dqn=False,\n gamma=0.99,\n update_horizon=1,\n 
min_replay_history=20000,\n update_period=4,\n target_update_period=8000,\n epsilon_fn=dqn_agent.linearly_decaying_epsilon,\n epsilon_train=0.01,\n epsilon_eval=0.001,\n epsilon_decay_period=250000,\n replay_scheme='prioritized',\n optimizer='adam',\n summary_writer=None,\n summary_writing_frequency=500,\n seed=None):\n def _build_networks_and_optimizer(self):\n def begin_episode(self, observation):\n def step(self, reward, observation):\n def _train_step(self):\nclass JaxImplicitQuantileAgent(dqn_agent.JaxDQNAgent):" }, { "identifier": "quantile_agent", "path": "dopamine/jax/agents/quantile/quantile_agent.py", "snippet": "def target_distribution(target_network, next_states, rewards, terminals,\n cumulative_gamma):\ndef train(network_def, online_params, target_params, optimizer, optimizer_state,\n states, actions, next_states, rewards, terminals, kappa, num_atoms,\n cumulative_gamma):\n def loss_fn(params, target):\n def q_online(state):\n def q_target(state):\n def __init__(self,\n num_actions,\n observation_shape=dqn_agent.NATURE_DQN_OBSERVATION_SHAPE,\n observation_dtype=dqn_agent.NATURE_DQN_DTYPE,\n stack_size=dqn_agent.NATURE_DQN_STACK_SIZE,\n network=networks.QuantileNetwork,\n kappa=1.0,\n num_atoms=200,\n gamma=0.99,\n update_horizon=1,\n min_replay_history=50000,\n update_period=4,\n target_update_period=10000,\n epsilon_fn=dqn_agent.linearly_decaying_epsilon,\n epsilon_train=0.1,\n epsilon_eval=0.05,\n epsilon_decay_period=1000000,\n replay_scheme='prioritized',\n optimizer='adam',\n summary_writer=None,\n summary_writing_frequency=500,\n seed=None,\n allow_partial_reload=False):\n def _build_networks_and_optimizer(self):\n def _build_replay_buffer(self):\n def _train_step(self):\nclass JaxQuantileAgent(dqn_agent.JaxDQNAgent):" }, { "identifier": "rainbow_agent", "path": "dopamine/jax/agents/rainbow/rainbow_agent.py", "snippet": "def train(network_def, online_params, target_params, optimizer, optimizer_state,\n states, actions, next_states, rewards, terminals, loss_weights,\n support, cumulative_gamma):\n def loss_fn(params, target, loss_multipliers):\n def q_online(state):\n def q_target(state):\ndef target_distribution(target_network, next_states, rewards, terminals,\n support, cumulative_gamma):\ndef select_action(network_def, params, state, rng, num_actions, eval_mode,\n epsilon_eval, epsilon_train, epsilon_decay_period,\n training_steps, min_replay_history, epsilon_fn, support):\n def __init__(self,\n num_actions,\n observation_shape=dqn_agent.NATURE_DQN_OBSERVATION_SHAPE,\n observation_dtype=dqn_agent.NATURE_DQN_DTYPE,\n stack_size=dqn_agent.NATURE_DQN_STACK_SIZE,\n network=networks.RainbowNetwork,\n num_atoms=51,\n vmin=None,\n vmax=10.,\n gamma=0.99,\n update_horizon=1,\n min_replay_history=20000,\n update_period=4,\n target_update_period=8000,\n epsilon_fn=dqn_agent.linearly_decaying_epsilon,\n epsilon_train=0.01,\n epsilon_eval=0.001,\n epsilon_decay_period=250000,\n replay_scheme='prioritized',\n optimizer='adam',\n seed=None,\n summary_writer=None,\n summary_writing_frequency=500,\n allow_partial_reload=False):\n def _build_networks_and_optimizer(self):\n def _build_replay_buffer(self):\n def begin_episode(self, observation):\n def step(self, reward, observation):\n def _train_step(self):\ndef project_distribution(supports, weights, target_support):\nclass JaxRainbowAgent(dqn_agent.JaxDQNAgent):" }, { "identifier": "collector_dispatcher", "path": "dopamine/metrics/collector_dispatcher.py", "snippet": "AVAILABLE_COLLECTORS = {\n 'console': 
console_collector.ConsoleCollector,\n 'pickle': pickle_collector.PickleCollector,\n 'tensorboard': tensorboard_collector.TensorboardCollector,\n}\ndef add_collector(name: str, constructor: CollectorConstructorType) -> None:\n def __init__(\n self,\n base_dir: Optional[str],\n # TODO(psc): Consider using sets instead.\n collectors: Sequence[str] = ('console', 'pickle', 'tensorboard')):\n def write(\n self,\n statistics: Sequence[statistics_instance.StatisticsInstance],\n collector_allowlist: Sequence[str] = ()) -> None:\n def flush(self) -> None:\n def close(self) -> None:\nclass CollectorDispatcher(object):" }, { "identifier": "statistics_instance", "path": "dopamine/metrics/statistics_instance.py", "snippet": "class StatisticsInstance:" } ]
import os import sys import time import gin.tf import numpy as np import tensorflow as tf from absl import logging from dopamine.agents.dqn import dqn_agent from dopamine.agents.implicit_quantile import implicit_quantile_agent from dopamine.agents.rainbow import rainbow_agent from dopamine.discrete_domains import atari_lib from dopamine.discrete_domains import checkpointer from dopamine.discrete_domains import iteration_statistics from dopamine.discrete_domains import logger from dopamine.jax.agents.dqn import dqn_agent as jax_dqn_agent from dopamine.jax.agents.full_rainbow import full_rainbow_agent from dopamine.jax.agents.full_rainbow import full_rainbow_dceo from dopamine.jax.agents.implicit_quantile import implicit_quantile_agent as jax_implicit_quantile_agent from dopamine.jax.agents.quantile import quantile_agent as jax_quantile_agent from dopamine.jax.agents.rainbow import rainbow_agent as jax_rainbow_agent from dopamine.metrics import collector_dispatcher from dopamine.metrics import statistics_instance
7,612
max_steps_per_episode=27000, clip_rewards=True, use_legacy_logger=True, fine_grained_print_to_console=True): """Initialize the Runner object in charge of running a full experiment. Args: base_dir: str, the base directory to host all required sub-directories. create_agent_fn: A function that takes as args a Tensorflow session and an environment, and returns an agent. create_environment_fn: A function which receives a problem name and creates a Gym environment for that problem (e.g. an Atari 2600 game). checkpoint_file_prefix: str, the prefix to use for checkpoint files. logging_file_prefix: str, prefix to use for the log files. log_every_n: int, the frequency for writing logs. num_iterations: int, the iteration number threshold (must be greater than start_iteration). training_steps: int, the number of training steps to perform. evaluation_steps: int, the number of evaluation steps to perform. max_steps_per_episode: int, maximum number of steps after which an episode terminates. clip_rewards: bool, whether to clip rewards in [-1, 1]. use_legacy_logger: bool, whether to use the legacy Logger. This will be deprecated soon, replaced with the new CollectorDispatcher setup. fine_grained_print_to_console: bool, whether to print fine-grained progress to console (useful for debugging). This constructor will take the following actions: - Initialize an environment. - Initialize a `tf.compat.v1.Session`. - Initialize a logger. - Initialize an agent. - Reload from the latest checkpoint, if available, and initialize the Checkpointer object. """ assert base_dir is not None self._legacy_logger_enabled = use_legacy_logger self._fine_grained_print_to_console_enabled = fine_grained_print_to_console self._logging_file_prefix = logging_file_prefix self._log_every_n = log_every_n self._num_iterations = num_iterations self._training_steps = training_steps self._evaluation_steps = evaluation_steps self._max_steps_per_episode = max_steps_per_episode self._base_dir = base_dir self._clip_rewards = clip_rewards self._create_directories() self._environment = create_environment_fn() # The agent is now in charge of setting up the session. self._sess = None # We're using a bit of a hack in that we pass in _base_dir instead of an # actually SummaryWriter. This is because the agent is now in charge of the # session, but needs to create the SummaryWriter before creating the ops, # and in order to do so, it requires the base directory. self._agent = create_agent_fn(self._sess, self._environment, summary_writer=self._base_dir) if hasattr(self._agent, '_sess'): self._sess = self._agent._sess self._summary_writer = self._agent.summary_writer self._initialize_checkpointer_and_maybe_resume(checkpoint_file_prefix) # Create a collector dispatcher for metrics reporting. 
self._collector_dispatcher = collector_dispatcher.CollectorDispatcher( self._base_dir) set_collector_dispatcher_fn = getattr( self._agent, 'set_collector_dispatcher', None) if callable(set_collector_dispatcher_fn): set_collector_dispatcher_fn(self._collector_dispatcher) @property def _use_legacy_logger(self): if not hasattr(self, '_legacy_logger_enabled'): return True return self._legacy_logger_enabled @property def _has_collector_dispatcher(self): if not hasattr(self, '_collector_dispatcher'): return False return True @property def _fine_grained_print_to_console(self): if not hasattr(self, '_fine_grained_print_to_console_enabled'): return True return self._fine_grained_print_to_console_enabled def _create_directories(self): """Create necessary sub-directories.""" self._checkpoint_dir = os.path.join(self._base_dir, 'checkpoints') if self._use_legacy_logger: logging.warning( 'DEPRECATION WARNING: Logger is being deprecated. ' 'Please switch to CollectorDispatcher!') self._logger = logger.Logger(os.path.join(self._base_dir, 'logs')) def _initialize_checkpointer_and_maybe_resume(self, checkpoint_file_prefix): """Reloads the latest checkpoint if it exists. This method will first create a `Checkpointer` object and then call `checkpointer.get_latest_checkpoint_number` to determine if there is a valid checkpoint in self._checkpoint_dir, and what the largest file number is. If a valid checkpoint file is found, it will load the bundled data from this file and will pass it to the agent for it to reload its data. If the agent is able to successfully unbundle, this method will verify that the unbundled data contains the keys,'logs' and 'current_iteration'. It will then load the `Logger`'s data from the bundle, and will return the iteration number keyed by 'current_iteration' as one of the return values (along with the `Checkpointer` object). Args: checkpoint_file_prefix: str, the checkpoint file prefix. Returns: start_iteration: int, the iteration number to start the experiment from. experiment_checkpointer: `Checkpointer` object for the experiment. """
# coding=utf-8 # Copyright 2018 The Dopamine Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module defining classes and helper methods for general agents.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function def load_gin_configs(gin_files, gin_bindings): """Loads gin configuration files. Args: gin_files: list, of paths to the gin configuration files for this experiment. gin_bindings: list, of gin parameter bindings to override the values in the config files. """ gin.parse_config_files_and_bindings(gin_files, bindings=gin_bindings, skip_unknown=False) @gin.configurable def create_agent(sess, environment, agent_name=None, summary_writer=None, debug_mode=False): """Creates an agent. Args: sess: A `tf.compat.v1.Session` object for running associated ops. environment: A gym environment (e.g. Atari 2600). agent_name: str, name of the agent to create. summary_writer: A Tensorflow summary writer to pass to the agent for in-agent training statistics in Tensorboard. debug_mode: bool, whether to output Tensorboard summaries. If set to true, the agent will output in-episode statistics to Tensorboard. Disabled by default as this results in slower training. Returns: agent: An RL agent. Raises: ValueError: If `agent_name` is not in supported list. """ assert agent_name is not None if not debug_mode: summary_writer = None if agent_name.startswith('dqn'): return dqn_agent.DQNAgent(sess, num_actions=environment.action_space.n, summary_writer=summary_writer) elif agent_name == 'rainbow': return rainbow_agent.RainbowAgent( sess, num_actions=environment.action_space.n, summary_writer=summary_writer) elif agent_name == 'implicit_quantile': return implicit_quantile_agent.ImplicitQuantileAgent( sess, num_actions=environment.action_space.n, summary_writer=summary_writer) elif agent_name == 'jax_dqn': return jax_dqn_agent.JaxDQNAgent(num_actions=environment.action_space.n, summary_writer=summary_writer) elif agent_name == 'jax_quantile': return jax_quantile_agent.JaxQuantileAgent( num_actions=environment.action_space.n, summary_writer=summary_writer) elif agent_name == 'jax_rainbow': return jax_rainbow_agent.JaxRainbowAgent( num_actions=environment.action_space.n, summary_writer=summary_writer) elif agent_name == 'full_rainbow': return full_rainbow_agent.JaxFullRainbowAgent( num_actions=environment.action_space.n, summary_writer=summary_writer) elif agent_name == 'full_rainbow_dceo': return full_rainbow_dceo.JaxFullRainbowAgentDCEO( num_actions=environment.action_space.n, summary_writer=summary_writer) elif agent_name == 'jax_implicit_quantile': return jax_implicit_quantile_agent.JaxImplicitQuantileAgent( num_actions=environment.action_space.n, summary_writer=summary_writer) else: raise ValueError('Unknown agent: {}'.format(agent_name)) @gin.configurable def create_runner(base_dir, schedule='continuous_train_and_eval'): """Creates an experiment Runner. Args: base_dir: str, base directory for hosting all subdirectories. 
schedule: string, which type of Runner to use. Returns: runner: A `Runner` like object. Raises: ValueError: When an unknown schedule is encountered. """ assert base_dir is not None # Continuously runs training and evaluation until max num_iterations is hit. if schedule == 'continuous_train_and_eval': return Runner(base_dir, create_agent) # Continuously runs training until max num_iterations is hit. elif schedule == 'continuous_train': return TrainRunner(base_dir, create_agent) else: raise ValueError('Unknown schedule: {}'.format(schedule)) @gin.configurable class Runner(object): """Object that handles running Dopamine experiments. Here we use the term 'experiment' to mean simulating interactions between the agent and the environment and reporting some statistics pertaining to these interactions. A simple scenario to train a DQN agent is as follows: ```python import dopamine.discrete_domains.atari_lib base_dir = '/tmp/simple_example' def create_agent(sess, environment): return dqn_agent.DQNAgent(sess, num_actions=environment.action_space.n) runner = Runner(base_dir, create_agent, atari_lib.create_atari_environment) runner.run() ``` """ def __init__(self, base_dir, create_agent_fn, create_environment_fn=atari_lib.create_atari_environment, checkpoint_file_prefix='ckpt', logging_file_prefix='log', log_every_n=1, num_iterations=200, training_steps=250000, evaluation_steps=125000, max_steps_per_episode=27000, clip_rewards=True, use_legacy_logger=True, fine_grained_print_to_console=True): """Initialize the Runner object in charge of running a full experiment. Args: base_dir: str, the base directory to host all required sub-directories. create_agent_fn: A function that takes as args a Tensorflow session and an environment, and returns an agent. create_environment_fn: A function which receives a problem name and creates a Gym environment for that problem (e.g. an Atari 2600 game). checkpoint_file_prefix: str, the prefix to use for checkpoint files. logging_file_prefix: str, prefix to use for the log files. log_every_n: int, the frequency for writing logs. num_iterations: int, the iteration number threshold (must be greater than start_iteration). training_steps: int, the number of training steps to perform. evaluation_steps: int, the number of evaluation steps to perform. max_steps_per_episode: int, maximum number of steps after which an episode terminates. clip_rewards: bool, whether to clip rewards in [-1, 1]. use_legacy_logger: bool, whether to use the legacy Logger. This will be deprecated soon, replaced with the new CollectorDispatcher setup. fine_grained_print_to_console: bool, whether to print fine-grained progress to console (useful for debugging). This constructor will take the following actions: - Initialize an environment. - Initialize a `tf.compat.v1.Session`. - Initialize a logger. - Initialize an agent. - Reload from the latest checkpoint, if available, and initialize the Checkpointer object. """ assert base_dir is not None self._legacy_logger_enabled = use_legacy_logger self._fine_grained_print_to_console_enabled = fine_grained_print_to_console self._logging_file_prefix = logging_file_prefix self._log_every_n = log_every_n self._num_iterations = num_iterations self._training_steps = training_steps self._evaluation_steps = evaluation_steps self._max_steps_per_episode = max_steps_per_episode self._base_dir = base_dir self._clip_rewards = clip_rewards self._create_directories() self._environment = create_environment_fn() # The agent is now in charge of setting up the session. 
self._sess = None # We're using a bit of a hack in that we pass in _base_dir instead of an # actually SummaryWriter. This is because the agent is now in charge of the # session, but needs to create the SummaryWriter before creating the ops, # and in order to do so, it requires the base directory. self._agent = create_agent_fn(self._sess, self._environment, summary_writer=self._base_dir) if hasattr(self._agent, '_sess'): self._sess = self._agent._sess self._summary_writer = self._agent.summary_writer self._initialize_checkpointer_and_maybe_resume(checkpoint_file_prefix) # Create a collector dispatcher for metrics reporting. self._collector_dispatcher = collector_dispatcher.CollectorDispatcher( self._base_dir) set_collector_dispatcher_fn = getattr( self._agent, 'set_collector_dispatcher', None) if callable(set_collector_dispatcher_fn): set_collector_dispatcher_fn(self._collector_dispatcher) @property def _use_legacy_logger(self): if not hasattr(self, '_legacy_logger_enabled'): return True return self._legacy_logger_enabled @property def _has_collector_dispatcher(self): if not hasattr(self, '_collector_dispatcher'): return False return True @property def _fine_grained_print_to_console(self): if not hasattr(self, '_fine_grained_print_to_console_enabled'): return True return self._fine_grained_print_to_console_enabled def _create_directories(self): """Create necessary sub-directories.""" self._checkpoint_dir = os.path.join(self._base_dir, 'checkpoints') if self._use_legacy_logger: logging.warning( 'DEPRECATION WARNING: Logger is being deprecated. ' 'Please switch to CollectorDispatcher!') self._logger = logger.Logger(os.path.join(self._base_dir, 'logs')) def _initialize_checkpointer_and_maybe_resume(self, checkpoint_file_prefix): """Reloads the latest checkpoint if it exists. This method will first create a `Checkpointer` object and then call `checkpointer.get_latest_checkpoint_number` to determine if there is a valid checkpoint in self._checkpoint_dir, and what the largest file number is. If a valid checkpoint file is found, it will load the bundled data from this file and will pass it to the agent for it to reload its data. If the agent is able to successfully unbundle, this method will verify that the unbundled data contains the keys,'logs' and 'current_iteration'. It will then load the `Logger`'s data from the bundle, and will return the iteration number keyed by 'current_iteration' as one of the return values (along with the `Checkpointer` object). Args: checkpoint_file_prefix: str, the checkpoint file prefix. Returns: start_iteration: int, the iteration number to start the experiment from. experiment_checkpointer: `Checkpointer` object for the experiment. """
self._checkpointer = checkpointer.Checkpointer(self._checkpoint_dir,
4
2023-10-15 22:14:16+00:00
12k
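For the `run_experiment.py` record above, the pieces shown (`load_gin_configs`, `create_runner`, the gin-configurable `Runner`) compose into a short driver. The sketch below is a hedged usage example only; the gin file path, the binding, and the output directory are placeholder assumptions.

```python
# Hypothetical driver script; paths and bindings are illustrative only.
from dopamine.discrete_domains import run_experiment

gin_files = ['path/to/dqn.gin']                  # assumed agent/config file
gin_bindings = ['Runner.num_iterations = 10']    # example override of a Runner parameter

run_experiment.load_gin_configs(gin_files, gin_bindings)
runner = run_experiment.create_runner(base_dir='/tmp/dopamine_example')
```

Launching the training/evaluation loop then follows the pattern in the `Runner` class docstring above (`runner.run()` in that example); the exact entry-point name may vary between Dopamine versions, so treat it as an assumption rather than a confirmed API.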
LeoQLi/NeuralGF
train_test.py
[ { "identifier": "Network", "path": "network.py", "snippet": "class Network(nn.Module):\n def __init__(self, num_points, num_knn):\n super(Network, self).__init__()\n self.num_points = num_points\n self.num_knn = num_knn\n self.num_iter = 2\n\n self.net = MLPNet_linear(d_in=3, d_mid=256, d_out=1, n_mid=8)\n\n def forward(self, pcl_source):\n \"\"\"\n pcl_source: (*, N, 3)\n \"\"\"\n self.sd_all = []\n self.grad_all = []\n with torch.set_grad_enabled(True):\n pcl_source.requires_grad = True\n sd_temp = torch.zeros_like(pcl_source)[::,0:1]\n grad_temp = torch.zeros_like(pcl_source)\n\n for i in range(self.num_iter):\n pcl_source = pcl_source - sd_temp * grad_temp\n\n sd_temp, grad_temp = self.net.gradient(pcl_source) # (*, N, 1), (*, N, 3)\n self.sd_all.append(sd_temp)\n self.grad_all.append(grad_temp)\n\n if i == 0:\n self.sd = sd_temp\n self.grad_norm = grad_temp\n elif i == 1:\n self.sd1 = sd_temp\n self.grad_norm1 = grad_temp\n elif i == 2:\n self.sd2 = sd_temp\n self.grad_norm2 = grad_temp\n else:\n raise ValueError('Not set value')\n\n self.grad_sum = F.normalize(sum(self.grad_all), dim=-1)\n\n return self.grad_sum\n\n def get_loss(self, pcl_raw=None, pcl_source=None, knn_idx=None):\n \"\"\"\n pcl_raw: (1, M, 3), M >= N\n pcl_source: (1, N+n, 3)\n normal_gt: (1, N, 3)\n knn_idx: (1, N, K)\n \"\"\"\n num_points = self.num_points\n _device, _dtype = pcl_source.device, pcl_source.dtype\n loss_d = torch.zeros(1, device=_device, dtype=_dtype)\n loss_v1 = torch.zeros(1, device=_device, dtype=_dtype)\n loss_v2 = torch.zeros(1, device=_device, dtype=_dtype)\n loss_v3 = torch.zeros(1, device=_device, dtype=_dtype)\n loss_reg1 = torch.zeros(1, device=_device, dtype=_dtype)\n loss_reg2 = torch.zeros(1, device=_device, dtype=_dtype)\n loss_con = torch.zeros(1, device=_device, dtype=_dtype)\n loss_sd = torch.zeros(1, device=_device, dtype=_dtype)\n\n pcl_nn = knn_gather(pcl_raw, knn_idx) # (1, N, K, 3)\n v = pcl_source[:, :num_points, None, :3] - pcl_nn # (1, N, K, 3)\n v1 = v[:,:,:8,:].mean(-2) # (1, N, 3)\n v2 = v[:,:,:4,:].mean(-2) # (1, N, 3)\n v3 = v[:,:,0,:] # (1, N, 3)\n\n pcl_target = torch.cat((pcl_nn[:,:,0,:], pcl_source[:, num_points:, :]), dim=-2)\n\n loss_reg1 = 10 * (self.sd[:, num_points:, :]**2).mean()\n loss_reg2 = 10 * (self.sd1**2).mean() #+ 10 * (self.sd2**2).mean()\n\n weight = torch.exp(-60 * torch.abs(self.sd)).squeeze() # (N,)\n\n loss_v1 = torch.linalg.norm((v1 - (self.sd * self.grad_norm)[:, :num_points, :]), ord=2, dim=-1).mean()\n loss_v2 = torch.linalg.norm((v2 - (self.sd * self.grad_norm)[:, :num_points, :]), ord=2, dim=-1).mean()\n loss_v3 = torch.linalg.norm((v3 - (self.sd * self.grad_norm)[:, :num_points, :]), ord=2, dim=-1).mean()\n\n pcl_source_new = pcl_source - self.sd * self.grad_norm - self.sd1 * self.grad_norm1 #- self.sd2 * self.grad_norm2\n loss_d = 0.3 * torch.linalg.norm((pcl_source_new - pcl_target), ord=2, dim=-1).mean()\n\n cos_ang = cos_angle(self.grad_norm[0, :, :], self.grad_norm1[0, :, :]) # (N,)\n # cos_ang1 = cos_angle(self.grad_norm[0, :, :], self.grad_norm2[0, :, :])\n loss_con = 0.01 * (weight * (1 - cos_ang)).mean() #+ 0.01 * (weight * (1 - cos_ang1)).mean()\n\n # loss_sd = 0.01 * torch.clamp(torch.abs(self.sd + self.sd1)[:, :num_points, :] - torch.linalg.norm(v3, ord=2, dim=-1), min=0.0).mean()\n\n loss_tuple = (loss_v1, loss_v2, loss_v3, loss_d, loss_reg1, loss_reg2, loss_con, loss_sd)\n loss_sum = sum(loss_tuple)\n return loss_sum, loss_tuple" }, { "identifier": "BaseDataset", "path": "datasets.py", "snippet": "class 
BaseDataset(Dataset):\n def __init__(self, root, data_set, data_list, num_points=5000, num_query=10, num_knn=64, dis_k=50, dis_scale=1.0):\n super().__init__()\n self.num_points = num_points\n self.num_query = num_query\n self.num_knn = num_knn\n self.dis_k = dis_k\n self.dis_scale = dis_scale\n self.num_split = 10\n self.max_point = int(3e5)\n self.data_dir = os.path.join(root, data_set)\n\n ### get all shape names\n if len(data_list) > 0:\n cur_sets = []\n with open(os.path.join(root, data_set, 'list', data_list + '.txt')) as f:\n cur_sets = f.readlines()\n cur_sets = [x.strip() for x in cur_sets]\n cur_sets = list(filter(None, cur_sets))\n else:\n raise ValueError('Data list need to be given.')\n for s in cur_sets:\n print(' ', s)\n self.cur_sets = cur_sets\n\n def get_data(self, shape_name):\n pcl = load_data(filedir=self.data_dir, filename=shape_name + '.xyz', dtype=np.float32)[:, :3]\n\n if os.path.exists(os.path.join(self.data_dir, shape_name + '.normals')):\n normal_gt = load_data(filedir=self.data_dir, filename=shape_name + '.normals', dtype=np.float32)\n else:\n normal_gt = np.zeros_like(pcl)\n\n ### normalization\n pcl = normalization(pcl)\n idx = np.linalg.norm(normal_gt, axis=-1) == 0.0\n normal_gt /= (np.linalg.norm(normal_gt, axis=-1, keepdims=True) + 1e-8)\n normal_gt[idx, :] = 0.0\n\n self.bbox_min = np.array([np.min(pcl[:,0]), np.min(pcl[:,1]), np.min(pcl[:,2])]) - 0.05\n self.bbox_max = np.array([np.max(pcl[:,0]), np.max(pcl[:,1]), np.max(pcl[:,2])]) + 0.05\n\n assert pcl.shape == normal_gt.shape\n return pcl, normal_gt\n\n def process_data(self, shape_name):\n self.pcl_raw = None\n self.k_idex = None\n self.pt_source = None\n self.knn_idx = None\n\n start_time = time.time()\n pointcloud, normal_gt = self.get_data(shape_name)\n\n if pointcloud.shape[0] > self.max_point:\n print('Using sparse point cloud data: %d' % self.max_point)\n pidx = np.random.choice(pointcloud.shape[0], self.max_point, replace=False)\n pointcloud = pointcloud[pidx, :]\n\n if 1000000 / pointcloud.shape[0] <= 10.0:\n num_query = self.num_query\n else:\n num_query = 1000000 // pointcloud.shape[0]\n\n sigmas = []\n k_idex = []\n ptree = spatial.cKDTree(pointcloud)\n for p in np.array_split(pointcloud, 100, axis=0):\n d, idex = ptree.query(p, k=self.dis_k + 1) # no self\n # d = np.clip(d, a_min=0, a_max=0.5)\n sigmas.append(d[:, -1])\n k_idex.append(idex)\n sigmas = np.concatenate(sigmas, axis=0)[:, None] # (N, 1)\n self.k_idex = np.concatenate(k_idex, axis=0) # (N, K)\n # sigmas[sigmas > 2 * sigmas.mean()] = 2 * sigmas.mean()\n\n sample = []\n knn_idx = []\n if self.dis_scale == 1.0 or self.dis_scale * np.sqrt(pointcloud.shape[0] / 20000) < self.dis_scale:\n dis_scale = self.dis_scale\n else:\n dis_scale = self.dis_scale * np.sqrt(pointcloud.shape[0] / 20000)\n for i in range(num_query):\n pcl_noisy = pointcloud + np.random.normal(0.0, 1.0, size=pointcloud.shape) * sigmas * dis_scale\n sample.append(pcl_noisy)\n\n for p in np.array_split(pcl_noisy, 100, axis=0):\n _, index = ptree.query(p, k=self.num_knn)\n knn_idx.append(index)\n print(i, 'Processing', shape_name)\n\n self.pt_source = np.concatenate(sample, axis=0) # noisy point cloud, (N * num_query, 3)\n self.knn_idx = np.concatenate(knn_idx, axis=0) # (N * num_query, K)\n if self.num_knn == 1:\n self.knn_idx = self.knn_idx[:, None]\n self.pt_num = self.pt_source.shape[0] - 1\n elapsed_time = time.time() - start_time # time second\n\n self.pcl_raw = torch.from_numpy(pointcloud).float() # (N, 3)\n self.k_idex = torch.from_numpy(self.k_idex).long() # 
(N, K1)\n print(shape_name, 'Size:', self.pt_source.shape, '| Time: %.3f sec' % elapsed_time, '\\n')\n\n def __len__(self):\n return self.pt_source.shape[0]\n\n def __getitem__(self, idx):\n index_coarse = np.random.choice(self.num_split, 1)\n index_fine = np.random.choice(self.pt_num//self.num_split, self.num_points, replace=False)\n index = index_fine * self.num_split + index_coarse\n\n pidx = np.random.choice(self.pcl_raw.shape[0], self.num_points//2, replace=False)\n pcl_raw_sub = self.pcl_raw[pidx]\n\n # knn_idx_sub = self.knn_idx[index, 0:1]\n # pcl_raw_sub = knn_gather_np(self.pointcloud, knn_idx_sub)[:,0,:]\n # pcl_raw_sub = torch.from_numpy(pcl_raw_sub).float()\n\n data = {\n 'pcl_raw': self.pcl_raw,\n # 'k_idex': self.k_idex[pidx],\n 'pcl_raw_sub': pcl_raw_sub,\n 'pcl_source': torch.from_numpy(self.pt_source[index]).float(),\n 'knn_idx': torch.from_numpy(self.knn_idx[index]).long(),\n }\n return data" }, { "identifier": "extract_mesh", "path": "mesh.py", "snippet": "def extract_mesh(func, bbox_min, bbox_max, resolution=256, threshold=0.0, points_gt=None, mesh_far=0.0):\n print('Creating mesh with resolution: {} and threshold: {}'.format(resolution, threshold))\n bound_min = torch.tensor(bbox_min, dtype=torch.float32)\n bound_max = torch.tensor(bbox_max, dtype=torch.float32)\n\n u = extract_fields(bound_min, bound_max, resolution, func=lambda pts: func(pts))\n vertices, triangles = mcubes.marching_cubes(u, threshold)\n\n vertices = vertices / (resolution - 1.0) * (bbox_max - bbox_min)[None, :] + bbox_min[None, :]\n mesh = trimesh.Trimesh(vertices, triangles)\n\n if mesh_far > 0 and points_gt is not None:\n mesh = remove_far(points_gt, mesh, mesh_far)\n return mesh" }, { "identifier": "seed_all", "path": "misc.py", "snippet": "def seed_all(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = True\n torch.backends.cudnn.deterministic = True\n\n # # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html\n # if seed == 0: # slower, more reproducible\n # torch.backends.cudnn.benchmark = False # default is False\n # torch.backends.cudnn.deterministic = True\n # else: # faster, less reproducible\n # torch.backends.cudnn.benchmark = True # if True, the net graph and input size should be fixed !!!\n # torch.backends.cudnn.deterministic = False" }, { "identifier": "get_log", "path": "misc.py", "snippet": "def get_log(args):\n log_dir, log_name = get_log_dir(args.log_root, prefix='',\n postfix='_' + args.tag if args.tag is not None else '')\n ckpt_dir = os.path.join(log_dir, '../ckpts')\n os.makedirs(ckpt_dir)\n\n code_dir = os.path.join(log_dir, 'code')\n os.makedirs(code_dir, exist_ok=True)\n os.system('cp %s %s' % ('*.py', code_dir))\n # os.system('cp -r %s %s' % ('net', code_dir))\n # os.system('cp -r %s %s' % ('utils', code_dir))\n\n git_commit(git_name=log_name)\n return log_dir, log_name, ckpt_dir" }, { "identifier": "get_logger", "path": "misc.py", "snippet": "def get_logger(args, log_dir, log_name, file_name, model=None):\n logger = creat_logger(log_name=log_name, log_dir=log_dir, file_name=file_name)\n logger.info('Command: {}'.format(' '.join(sys.argv)))\n arg_str = '\\n'.join([' {}: {}'.format(op, getattr(args, op)) for op in vars(args)])\n logger.info('Arguments:\\n' + arg_str)\n if model is not None:\n logger.info(repr(model))\n\n return logger" 
}, { "identifier": "creat_logger", "path": "misc.py", "snippet": "def creat_logger(log_name, log_dir=None, file_name='log'):\n logger = logging.getLogger(log_name)\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter('[%(asctime)s::%(name)s::%(levelname)s] %(message)s')\n\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(logging.DEBUG)\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n\n if log_dir is not None:\n file_handler = logging.FileHandler(os.path.join(log_dir, file_name+'.txt'), mode='w')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n logger.info('Output and logs will be saved to: {}'.format(log_dir))\n return logger" }, { "identifier": "knn_gather_np", "path": "misc.py", "snippet": "def knn_gather_np(x, idx):\n \"\"\"\n :param x: (N, C)\n :param idx: (M, K)\n :return (M, K, C)\n \"\"\"\n N, C = x.shape\n M, K = idx.shape\n x = x[None, ...].repeat(M, axis=0) # (M, N, C)\n idx = idx[..., None].repeat(C, axis=2) # (M, K, C)\n return np.take_along_axis(x, indices=idx, axis=1)" } ]
import os, sys import argparse import time import math import numpy as np import torch import torch.utils.data import torch.optim as optim import torch.optim.lr_scheduler as lr_scheduler import scipy.spatial as spatial import torch.multiprocessing as mp from network import Network from datasets import BaseDataset from mesh import extract_mesh from misc import seed_all, get_log, get_logger, creat_logger, knn_gather_np
7,245
### reorder and normalize the vectors, eliminate zero values pred_norm = np.zeros_like(grad_norm) pred_norm[rand_idxs, :] = grad_norm pred_norm[np.linalg.norm(pred_norm, axis=-1) == 0.0] = 1.0 pred_norm /= np.linalg.norm(pred_norm, axis=-1, keepdims=True) elapsed_time = time.time() - start_time time_sum += elapsed_time assert pcl_raw.shape == pred_norm.shape if args.avg_nor: # k_idex = [] ptree = spatial.cKDTree(pcl_raw) _, k_idex = ptree.query(pcl_raw, k=1, distance_upper_bound=0.3) if k_idex.ndim == 1: k_idex = k_idex[:, None] pred_norm = knn_gather_np(pred_norm, k_idex) pred_norm = pred_norm.mean(axis=1) if args.save_normal_npy or args.save_normal_xyz: normal_dir = os.path.join(output_dir, 'pred_normal') os.makedirs(normal_dir, exist_ok=True) path_save = os.path.join(normal_dir, shape_name) if args.save_normal_npy: np.save(path_save + '_normal.npy', pred_norm) if args.save_normal_xyz: pc_nor = np.concatenate([pcl_raw, pred_norm], axis=-1) # k = 1000; n = 50 # 10 # pc_nor = pc_nor[n*k:n*k+k, :] np.savetxt(path_save + '.xyz', pc_nor, fmt='%.6f') ### evaluation nn = np.sum(np.multiply(-1 * nor_gt, pred_norm), axis=1) nn[nn > 1] = 1 nn[nn < -1] = -1 ang = np.rad2deg(np.arccos(np.abs(nn))) rms = np.sqrt(np.mean(np.square(ang))) ang_o = np.rad2deg(np.arccos(nn)) ids = ang_o < 90.0 p90 = sum(ids) / pred_norm.shape[0] * 100 ### if more than half of points have wrong orientation, then flip all normals if p90 < 50.0: nn = np.sum(np.multiply(nor_gt, pred_norm), axis=1) nn[nn > 1] = 1 nn[nn < -1] = -1 ang_o = np.rad2deg(np.arccos(nn)) ids = ang_o < 90.0 p90 = sum(ids) / pred_norm.shape[0] * 100 rms_o = np.sqrt(np.mean(np.square(ang_o))) list_rms.append(rms) list_rms_o.append(rms_o) list_p90.append(p90) if np.mean(p90) < 90.0: list_bad[shape_name] = p90 logger.info('RMSE_U: %.3f, RMSE_O: %.3f, Correct orientation: %.3f %% (%s)' % (rms, rms_o, p90, shape_name)) if args.save_mesh: mesh_dir = os.path.join(output_dir, 'recon_mesh') os.makedirs(mesh_dir, exist_ok=True) mesh = extract_mesh(my_model.net.forward, bbox_min=test_set.bbox_min, bbox_max=test_set.bbox_max, points_gt=pcl_raw, mesh_far=args.mesh_far) mesh.export(os.path.join(mesh_dir, '%s.obj' % shape_name)) if len(list_p90) > 0: logger.info('Time: %.2f sec\n' % time_sum) logger.info('Average || RMSE_U: %.3f, RMSE_O: %.3f, Correct orientation: %.3f %%' % (np.mean(list_rms), np.mean(list_rms_o), np.mean(list_p90))) ss = '' for k, v in list_bad.items(): ss += '%s: %.3f %%\n' % (k, v) logger.info('Bad results in %d shapes: \n%s' % (len(list_p90), ss)) return 1 ### Arguments args = parse_arguments() if len(args.testset_list) == 0: args.testset_list = 'testset_' + args.data_set if args.data_set in ['SceneNN', 'Semantic3D', 'KITTI_sub', 'Others', '3DScene']: args.lr = 0.00001 args.dis_k = 64 if args.data_set in ['PCPNet']: args.dis_k = 25 # args.lr = 0.0007 eval_list = ['testset_no_noise', 'testset_low_noise', 'testset_med_noise', 'testset_high_noise', 'testset_vardensity_striped', 'testset_vardensity_gradient'] if args.data_set in ['FamousShape']: args.dis_k = 50 args.lr = 0.002 eval_list = ['testset_noise_clean', 'testset_noise_low', 'testset_noise_med', 'testset_noise_high', 'testset_density_stripe', 'testset_density_gradient'] if args.data_set == 'FamousShape5k': args.num_points = 1000 args.dis_k = 10 if args.data_set == 'WireframePC': args.max_iter = 10000 args.save_inter = 2500 args.num_points = 300 args.dis_k = 3 args.warn_up = 2000 # args.lr = 0.0001 if args.data_set == 'NestPC': args.dis_k = 50 # args.num_knn = 6 args.lr = 0.0001 
torch.cuda.set_device(args.gpu) _device = torch.device('cuda')
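The test loop in the code above scores predicted normals with an unoriented angular RMSE, an oriented RMSE, and the percentage of correctly oriented points, flipping all normals when more than half point the wrong way. The following is a standalone, hedged re-statement of that metric; the helper name and signature are assumptions introduced for illustration.

```python
import numpy as np

def orientation_metrics(nor_gt, pred_norm):
    # Cosine between ground-truth normals and the sign-flipped predictions.
    nn = np.clip(np.sum(-nor_gt * pred_norm, axis=1), -1.0, 1.0)
    rms_u = np.sqrt(np.mean(np.rad2deg(np.arccos(np.abs(nn))) ** 2))  # unoriented RMSE (degrees)

    ang_o = np.rad2deg(np.arccos(nn))             # oriented angle in [0, 180] degrees
    p_correct = np.mean(ang_o < 90.0) * 100.0     # share of points in the correct half-space
    if p_correct < 50.0:                          # global flip, as in the snippet above
        ang_o = 180.0 - ang_o
        p_correct = np.mean(ang_o < 90.0) * 100.0
    rms_o = np.sqrt(np.mean(ang_o ** 2))          # oriented RMSE after the possible flip
    return rms_u, rms_o, p_correct
```

This mirrors the per-shape numbers the log lines report as `RMSE_U`, `RMSE_O`, and `Correct orientation`.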
def parse_arguments(): parser = argparse.ArgumentParser() parser.add_argument('--gpu', type=int, default=0) parser.add_argument('--mode', type=str, default='') parser.add_argument('--log_root', type=str, default='./log') parser.add_argument('--data_set', type=str, default='', choices=['PCPNet', 'FamousShape', 'FamousShape5k', 'SceneNN', 'Others', 'KITTI_sub', 'Semantic3D', '3DScene', 'WireframePC', 'NestPC', 'Plane']) ### Train parser.add_argument('--seed', type=int, default=2023) parser.add_argument('--tag', type=str, default=None) parser.add_argument('--logging', type=eval, default=True, choices=[True, False]) parser.add_argument('--max_iter', type=int, default=20000) parser.add_argument('--save_inter', type=int, default=10000) parser.add_argument('--warn_up', type=int, default=10000) parser.add_argument('--lr', type=float, default=0.001) ### Dataset and loader parser.add_argument('--dataset_root', type=str, default='/data1/lq/Dataset/') parser.add_argument('--testset_list', type=str, default='') parser.add_argument('--batch_size', type=int, default=1) parser.add_argument('--num_workers', type=int, default=6) parser.add_argument('--num_points', type=int, default=5000) parser.add_argument('--num_query', type=int, default=10) parser.add_argument('--num_knn', type=int, default=64) parser.add_argument('--dis_k', type=int, default=50) parser.add_argument('--dis_scale', type=float, default=0.15) ### Test parser.add_argument('--ckpt_dir', type=str, default='') parser.add_argument('--ckpt_iter', type=int, default=None) parser.add_argument('--save_normal_npy', type=eval, default=False, choices=[True, False]) parser.add_argument('--save_normal_xyz', type=eval, default=False, choices=[True, False]) parser.add_argument('--save_mesh', type=eval, default=False, choices=[True, False]) parser.add_argument('--avg_nor', type=eval, default=False, choices=[True, False]) parser.add_argument('--mesh_far', type=float, default=-1.0) args = parser.parse_args() return args def update_learning_rate(optimizer, iter_step, init_lr, max_iter): warn_up = args.warn_up # 2000, 10000 lr = (iter_step / warn_up) if iter_step < warn_up else 0.5 * (math.cos((iter_step - warn_up)/(max_iter - warn_up) * math.pi) + 1) lr = lr * init_lr for g in optimizer.param_groups: g['lr'] = lr def train(data_list, log_dir, log_name, ckpt_dir, id=None): ### Dataset train_set = BaseDataset(root=args.dataset_root, data_set=args.data_set, data_list=data_list, num_points=args.num_points, num_query=args.num_query, num_knn=args.num_knn, dis_k=args.dis_k, dis_scale=args.dis_scale, ) dataloader = torch.utils.data.DataLoader( train_set, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=True, # faster speed ) log_flag = True num_shapes = len(train_set.cur_sets) for shape_idx, shape_name in enumerate(train_set.cur_sets): ### Model my_model = Network(args.num_points, num_knn=args.num_knn).to(_device).train() optimizer = optim.Adam(my_model.parameters(), lr=args.lr) train_set.process_data(shape_name) iter_dataloader = iter(dataloader) if log_flag: log_name = 'train(%s)(%d)' % (log_name, os.getpid()) if id is not None: log_name = log_name + '-%d' % id logger = get_logger(args, log_dir, log_name, file_name='log_'+data_list, model=my_model) log_flag = False time_sum = 0 for iter_i in range(1, args.max_iter+1): update_learning_rate(optimizer, iter_i, init_lr=args.lr, max_iter=args.max_iter) data = iter_dataloader.next() start_time = time.time() pcl_raw = data['pcl_raw'].to(_device) # (B, M, 3), M > N pcl_source = 
data['pcl_source'].to(_device) # (B, N, 3) knn_idx = data['knn_idx'].to(_device) # (B, N, K) pcl_raw_sub = data['pcl_raw_sub'].to(_device) if 'pcl_raw_sub' in data else None # (B, N, 3) ### Reset gradient and model state my_model.train() optimizer.zero_grad() pcl_source = torch.cat([pcl_source, pcl_raw_sub], dim=-2) grad_norm = my_model(pcl_source) loss, loss_tuple = my_model.get_loss(pcl_raw=pcl_raw, pcl_source=pcl_source, knn_idx=knn_idx) ### Backward and optimize loss.backward() optimizer.step() elapsed_time = time.time() - start_time time_sum += elapsed_time if iter_i % (args.save_inter//10) == 0: ss = '' for l in loss_tuple: ss += '%.6f+' % l.item() logger.info('shape:%d/%d, iter:%d/%d, loss=%.6f(%s), lr=%.6f' % ( shape_idx+1, num_shapes, iter_i, args.max_iter, loss, ss[:-1], optimizer.param_groups[0]['lr'])) if iter_i % args.save_inter == 0 or iter_i == args.max_iter: model_filename = os.path.join(ckpt_dir, shape_name + '_%d.pt' % iter_i) torch.save(my_model.state_dict(), model_filename) logger.info('Save model: ' + model_filename) # pc_nor = torch.cat([pcl_source, grad_norm], dim=-1)[0].cpu().detach().numpy() # np.savetxt(model_filename[:-3] + '.txt', pc_nor, fmt='%.6f') del my_model, optimizer logger.info('Time: %.2f sec\n' % time_sum) return 1 def test(data_list): ckpt_paths = os.path.join(args.log_root, args.ckpt_dir, 'ckpts/*.pt') assert len(ckpt_paths) > 0 ### Dataset test_set = BaseDataset(root=args.dataset_root, data_set=args.data_set, data_list=data_list, ) ### Model print('Building model ...') my_model = Network(args.num_points, num_knn=args.num_knn).to(_device).eval() ### Log PID = os.getpid() output_dir = os.path.join(args.log_root, args.ckpt_dir, 'test_%s' % args.ckpt_iter) os.makedirs(output_dir, exist_ok=True) logger = creat_logger('test(%d)(%s-%s)' % (PID, args.ckpt_dir, args.ckpt_iter), output_dir) logger.info('Command: {}'.format(' '.join(sys.argv))) trainable_num = sum(p.numel() for p in my_model.parameters() if p.requires_grad) logger.info('Num_params_trainable: %d' % trainable_num) max_n = int(2e5) list_bad = {} list_rms = [] list_rms_o = [] list_p90 = [] time_sum = 0 for shape_idx, shape_name in enumerate(test_set.cur_sets): ### load the trained model ckpt_path = os.path.join(args.log_root, args.ckpt_dir, 'ckpts/%s_%s.pt' % (shape_name, args.ckpt_iter)) if not os.path.exists(ckpt_path): logger.info('File not exist: ' + ckpt_path) continue my_model.load_state_dict(torch.load(ckpt_path, map_location=_device), strict=False) ### load a point cloud and shuffle the order of points pcl_raw, nor_gt = test_set.get_data(shape_name) # (N, 3) start_time = time.time() num_point = pcl_raw.shape[0] rand_idxs = np.random.choice(num_point, num_point, replace=False) pcl = pcl_raw[rand_idxs, :3] ### if there are too many points, the point cloud will be processed in batches, ### the number of output vectors may be less than the number of initial points (decided by remainder). 
if num_point <= max_n: pcl_source = torch.from_numpy(pcl).float().to(_device) with torch.no_grad(): grad_norm = my_model(pcl_source) grad_norm = grad_norm.cpu().detach().numpy() else: k = math.ceil(num_point / max_n) remainder = int(max_n * k % num_point) print('Split data: ', num_point, k, remainder) pcl_new = np.concatenate((pcl, pcl[:remainder]), axis=0) pcl_source = torch.from_numpy(pcl_new).float() # (max_n*k, D) grad_norm = np.zeros((pcl_new.shape[0], 3)) # (N, 3) with torch.no_grad(): for i in range(k): grad_norm_s = my_model(pcl_source[max_n*i:max_n*(i+1)].to(_device)) grad_norm[max_n*i:max_n*(i+1)] = grad_norm_s.cpu().detach().numpy() grad_norm = grad_norm[:max_n*k-remainder] ### reorder and normalize the vectors, eliminate zero values pred_norm = np.zeros_like(grad_norm) pred_norm[rand_idxs, :] = grad_norm pred_norm[np.linalg.norm(pred_norm, axis=-1) == 0.0] = 1.0 pred_norm /= np.linalg.norm(pred_norm, axis=-1, keepdims=True) elapsed_time = time.time() - start_time time_sum += elapsed_time assert pcl_raw.shape == pred_norm.shape if args.avg_nor: # k_idex = [] ptree = spatial.cKDTree(pcl_raw) _, k_idex = ptree.query(pcl_raw, k=1, distance_upper_bound=0.3) if k_idex.ndim == 1: k_idex = k_idex[:, None] pred_norm = knn_gather_np(pred_norm, k_idex) pred_norm = pred_norm.mean(axis=1) if args.save_normal_npy or args.save_normal_xyz: normal_dir = os.path.join(output_dir, 'pred_normal') os.makedirs(normal_dir, exist_ok=True) path_save = os.path.join(normal_dir, shape_name) if args.save_normal_npy: np.save(path_save + '_normal.npy', pred_norm) if args.save_normal_xyz: pc_nor = np.concatenate([pcl_raw, pred_norm], axis=-1) # k = 1000; n = 50 # 10 # pc_nor = pc_nor[n*k:n*k+k, :] np.savetxt(path_save + '.xyz', pc_nor, fmt='%.6f') ### evaluation nn = np.sum(np.multiply(-1 * nor_gt, pred_norm), axis=1) nn[nn > 1] = 1 nn[nn < -1] = -1 ang = np.rad2deg(np.arccos(np.abs(nn))) rms = np.sqrt(np.mean(np.square(ang))) ang_o = np.rad2deg(np.arccos(nn)) ids = ang_o < 90.0 p90 = sum(ids) / pred_norm.shape[0] * 100 ### if more than half of points have wrong orientation, then flip all normals if p90 < 50.0: nn = np.sum(np.multiply(nor_gt, pred_norm), axis=1) nn[nn > 1] = 1 nn[nn < -1] = -1 ang_o = np.rad2deg(np.arccos(nn)) ids = ang_o < 90.0 p90 = sum(ids) / pred_norm.shape[0] * 100 rms_o = np.sqrt(np.mean(np.square(ang_o))) list_rms.append(rms) list_rms_o.append(rms_o) list_p90.append(p90) if np.mean(p90) < 90.0: list_bad[shape_name] = p90 logger.info('RMSE_U: %.3f, RMSE_O: %.3f, Correct orientation: %.3f %% (%s)' % (rms, rms_o, p90, shape_name)) if args.save_mesh: mesh_dir = os.path.join(output_dir, 'recon_mesh') os.makedirs(mesh_dir, exist_ok=True) mesh = extract_mesh(my_model.net.forward, bbox_min=test_set.bbox_min, bbox_max=test_set.bbox_max, points_gt=pcl_raw, mesh_far=args.mesh_far) mesh.export(os.path.join(mesh_dir, '%s.obj' % shape_name)) if len(list_p90) > 0: logger.info('Time: %.2f sec\n' % time_sum) logger.info('Average || RMSE_U: %.3f, RMSE_O: %.3f, Correct orientation: %.3f %%' % (np.mean(list_rms), np.mean(list_rms_o), np.mean(list_p90))) ss = '' for k, v in list_bad.items(): ss += '%s: %.3f %%\n' % (k, v) logger.info('Bad results in %d shapes: \n%s' % (len(list_p90), ss)) return 1 ### Arguments args = parse_arguments() if len(args.testset_list) == 0: args.testset_list = 'testset_' + args.data_set if args.data_set in ['SceneNN', 'Semantic3D', 'KITTI_sub', 'Others', '3DScene']: args.lr = 0.00001 args.dis_k = 64 if args.data_set in ['PCPNet']: args.dis_k = 25 # args.lr = 0.0007 eval_list = 
['testset_no_noise', 'testset_low_noise', 'testset_med_noise', 'testset_high_noise',
             'testset_vardensity_striped', 'testset_vardensity_gradient']

if args.data_set in ['FamousShape']:
    args.dis_k = 50
    args.lr = 0.002
    eval_list = ['testset_noise_clean', 'testset_noise_low', 'testset_noise_med', 'testset_noise_high',
                 'testset_density_stripe', 'testset_density_gradient']

if args.data_set == 'FamousShape5k':
    args.num_points = 1000
    args.dis_k = 10

if args.data_set == 'WireframePC':
    args.max_iter = 10000
    args.save_inter = 2500
    args.num_points = 300
    args.dis_k = 3
    args.warn_up = 2000
    # args.lr = 0.0001

if args.data_set == 'NestPC':
    args.dis_k = 50
    # args.num_knn = 6
    args.lr = 0.0001

torch.cuda.set_device(args.gpu)
_device = torch.device('cuda')
seed_all(args.seed)
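The update_learning_rate helper in the training script above ramps the learning rate linearly during warm-up and then decays it with a half-cosine. A minimal standalone sketch of that schedule, using the argparse defaults from the same snippet (lr=0.001, warn_up=10000, max_iter=20000); the function name lr_at is only illustrative:

import math

def lr_at(iter_step, init_lr=0.001, warn_up=10000, max_iter=20000):
    # Linear ramp 0 -> init_lr during warm-up, then cosine decay init_lr -> 0.
    if iter_step < warn_up:
        scale = iter_step / warn_up
    else:
        scale = 0.5 * (math.cos((iter_step - warn_up) / (max_iter - warn_up) * math.pi) + 1)
    return scale * init_lr

# lr_at(5000) -> 0.0005 (halfway through warm-up); lr_at(20000) -> 0.0 (end of training)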
3
2023-10-22 08:51:50+00:00
12k
BurgerBurgerBurger/AA
run.py
[ { "identifier": "add_args", "path": "args.py", "snippet": "def add_args(parser):\n parser.add_argument(\"--do_train\", action=\"store_true\")\n parser.add_argument(\"--data_dir\", default=\"./dataset/docred\", type=str)\n parser.add_argument(\"--transformer_type\", default=\"bert\", type=str)\n parser.add_argument(\"--model_name_or_path\", default=\"bert-base-cased\", type=str)\n\n parser.add_argument(\"--train_file\", default=\"train_annotated.json\", type=str)\n parser.add_argument(\"--dev_file\", default=\"dev.json\", type=str)\n parser.add_argument(\"--test_file\", default=\"dev.json\", type=str)\n parser.add_argument(\"--pred_file\", default=\"results.json\", type=str)\n parser.add_argument(\"--save_path\", default=\"\", type=str)\n parser.add_argument(\"--load_path\", default=\"\", type=str)\n parser.add_argument(\"--results_path\", default=\"\", type=str)\n parser.add_argument(\"--teacher_sig_path\", default=\"\", type=str)\n parser.add_argument(\"--save_attn\", action=\"store_true\", help=\"Whether store the evidence distribution or not\")\n\n # graph\n parser.add_argument(\"--attn_heads\", default=2, type=int, help=\"Attention heads\")\n parser.add_argument(\"--gcn_layers\", default=2, type=int, help=\"GCN layers\")\n parser.add_argument(\"--iters\", default=2, type=int, help=\"Iteration\")\n parser.add_argument(\"--use_graph\", action=\"store_true\", help=\"Use graph\")\n\n parser.add_argument(\"--config_name\", default=\"\", type=str,\n help=\"Pretrained config name or path if not the same as model_name\")\n parser.add_argument(\"--tokenizer_name\", default=\"\", type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\")\n parser.add_argument(\"--max_seq_length\", default=1024, type=int,\n help=\"The maximum total input sequence length after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\")\n\n parser.add_argument(\"--train_batch_size\", default=4, type=int,\n help=\"Batch size for training.\")\n parser.add_argument(\"--test_batch_size\", default=8, type=int,\n help=\"Batch size for testing.\")\n parser.add_argument(\"--eval_mode\", default=\"single\", type=str,\n choices=[\"single\", \"fushion\"], \n help=\"Single-pass evaluation or evaluation with inference-stage fusion.\")\n parser.add_argument(\"--gradient_accumulation_steps\", default=1, type=int,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument(\"--num_labels\", default=4, type=int,\n help=\"Max number of labels in prediction.\")\n parser.add_argument(\"--max_sent_num\", default=25, type=int,\n help=\"Max number of sentences in each document.\")\n parser.add_argument(\"--evi_thresh\", default=0.2, type=float,\n help=\"Evidence Threshold. \")\n parser.add_argument(\"--evi_lambda\", default=0.1, type=float,\n help=\"Weight of relation-agnostic evidence loss during training. \")\n parser.add_argument(\"--attn_lambda\", default=1.0, type=float,\n help=\"Weight of knowledge distillation loss for attentions during training. 
\")\n parser.add_argument(\"--lr_transformer\", default=5e-5, type=float,\n help=\"The initial learning rate for transformer.\")\n parser.add_argument(\"--lr_added\", default=1e-4, type=float,\n help=\"The initial learning rate for added modules.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-6, type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\n help=\"Max gradient norm.\")\n parser.add_argument(\"--warmup_ratio\", default=0.06, type=float,\n help=\"Warm up ratio for Adam.\")\n parser.add_argument(\"--num_train_epochs\", default=30.0, type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--evaluation_steps\", default=-1, type=int,\n help=\"Number of training steps between evaluations.\")\n parser.add_argument(\"--seed\", type=int, default=66,\n help=\"random seed for initialization\")\n parser.add_argument(\"--num_class\", type=int, default=97,\n help=\"Number of relation types in dataset.\")\n\n return parser" }, { "identifier": "DocREModel", "path": "model.py", "snippet": "class DocREModel(nn.Module):\n\n def __init__(self, args, config, model, tokenizer,\n emb_size=768, block_size=64, num_labels=-1,\n max_sent_num=25, evi_thresh=0.2):\n super().__init__()\n self.config = config\n self.model = model\n self.tokenizer = tokenizer\n self.hidden_size = config.hidden_size\n\n self.loss_fnt = ATLoss()\n self.loss_fnt_evi = nn.KLDivLoss(reduction=\"batchmean\")\n\n self.head_extractor = nn.Linear(self.hidden_size * 2, emb_size)\n self.tail_extractor = nn.Linear(self.hidden_size * 2, emb_size)\n\n self.use_graph = args.use_graph\n if self.use_graph:\n self.head_extractor = nn.Linear(3 * config.hidden_size, emb_size)\n self.tail_extractor = nn.Linear(3 * config.hidden_size, emb_size)\n self.bilinear = nn.Linear(emb_size * block_size, config.num_labels)\n\n self.emb_size = emb_size\n self.block_size = block_size\n self.num_labels = num_labels\n self.total_labels = config.num_labels\n self.max_sent_num = max_sent_num\n self.evi_thresh = evi_thresh\n\n self.edges = ['self-loop', 'mention-anaphor', 'co-reference', 'inter-entity']\n\n if self.use_graph:\n self.graph_layers = nn.ModuleList(\n AttentionGCNLayer(self.edges, self.hidden_size, nhead=args.attn_heads, iters=args.gcn_layers) for _ in\n range(args.iters))\n\n def encode(self, input_ids, attention_mask):\n config = self.config\n if config.transformer_type == \"bert\":\n start_tokens = [config.cls_token_id]\n end_tokens = [config.sep_token_id]\n elif config.transformer_type == \"roberta\":\n start_tokens = [config.cls_token_id]\n end_tokens = [config.sep_token_id, config.sep_token_id]\n # process long documents.\n sequence_output, attention = process_long_input(self.model, input_ids, attention_mask, start_tokens, end_tokens)\n\n return sequence_output, attention\n\n def get_hrt(self, sequence_output, attention, entity_pos, hts, offset):\n n, h, _, c = attention.size()\n hss, tss, rss = [], [], []\n ht_atts = []\n\n for i in range(len(entity_pos)): # for each batch\n entity_embs, entity_atts = [], []\n\n # obtain entity embedding from mention embeddings.\n for eid, e in enumerate(entity_pos[i]): # for each entity\n if len(e) > 1:\n e_emb, e_att = [], []\n for mid, (start, end) in enumerate(e): # for every mention\n if start + offset < c:\n # In case the entity mention is truncated due to limited max seq length.\n e_emb.append(sequence_output[i, start + offset])\n e_att.append(attention[i, :, start + offset])\n\n if len(e_emb) > 
0:\n e_emb = torch.logsumexp(torch.stack(e_emb, dim=0), dim=0)\n e_att = torch.stack(e_att, dim=0).mean(0)\n else:\n e_emb = torch.zeros(self.config.hidden_size).to(sequence_output)\n e_att = torch.zeros(h, c).to(attention)\n else:\n start, end = e[0]\n if start + offset < c:\n e_emb = sequence_output[i, start + offset]\n e_att = attention[i, :, start + offset]\n else:\n e_emb = torch.zeros(self.config.hidden_size).to(sequence_output)\n e_att = torch.zeros(h, c).to(attention)\n\n entity_embs.append(e_emb)\n entity_atts.append(e_att)\n\n entity_embs = torch.stack(entity_embs, dim=0) # [n_e, d]\n entity_atts = torch.stack(entity_atts, dim=0) # [n_e, h, seq_len]\n\n ht_i = torch.LongTensor(hts[i]).to(sequence_output.device)\n\n # obtain subject/object (head/tail) embeddings from entity embeddings.\n hs = torch.index_select(entity_embs, 0, ht_i[:, 0])\n ts = torch.index_select(entity_embs, 0, ht_i[:, 1])\n\n h_att = torch.index_select(entity_atts, 0, ht_i[:, 0])\n t_att = torch.index_select(entity_atts, 0, ht_i[:, 1])\n\n ht_att = (h_att * t_att).mean(1) # average over all heads\n ht_att = ht_att / (ht_att.sum(1, keepdim=True) + 1e-30)\n ht_atts.append(ht_att)\n\n # obtain local context embeddings.\n rs = contract(\"ld,rl->rd\", sequence_output[i], ht_att)\n\n hss.append(hs)\n tss.append(ts)\n rss.append(rs)\n\n rels_per_batch = [len(b) for b in hss]\n hss = torch.cat(hss, dim=0) # (num_ent_pairs_all_batches, emb_size)\n tss = torch.cat(tss, dim=0) # (num_ent_pairs_all_batches, emb_size)\n rss = torch.cat(rss, dim=0) # (num_ent_pairs_all_batches, emb_size)\n ht_atts = torch.cat(ht_atts, dim=0) # (num_ent_pairs_all_batches, max_doc_len)\n\n return hss, rss, tss, ht_atts, rels_per_batch\n\n def graph(self, sequence_output, graphs, attention, entity_pos, hts, offset):\n n, h, _, c = attention.size()\n\n max_node = max([graph.shape[0] for graph in graphs])\n graph_fea = torch.zeros(n, max_node, self.config.hidden_size, device=sequence_output.device)\n graph_adj = torch.zeros(n, max_node, max_node, device=sequence_output.device)\n\n for i, graph in enumerate(graphs):\n nodes_num = graph.shape[0]\n graph_adj[i, :nodes_num, :nodes_num] = torch.from_numpy(graph)\n\n for i in range(len(entity_pos)):\n mention_index = 0\n for e in entity_pos[i]:\n for start, end in e:\n if start + offset < c:\n # In case the entity mention is truncated due to limited max seq length.\n graph_fea[i, mention_index, :] = sequence_output[i, start + offset]\n else:\n graph_fea[i, mention_index, :] = torch.zeros(self.config.hidden_size).to(sequence_output)\n mention_index += 1\n\n for graph_layer in self.graph_layers:\n graph_fea, _ = graph_layer(graph_fea, graph_adj)\n\n h_entity, t_entity = [], []\n for i in range(len(entity_pos)):\n entity_embs = []\n mention_index = 0\n for e in entity_pos[i]:\n e_emb = graph_fea[i, mention_index:mention_index + len(e), :]\n mention_index += len(e)\n\n e_emb = torch.logsumexp(e_emb, dim=0) if len(e) > 1 else e_emb.squeeze(0)\n entity_embs.append(e_emb)\n\n entity_embs = torch.stack(entity_embs, dim=0)\n ht_i = torch.LongTensor(hts[i]).to(sequence_output.device)\n hs = torch.index_select(entity_embs, 0, ht_i[:, 0])\n ts = torch.index_select(entity_embs, 0, ht_i[:, 1])\n h_entity.append(hs)\n t_entity.append(ts)\n\n h_entity = torch.cat(h_entity, dim=0)\n t_entity = torch.cat(t_entity, dim=0)\n return h_entity, t_entity\n\n def forward_rel(self, hs, ts, rs, h, t):\n hs = torch.tanh(self.head_extractor(torch.cat([hs, rs, h], dim=-1)))\n ts = torch.tanh(self.tail_extractor(torch.cat([ts, rs, 
t], dim=-1)))\n # split into several groups.\n b1 = hs.view(-1, self.emb_size // self.block_size, self.block_size)\n b2 = ts.view(-1, self.emb_size // self.block_size, self.block_size)\n\n bl = (b1.unsqueeze(3) * b2.unsqueeze(2)).view(-1, self.emb_size * self.block_size)\n logits = self.bilinear(bl)\n\n return logits\n\n def forward_rel_no_graph(self, hs, ts, rs):\n hs = torch.tanh(self.head_extractor(torch.cat([hs, rs], dim=-1)))\n ts = torch.tanh(self.tail_extractor(torch.cat([ts, rs], dim=-1)))\n # split into several groups.\n b1 = hs.view(-1, self.emb_size // self.block_size, self.block_size)\n b2 = ts.view(-1, self.emb_size // self.block_size, self.block_size)\n\n bl = (b1.unsqueeze(3) * b2.unsqueeze(2)).view(-1, self.emb_size * self.block_size)\n logits = self.bilinear(bl)\n\n return logits\n\n def forward_evi(self, doc_attn, sent_pos, batch_rel, offset):\n max_sent_num = max([len(sent) for sent in sent_pos])\n rel_sent_attn = []\n for i in range(len(sent_pos)): # for each batch\n # the relation ids corresponds to document in batch i is [sum(batch_rel[:i]), sum(batch_rel[:i+1]))\n curr_attn = doc_attn[sum(batch_rel[:i]):sum(batch_rel[:i + 1])]\n curr_sent_pos = [torch.arange(s[0], s[1]).to(curr_attn.device) + offset for s in sent_pos[i]] # + offset\n\n curr_attn_per_sent = [curr_attn.index_select(-1, sent) for sent in curr_sent_pos]\n curr_attn_per_sent += [torch.zeros_like(curr_attn_per_sent[0])] * (max_sent_num - len(curr_attn_per_sent))\n sum_attn = torch.stack([attn.sum(dim=-1) for attn in curr_attn_per_sent],\n dim=-1) # sum across those attentions\n rel_sent_attn.append(sum_attn)\n\n s_attn = torch.cat(rel_sent_attn, dim=0)\n return s_attn\n\n def forward(self,\n input_ids=None,\n attention_mask=None,\n labels=None, # relation labels\n entity_pos=None,\n hts=None, # entity pairs\n sent_pos=None,\n sent_labels=None, # evidence labels (0/1)\n teacher_attns=None, # evidence distribution from teacher model\n graph=None,\n tag=\"train\"\n ):\n\n offset = 1 if self.config.transformer_type in [\"bert\", \"roberta\"] else 0\n output = {}\n sequence_output, attention = self.encode(input_ids, attention_mask)\n\n hs, rs, ts, doc_attn, batch_rel = self.get_hrt(sequence_output, attention, entity_pos, hts, offset)\n\n if self.use_graph:\n h, t = self.graph(sequence_output, graph, attention, entity_pos, hts, offset)\n logits = self.forward_rel(hs, ts, rs, h, t)\n else:\n logits = self.forward_rel_no_graph(hs, ts, rs)\n\n output[\"rel_pred\"] = self.loss_fnt.get_label(logits, num_labels=self.num_labels)\n\n if sent_labels is not None: # human-annotated evidence available\n\n s_attn = self.forward_evi(doc_attn, sent_pos, batch_rel, offset)\n output[\"evi_pred\"] = F.pad(s_attn > self.evi_thresh, (0, self.max_sent_num - s_attn.shape[-1]))\n\n if tag in [\"test\", \"dev\"]: # testing\n scores_topk = self.loss_fnt.get_score(logits, self.num_labels)\n output[\"scores\"] = scores_topk[0]\n output[\"topks\"] = scores_topk[1]\n\n if tag == \"infer\": # teacher model inference\n output[\"attns\"] = doc_attn.split(batch_rel)\n\n else: # training\n # relation extraction loss\n loss = self.loss_fnt(logits.float(), labels.float())\n output[\"loss\"] = {\"rel_loss\": loss.to(sequence_output)}\n\n if sent_labels is not None: # supervised training with human evidence\n\n idx_used = torch.nonzero(labels[:, 1:].sum(dim=-1)).view(-1)\n # evidence retrieval loss (kldiv loss)\n s_attn = s_attn[idx_used]\n sent_labels = sent_labels[idx_used]\n norm_s_labels = sent_labels / (sent_labels.sum(dim=-1, keepdim=True) + 
1e-30)\n norm_s_labels[norm_s_labels == 0] = 1e-30\n s_attn[s_attn == 0] = 1e-30\n evi_loss = self.loss_fnt_evi(s_attn.log(), norm_s_labels)\n output[\"loss\"][\"evi_loss\"] = evi_loss.to(sequence_output)\n\n elif teacher_attns is not None: # self training with teacher attention\n\n doc_attn[doc_attn == 0] = 1e-30\n teacher_attns[teacher_attns == 0] = 1e-30\n attn_loss = self.loss_fnt_evi(doc_attn.log(), teacher_attns)\n output[\"loss\"][\"attn_loss\"] = attn_loss.to(sequence_output)\n\n return output" }, { "identifier": "set_seed", "path": "utils.py", "snippet": "def set_seed(args):\n seed = int(args.seed)\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'\n torch.use_deterministic_algorithms(True)" }, { "identifier": "collate_fn", "path": "utils.py", "snippet": "def collate_fn(batch):\n max_len = max([len(f[\"input_ids\"]) for f in batch])\n max_sent = max([len(f[\"sent_pos\"]) for f in batch])\n input_ids = [f[\"input_ids\"] + [0] * (max_len - len(f[\"input_ids\"])) for f in batch]\n input_mask = [[1.0] * len(f[\"input_ids\"]) + [0.0] * (max_len - len(f[\"input_ids\"])) for f in batch]\n labels = [f[\"labels\"] for f in batch]\n entity_pos = [f[\"entity_pos\"] for f in batch]\n hts = [f[\"hts\"] for f in batch]\n sent_pos = [f[\"sent_pos\"] for f in batch]\n sent_labels = [f[\"sent_labels\"] for f in batch if \"sent_labels\" in f]\n attns = [f[\"attns\"] for f in batch if \"attns\" in f]\n\n input_ids = torch.tensor(input_ids, dtype=torch.long)\n input_mask = torch.tensor(input_mask, dtype=torch.float)\n\n labels = [torch.tensor(label) for label in labels]\n labels = torch.cat(labels, dim=0)\n\n if sent_labels != [] and None not in sent_labels:\n sent_labels_tensor = []\n for sent_label in sent_labels:\n sent_label = np.array(sent_label)\n sent_labels_tensor.append(np.pad(sent_label, ((0, 0), (0, max_sent - sent_label.shape[1]))))\n sent_labels_tensor = torch.from_numpy(np.concatenate(sent_labels_tensor, axis=0))\n else:\n sent_labels_tensor = None\n\n if attns:\n attns = [np.pad(attn, ((0, 0), (0, max_len - attn.shape[1]))) for attn in attns]\n attns = torch.from_numpy(np.concatenate(attns, axis=0))\n else:\n attns = None\n\n graph = [f[\"graph\"] for f in batch]\n\n output = (input_ids, input_mask, labels, entity_pos, hts, sent_pos, sent_labels_tensor, attns, graph)\n\n return output" }, { "identifier": "create_directory", "path": "utils.py", "snippet": "def create_directory(d):\n if d and not os.path.exists(d):\n os.makedirs(d)\n return d" }, { "identifier": "read_docred", "path": "prepro.py", "snippet": "def read_docred(file_in,\n tokenizer,\n transformer_type=\"bert\",\n max_seq_length=1024,\n teacher_sig_path=\"\",\n single_results=None):\n\n i_line = 0\n pos_samples = 0\n neg_samples = 0\n features = []\n\n if file_in == \"\":\n return None\n\n with open(file_in, \"r\", encoding='utf-8') as fh:\n data = json.load(fh)\n\n if teacher_sig_path != \"\": # load logits\n basename = os.path.splitext(os.path.basename(file_in))[0]\n attns_file = os.path.join(teacher_sig_path, f\"{basename}.attns\")\n attns = pickle.load(open(attns_file, 'rb'))\n\n if single_results != None:\n # reorder predictions as relations by title\n pred_pos_samples = 0\n pred_neg_samples = 0\n pred_rels = single_results\n title2preds = {}\n for pred_rel in 
pred_rels:\n if pred_rel[\"title\"] in title2preds:\n title2preds[pred_rel[\"title\"]].append(pred_rel)\n else:\n title2preds[pred_rel[\"title\"]] = [pred_rel]\n\n for doc_id in tqdm(range(len(data)), desc=\"Loading examples\"):\n\n sample = data[doc_id]\n entities = sample['vertexSet']\n entity_start, entity_end = [], []\n # record entities\n for entity in entities:\n for mention in entity:\n sent_id = mention[\"sent_id\"]\n pos = mention[\"pos\"]\n entity_start.append((sent_id, pos[0],))\n entity_end.append((sent_id, pos[1] - 1,))\n\n # add entity markers\n sents, sent_map, sent_pos = add_entity_markers(sample, tokenizer, entity_start, entity_end)\n\n # training triples with positive examples (entity pairs with labels)\n train_triple = {}\n\n if \"labels\" in sample:\n for label in sample['labels']:\n evidence = label['evidence']\n r = int(docred_rel2id[label['r']])\n\n # update training triples\n if (label['h'], label['t']) not in train_triple:\n train_triple[(label['h'], label['t'])] = [\n {'relation': r, 'evidence': evidence}]\n else:\n train_triple[(label['h'], label['t'])].append(\n {'relation': r, 'evidence': evidence})\n\n # get anaphors in the doc\n mentions = set([m['name'] for e in entities for m in e])\n\n potential_mention = get_anaphors(sample['sents'], mentions)\n\n entities.append(potential_mention)\n\n # entity start, end position\n entity_pos = []\n\n for e in entities:\n entity_pos.append([])\n for m in e:\n start = sent_map[m[\"sent_id\"]][m[\"pos\"][0]]\n end = sent_map[m[\"sent_id\"]][m[\"pos\"][1]]\n label = m[\"type\"]\n entity_pos[-1].append((start, end,))\n\n relations, hts, sent_labels = [], [], []\n\n for h, t in train_triple.keys(): # for every entity pair with gold relation\n relation = [0] * len(docred_rel2id)\n sent_evi = [0] * len(sent_pos)\n\n for mention in train_triple[h, t]: # for each relation mention with head h and tail t\n relation[mention[\"relation\"]] = 1\n for i in mention[\"evidence\"]:\n sent_evi[i] += 1\n\n relations.append(relation)\n hts.append([h, t])\n sent_labels.append(sent_evi)\n pos_samples += 1\n\n for h in range(len(entities) - 1):\n for t in range(len(entities) - 1):\n # all entity pairs that do not have relation are treated as negative samples\n if h != t and [h, t] not in hts: # and [t, h] not in hts:\n relation = [1] + [0] * (len(docred_rel2id) - 1)\n sent_evi = [0] * len(sent_pos)\n relations.append(relation)\n\n hts.append([h, t])\n sent_labels.append(sent_evi)\n neg_samples += 1\n\n graph = create_graph(entity_pos)\n\n assert len(relations) == (len(entities) - 1) * (len(entities) - 2)\n assert len(sents) < max_seq_length\n sents = sents[:max_seq_length - 2] # truncate, -2 for [CLS] and [SEP]\n input_ids = tokenizer.convert_tokens_to_ids(sents)\n input_ids = tokenizer.build_inputs_with_special_tokens(input_ids)\n\n feature = [{'input_ids': input_ids,\n 'entity_pos': entity_pos if entity_pos[-1] != [] else entity_pos[:-1],\n 'labels': relations,\n 'hts': hts,\n 'sent_pos': sent_pos,\n 'sent_labels': sent_labels,\n 'title': sample['title'],\n 'graph': graph\n }]\n\n if teacher_sig_path != '': # add evidence distributions from the teacher model\n feature[0]['attns'] = attns[doc_id][:, :len(input_ids)]\n\n if single_results is not None: # get pseudo documents from predictions of the single run\n offset = 1 if transformer_type in [\"bert\", \"roberta\"] else 0\n if sample[\"title\"] in title2preds:\n feature, pos_sample, neg_sample, = get_pseudo_features(feature[0], title2preds[sample[\"title\"]],\n entities, sent_map, offset, 
tokenizer)\n pred_pos_samples += pos_sample\n pred_neg_samples += neg_sample\n\n i_line += len(feature)\n features.extend(feature)\n\n print(\"# of documents {}.\".format(i_line))\n if single_results is not None:\n print(\"# of positive examples {}.\".format(pred_pos_samples))\n print(\"# of negative examples {}.\".format(pred_neg_samples))\n\n else:\n print(\"# of positive examples {}.\".format(pos_samples))\n print(\"# of negative examples {}.\".format(neg_samples))\n\n return features" }, { "identifier": "to_official", "path": "evaluation.py", "snippet": "def to_official(preds: list, features: list, evi_preds: list = [], scores: list = [], topks: list = []):\n '''\n Convert the predictions to official format for evaluating.\n Input:\n :preds: list of dictionaries, each dictionary entry is a predicted relation triple from the original document. Keys: ['title', 'h_idx', 't_idx', 'r', 'evidence', 'score'].\n :features: list of features within each document. Identical to the lists obtained from pre-processing.\n :evi_preds: list of the evidence prediction corresponding to each relation triple prediction.\n :scores: list of scores of topk relation labels for each entity pair.\n :topks: list of topk relation labels for each entity pair.\n Output:\n :official_res: official results used for evaluation.\n :res: topk results to be dumped into file, which can be further used during fushion.\n '''\n\n h_idx, t_idx, title, sents = [], [], [], []\n\n for f in features:\n if \"entity_map\" in f:\n hts = [[f[\"entity_map\"][ht[0]], f[\"entity_map\"][ht[1]]] for ht in f[\"hts\"]]\n else:\n hts = f[\"hts\"]\n\n h_idx += [ht[0] for ht in hts]\n t_idx += [ht[1] for ht in hts]\n title += [f[\"title\"] for ht in hts]\n sents += [len(f[\"sent_pos\"])] * len(hts)\n\n official_res = []\n res = []\n\n for i in range(preds.shape[0]): # for each entity pair\n if scores != []:\n score = extract_relative_score(scores[i], topks[i])\n pred = topks[i]\n else:\n pred = preds[i]\n pred = np.nonzero(pred)[0].tolist()\n\n for p in pred: # for each predicted relation label (topk)\n curr_result = {\n 'title': title[i],\n 'h_idx': h_idx[i],\n 't_idx': t_idx[i],\n 'r': id2rel[p],\n }\n if evi_preds != []:\n curr_evi = evi_preds[i]\n evis = np.nonzero(curr_evi)[0].tolist()\n curr_result[\"evidence\"] = [evi for evi in evis if evi < sents[i]]\n if scores != []:\n curr_result[\"score\"] = score[np.where(topks[i] == p)].item()\n if p != 0 and p in np.nonzero(preds[i])[0].tolist():\n official_res.append(curr_result)\n res.append(curr_result)\n\n return official_res, res" }, { "identifier": "official_evaluate", "path": "evaluation.py", "snippet": "def official_evaluate(tmp, path, train_file=\"train_annotated.json\", dev_file=\"dev.json\"):\n '''\n Adapted from the official evaluation code\n '''\n truth_dir = os.path.join(path, 'ref')\n\n if not os.path.exists(truth_dir):\n os.makedirs(truth_dir)\n\n fact_in_train_annotated = gen_train_facts(os.path.join(path, train_file), truth_dir)\n fact_in_train_distant = gen_train_facts(os.path.join(path, \"train_distant.json\"), truth_dir)\n\n truth = json.load(open(os.path.join(path, dev_file)))\n\n std = {}\n tot_evidences = 0\n titleset = set([])\n\n title2vectexSet = {}\n\n for x in truth:\n title = x['title']\n titleset.add(title)\n\n vertexSet = x['vertexSet']\n title2vectexSet[title] = vertexSet\n\n if 'labels' not in x: # official test set from DocRED\n continue\n\n for label in x['labels']:\n r = label['r']\n h_idx = label['h']\n t_idx = label['t']\n std[(title, r, h_idx, t_idx)] = 
set(label['evidence'])\n tot_evidences += len(label['evidence'])\n\n tot_relations = len(std)\n tmp.sort(key=lambda x: (x['title'], x['h_idx'], x['t_idx'], x['r']))\n submission_answer = [tmp[0]]\n\n for i in range(1, len(tmp)):\n x = tmp[i]\n y = tmp[i - 1]\n if (x['title'], x['h_idx'], x['t_idx'], x['r']) != (y['title'], y['h_idx'], y['t_idx'], y['r']):\n submission_answer.append(tmp[i])\n\n correct_re = 0\n correct_evidence = 0\n pred_evi = 0\n\n correct_in_train_annotated = 0\n correct_in_train_distant = 0\n titleset2 = set([])\n for x in submission_answer:\n title = x['title']\n h_idx = x['h_idx']\n t_idx = x['t_idx']\n r = x['r']\n titleset2.add(title)\n if title not in title2vectexSet:\n continue\n vertexSet = title2vectexSet[title]\n\n if 'evidence' in x: # and (title, h_idx, t_idx) in std:\n evi = set(x['evidence'])\n else:\n evi = set([])\n pred_evi += len(evi)\n\n if (title, r, h_idx, t_idx) in std:\n correct_re += 1\n stdevi = std[(title, r, h_idx, t_idx)]\n correct_evidence += len(stdevi & evi)\n in_train_annotated = in_train_distant = False\n for n1 in vertexSet[h_idx]:\n for n2 in vertexSet[t_idx]:\n if (n1['name'], n2['name'], r) in fact_in_train_annotated:\n in_train_annotated = True\n if (n1['name'], n2['name'], r) in fact_in_train_distant:\n in_train_distant = True\n\n if in_train_annotated:\n correct_in_train_annotated += 1\n if in_train_distant:\n correct_in_train_distant += 1\n\n re_p = 1.0 * correct_re / len(submission_answer)\n re_r = 1.0 * correct_re / tot_relations if tot_relations != 0 else 0\n if re_p + re_r == 0:\n re_f1 = 0\n else:\n re_f1 = 2.0 * re_p * re_r / (re_p + re_r)\n\n evi_p = 1.0 * correct_evidence / pred_evi if pred_evi > 0 else 0\n evi_r = 1.0 * correct_evidence / tot_evidences if tot_evidences > 0 else 0\n\n if evi_p + evi_r == 0:\n evi_f1 = 0\n else:\n evi_f1 = 2.0 * evi_p * evi_r / (evi_p + evi_r)\n\n re_p_ignore_train_annotated = 1.0 * (correct_re - correct_in_train_annotated) / (\n len(submission_answer) - correct_in_train_annotated + 1e-5)\n re_p_ignore_train = 1.0 * (correct_re - correct_in_train_distant) / (\n len(submission_answer) - correct_in_train_distant + 1e-5)\n\n if re_p_ignore_train_annotated + re_r == 0:\n re_f1_ignore_train_annotated = 0\n else:\n re_f1_ignore_train_annotated = 2.0 * re_p_ignore_train_annotated * re_r / (re_p_ignore_train_annotated + re_r)\n\n if re_p_ignore_train + re_r == 0:\n re_f1_ignore_train = 0\n else:\n re_f1_ignore_train = 2.0 * re_p_ignore_train * re_r / (re_p_ignore_train + re_r)\n\n return [re_p, re_r, re_f1], [evi_p, evi_r, evi_f1], \\\n [re_p_ignore_train_annotated, re_r, re_f1_ignore_train_annotated], \\\n [re_p_ignore_train, re_r, re_f1_ignore_train]" }, { "identifier": "merge_results", "path": "evaluation.py", "snippet": "def merge_results(pred: list, pred_pseudo: list, features: list, thresh: float = None):\n '''\n Merge relation predictions from the original document and psuedo documents.\n Input:\n :pred: list of dictionaries, each dictionary entry is a predicted relation triple from the original document. Keys: ['title', 'h_idx', 't_idx', 'r', 'evidence', 'score'].\n :pred_pseudo: list of dictionaries, each dictionary entry is a predicted relation triple from pseudo documents. Keys: ['title', 'h_idx', 't_idx', 'r', 'evidence', 'score'].\n :features: list of features within each document. Identical to the lists obtained from pre-processing.\n :thresh: threshold for selecting predictions.\n Output:\n :merged_res: list of merged relation predictions. 
Each relation prediction is a dictionay with keys (title, h_idx, t_idx, r).\n :thresh: threshold of selecting relation predictions.\n '''\n\n title2pred = get_title2pred(pred)\n title2pred_pseudo = get_title2pred(pred_pseudo)\n\n title2gt = get_title2gt(features)\n num_gt = sum([len(title2gt[t]) for t in title2gt])\n\n titles = list(title2pred.keys())\n cand = []\n merged_res = []\n correct, num_pred = 0, 0\n\n for t in titles:\n rels = title2pred[t]\n rels_pseudo = title2pred_pseudo[t] if t in title2pred_pseudo else {}\n\n union = set(rels.keys()) | set(rels_pseudo.keys())\n for r in union:\n if r in rels and r in rels_pseudo: # add those into predictions\n if rels[r] > 0 and rels_pseudo[r] > 0:\n merged_res.append({'title': t, 'h_idx': r[0], 't_idx': r[1], 'r': r[2]})\n num_pred += 1\n correct += r in title2gt[t]\n continue\n score = rels[r] + rels_pseudo[r]\n elif r in rels: # -10 for penalty\n score = rels[r] - 10\n elif r in rels_pseudo:\n score = rels_pseudo[r] - 10\n cand.append((r in title2gt[t], score, t, r[0], r[1], r[2]))\n\n if thresh != None:\n sorted_pred = sorted(cand, key=lambda x: x[1], reverse=True)\n last = min(filter(lambda x: x[1] > thresh, sorted_pred))\n until = sorted_pred.index(last)\n cand = sorted_pred[:until + 1]\n merged_res.extend([{'title': r[2], 'h_idx': r[3], 't_idx': r[4], 'r': r[5]} for r in cand])\n return merged_res, thresh\n\n if cand != []:\n thresh, cand = select_thresh(cand, num_gt, correct, num_pred)\n merged_res.extend([{'title': r[2], 'h_idx': r[3], 't_idx': r[4], 'r': r[5]} for r in cand])\n\n return merged_res, thresh" } ]
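The forward_rel / forward_rel_no_graph methods in the context above score each entity pair with a grouped (block-diagonal) bilinear layer: the head and tail vectors are split into blocks, the per-block outer products are flattened, and one linear layer maps them to relation logits, which keeps the parameter count far below a full bilinear form. A minimal sketch using the default sizes from the snippet (emb_size=768, block_size=64, num_class=97); the free-standing Linear below stands in for the model's self.bilinear and is illustrative only:

import torch
import torch.nn as nn

def grouped_bilinear_logits(hs, ts, bilinear, emb_size=768, block_size=64):
    # hs, ts: (n_pairs, emb_size) head/tail representations
    b1 = hs.view(-1, emb_size // block_size, block_size)
    b2 = ts.view(-1, emb_size // block_size, block_size)
    bl = (b1.unsqueeze(3) * b2.unsqueeze(2)).view(-1, emb_size * block_size)
    return bilinear(bl)  # (n_pairs, num_class)

bilinear = nn.Linear(768 * 64, 97)
logits = grouped_bilinear_logits(torch.randn(5, 768), torch.randn(5, 768), bilinear)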
import argparse
import os
import numpy as np
import torch
import ujson as json
import pandas as pd
import pickle
from torch.cuda.amp import GradScaler
from torch.utils.data import DataLoader
from transformers import AutoConfig, AutoModel, AutoTokenizer
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from args import add_args
from model import DocREModel
from utils import set_seed, collate_fn, create_directory
from prepro import read_docred
from evaluation import to_official, official_evaluate, merge_results
from tqdm import tqdm
9,652
def load_input(batch, device, tag="dev"):
    input = {'input_ids': batch[0].to(device),
             'attention_mask': batch[1].to(device),
             'labels': batch[2].to(device),
             'entity_pos': batch[3],
             'hts': batch[4],
             'sent_pos': batch[5],
             'sent_labels': batch[6].to(device) if (not batch[6] is None) and (batch[7] is None) else None,
             'teacher_attns': batch[7].to(device) if not batch[7] is None else None,
             'graph': batch[8],
             'tag': tag
             }
    return input


def train(args, model, train_features, dev_features):
    def finetune(features, optimizer, num_epoch, num_steps):
        best_score = -1
def load_input(batch, device, tag="dev"):
    input = {'input_ids': batch[0].to(device),
             'attention_mask': batch[1].to(device),
             'labels': batch[2].to(device),
             'entity_pos': batch[3],
             'hts': batch[4],
             'sent_pos': batch[5],
             'sent_labels': batch[6].to(device) if (not batch[6] is None) and (batch[7] is None) else None,
             'teacher_attns': batch[7].to(device) if not batch[7] is None else None,
             'graph': batch[8],
             'tag': tag
             }
    return input


def train(args, model, train_features, dev_features):
    def finetune(features, optimizer, num_epoch, num_steps):
        best_score = -1
train_dataloader = DataLoader(features, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn,
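For orientation, load_input in the code above simply repackages one collated batch into keyword arguments for DocREModel.forward, which in training mode returns a dict of losses. A hypothetical single optimization step built on top of it (the actual run.py weights the individual loss terms via evi_lambda / attn_lambda and uses gradient accumulation; a plain sum is shown here only for illustration):

def train_step(model, batch, optimizer, device):
    # One illustrative step: forward with tag="train", sum the returned losses, update.
    inputs = load_input(batch, device, tag="train")
    outputs = model(**inputs)
    loss = sum(outputs["loss"].values())  # {"rel_loss": ..., "evi_loss": ...} in the real model
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    return loss.item()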
3
2023-10-20 05:53:25+00:00
12k
xingchenshanyao/YOLOP-E
lib/dataset/AutoDriveDataset.py
[ { "identifier": "xyxy2xywh", "path": "lib/utils/utils.py", "snippet": "def xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y" }, { "identifier": "augment_hsv", "path": "lib/utils/augmentations.py", "snippet": "def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):\n \"\"\"change color hue, saturation, value\"\"\"\n r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains\n hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))\n dtype = img.dtype # uint8\n\n x = np.arange(0, 256, dtype=np.int16)\n lut_hue = ((x * r[0]) % 180).astype(dtype)\n lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)\n lut_val = np.clip(x * r[2], 0, 255).astype(dtype)\n\n img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)\n cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed\n\n # Histogram equalization\n # if random.random() < 0.2:\n # for i in range(3):\n # img[:, :, i] = cv2.equalizeHist(img[:, :, i])" }, { "identifier": "random_perspective", "path": "lib/utils/augmentations.py", "snippet": "def random_perspective(combination, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):\n \"\"\"combination of img transform\"\"\"\n # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))\n # targets = [cls, xyxy]\n img, gray, line = combination\n height = img.shape[0] + border[0] * 2 # shape(h,w,c)\n width = img.shape[1] + border[1] * 2\n\n # Center\n C = np.eye(3)\n C[0, 2] = -img.shape[1] / 2 # x translation (pixels)\n C[1, 2] = -img.shape[0] / 2 # y translation (pixels)\n\n # Perspective\n P = np.eye(3)\n P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)\n P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)\n\n # Rotation and Scale\n R = np.eye(3)\n a = random.uniform(-degrees, degrees)\n # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations\n s = random.uniform(1 - scale, 1 + scale)\n # s = 2 ** random.uniform(-scale, scale)\n R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)\n\n # Shear\n S = np.eye(3)\n S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)\n S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)\n\n # Translation\n T = np.eye(3)\n T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)\n T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)\n\n # Combined rotation matrix\n M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT\n if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed\n if perspective:\n img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))\n gray = cv2.warpPerspective(gray, M, dsize=(width, height), borderValue=0)\n line = cv2.warpPerspective(line, M, dsize=(width, height), borderValue=0)\n else: # affine\n img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))\n gray = cv2.warpAffine(gray, M[:2], dsize=(width, height), borderValue=0)\n line = cv2.warpAffine(line, 
M[:2], dsize=(width, height), borderValue=0)\n\n # Visualize\n # import matplotlib.pyplot as plt\n # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()\n # ax[0].imshow(img[:, :, ::-1]) # base\n # ax[1].imshow(img2[:, :, ::-1]) # warped\n\n # Transform label coordinates\n n = len(targets)\n if n:\n # warp points\n xy = np.ones((n * 4, 3))\n xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1\n xy = xy @ M.T # transform\n if perspective:\n xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale\n else: # affine\n xy = xy[:, :2].reshape(n, 8)\n\n # create new boxes\n x = xy[:, [0, 2, 4, 6]]\n y = xy[:, [1, 3, 5, 7]]\n xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T\n\n # # apply angle-based reduction of bounding boxes\n # radians = a * math.pi / 180\n # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5\n # x = (xy[:, 2] + xy[:, 0]) / 2\n # y = (xy[:, 3] + xy[:, 1]) / 2\n # w = (xy[:, 2] - xy[:, 0]) * reduction\n # h = (xy[:, 3] - xy[:, 1]) * reduction\n # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T\n\n # clip boxes\n xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)\n xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)\n\n # filter candidates\n i = _box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)\n targets = targets[i]\n targets[:, 1:5] = xy[i]\n\n combination = (img, gray, line)\n return combination, targets" }, { "identifier": "cutout", "path": "lib/utils/augmentations.py", "snippet": "def cutout(combination, labels):\n # Applies image cutout augmentation https://arxiv.org/abs/1708.04552\n image, gray = combination\n h, w = image.shape[:2]\n\n def bbox_ioa(box1, box2):\n # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. 
boxes are x1y1x2y2\n box2 = box2.transpose()\n\n # Get the coordinates of bounding boxes\n b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]\n b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]\n\n # Intersection area\n inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \\\n (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)\n\n # box2 area\n box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16\n\n # Intersection over box2 area\n return inter_area / box2_area\n\n # create random masks\n scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction\n for s in scales:\n mask_h = random.randint(1, int(h * s))\n mask_w = random.randint(1, int(w * s))\n\n # box\n xmin = max(0, random.randint(0, w) - mask_w // 2)\n ymin = max(0, random.randint(0, h) - mask_h // 2)\n xmax = min(w, xmin + mask_w)\n ymax = min(h, ymin + mask_h)\n # print('xmin:{},ymin:{},xmax:{},ymax:{}'.format(xmin,ymin,xmax,ymax))\n\n # apply random color mask\n image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]\n gray[ymin:ymax, xmin:xmax] = -1\n\n # return unobscured labels\n if len(labels) and s > 0.03:\n box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)\n ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area\n labels = labels[ioa < 0.60] # remove >60% obscured labels\n\n return image, gray, labels" }, { "identifier": "letterbox", "path": "lib/utils/augmentations.py", "snippet": "def letterbox(combination, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):\n \"\"\"Resize the input image and automatically padding to suitable shape :https://zhuanlan.zhihu.com/p/172121380\"\"\"\n # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232\n img, gray, line = combination\n shape = img.shape[:2] # current shape [height, width]\n if isinstance(new_shape, int):\n new_shape = (new_shape, new_shape)\n\n # Scale ratio (new / old)\n r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n if not scaleup: # only scale down, do not scale up (for better test mAP)\n r = min(r, 1.0)\n\n # Compute padding\n ratio = r, r # width, height ratios\n new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))\n dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding\n if auto: # minimum rectangle\n dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding\n elif scaleFill: # stretch\n dw, dh = 0.0, 0.0\n new_unpad = (new_shape[1], new_shape[0])\n ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios\n\n dw /= 2 # divide padding into 2 sides\n dh /= 2\n\n if shape[::-1] != new_unpad: # resize\n img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)\n gray = cv2.resize(gray, new_unpad, interpolation=cv2.INTER_LINEAR)\n line = cv2.resize(line, new_unpad, interpolation=cv2.INTER_LINEAR)\n\n top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n\n img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border\n gray = cv2.copyMakeBorder(gray, top, bottom, left, right, cv2.BORDER_CONSTANT, value=0) # add border\n line = cv2.copyMakeBorder(line, top, bottom, left, right, cv2.BORDER_CONSTANT, value=0) # add border\n # print(img.shape)\n \n combination = (img, gray, line)\n return combination, ratio, (dw, dh)" } ]
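The letterbox docstring above promises a resize to a 32-pixel-multiple rectangle plus symmetric padding. A worked example following the formulas in the snippet, assuming the 1280x720 source frame has already been scaled down to 640x360 as in the __getitem__ code further down, with new_shape=640 and auto=True:

# r         = min(640/360, 640/640)   = 1.0   (no further scaling)
# new_unpad = (640, 360)
# dw, dh    = 640 - 640, 640 - 360    = 0, 280
# auto=True -> dw % 32, dh % 32       = 0, 24  (minimum 32-multiple rectangle)
# per side  -> 0, 12                  -> 12 px of padding added top and bottom
# result    -> 640 x 384 image, ratio = (1.0, 1.0), pad = (0.0, 12.0)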
import os
import cv2
import numpy as np
import random
import torch
import torchvision.transforms as transforms
from pathlib import Path
from PIL import Image
from torch.utils.data import Dataset
from ..utils import letterbox, augment_hsv, random_perspective, xyxy2xywh, cutout
8,630
if random.random() > p : # Pending Sign增强 Is_add = True if id_image >= 3294 and Only_day: # 只加强白天的图片 Is_add = False cropped_path = cropped_path0+'Pending Sign/' fileList = os.listdir(cropped_path) cropped_id = random.randint(0,len(fileList)-1) txt_id = int(fileList[cropped_id].split('_')[0]) txt_line = lines[txt_id-1].split(' ') x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5]) if x1>x2: x1,x2 = x2,x1 if y1>y2: y1,y2 = y2,y1 for line in data_label: idx_0 = line[0] x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720) x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2) if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0): Is_add = False break if Is_add: try: cropped = cv2.imread(cropped_path+fileList[cropped_id]) img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]] data_label = np.append(data_label, cropped_line, axis=0) except: Is_add = True if random.random() > p : # Guidance Sign增强 Is_add = True if id_image >= 3294 and Only_day: # 只加强白天的图片 Is_add = False cropped_path = cropped_path0+'Guidance Sign/' fileList = os.listdir(cropped_path) cropped_id = random.randint(0,len(fileList)-1) txt_id = int(fileList[cropped_id].split('_')[0]) txt_line = lines[txt_id-1].split(' ') x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5]) if x1>x2: x1,x2 = x2,x1 if y1>y2: y1,y2 = y2,y1 for line in data_label: idx_0 = line[0] x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720) x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2) if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0): Is_add = False break if Is_add: try: cropped = cv2.imread(cropped_path+fileList[cropped_id]) img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]] data_label = np.append(data_label, cropped_line, axis=0) except: Is_add = True data["label"] = data_label # cv2.imshow("img",img) # cv2.waitKey(10000) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # cv2.imshow("img",img) # 图像颜色空间转换 # cv2.waitKey(10000) # seg_label = cv2.imread(data["mask"], 0) if self.cfg.num_seg_class == 3: seg_label = cv2.imread(data["mask"]) else: seg_label = cv2.imread(data["mask"], 0) lane_label = cv2.imread(data["lane"], 0) #print(lane_label.shape) # print(seg_label.shape) # print(lane_label.shape) # print(seg_label.shape) resized_shape = self.inputsize if isinstance(resized_shape, list): resized_shape = max(resized_shape) h0, w0 = img.shape[:2] # orig hw r = resized_shape / max(h0, w0) # resize image to img_size if r != 1: # always resize down, only resize up if training with augmentation interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp) # cv2.imshow("img",img) # 图像缩小到640*360 # cv2.waitKey(10000) seg_label = cv2.resize(seg_label, (int(w0 * r), int(h0 * r)), interpolation=interp) lane_label = cv2.resize(lane_label, (int(w0 * r), int(h0 * r)), interpolation=interp) h, w = img.shape[:2] (img, 
seg_label, lane_label), ratio, pad = letterbox((img, seg_label, lane_label), resized_shape, auto=True, scaleup=self.is_train) shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling # ratio = (w / w0, h / h0) # print(resized_shape) det_label = data["label"] labels=[] if det_label.size > 0: # Normalized xywh to pixel xyxy format labels = det_label.copy() labels[:, 1] = ratio[0] * w * (det_label[:, 1] - det_label[:, 3] / 2) + pad[0] # pad width labels[:, 2] = ratio[1] * h * (det_label[:, 2] - det_label[:, 4] / 2) + pad[1] # pad height labels[:, 3] = ratio[0] * w * (det_label[:, 1] + det_label[:, 3] / 2) + pad[0] labels[:, 4] = ratio[1] * h * (det_label[:, 2] + det_label[:, 4] / 2) + pad[1] if self.is_train: combination = (img, seg_label, lane_label) (img, seg_label, lane_label), labels = random_perspective( combination=combination, targets=labels, degrees=self.cfg.DATASET.ROT_FACTOR, translate=self.cfg.DATASET.TRANSLATE, scale=self.cfg.DATASET.SCALE_FACTOR, shear=self.cfg.DATASET.SHEAR ) #print(labels.shape)
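Right before augmentation, the code above maps each normalized (cls, x_c, y_c, w, h) label row to pixel-space corners using the letterbox ratio and padding. The same arithmetic, pulled out into a small self-contained helper with made-up example values (the helper name is illustrative, the formulas are those of the snippet):

import numpy as np

def labels_to_xyxy(det_label, ratio, pad, w, h):
    # det_label: (n, 5) rows of [cls, x_c, y_c, w, h], normalized to [0, 1]
    # ratio, pad: as returned by letterbox(); w, h: image size fed to letterbox()
    labels = det_label.copy()
    labels[:, 1] = ratio[0] * w * (det_label[:, 1] - det_label[:, 3] / 2) + pad[0]  # x1
    labels[:, 2] = ratio[1] * h * (det_label[:, 2] - det_label[:, 4] / 2) + pad[1]  # y1
    labels[:, 3] = ratio[0] * w * (det_label[:, 1] + det_label[:, 3] / 2) + pad[0]  # x2
    labels[:, 4] = ratio[1] * h * (det_label[:, 2] + det_label[:, 4] / 2) + pad[1]  # y2
    return labels

# A centred box covering half of a 640x360 image, with the 12 px letterbox padding from above:
# labels_to_xyxy(np.array([[0., 0.5, 0.5, 0.5, 0.5]]), ratio=(1, 1), pad=(0, 12), w=640, h=360)
# -> [[  0. 160. 102. 480. 282.]]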
# np.set_printoptions(threshold=np.inf) # from visualization import plot_img_and_mask,plot_one_box,show_seg_result # # ACE # import os # import math # import matplotlib.pyplot as plt # #线性拉伸处理 # #去掉最大最小0.5%的像素值 线性拉伸至[0,1] # def stretchImage(data, s=0.005, bins = 2000): # ht = np.histogram(data, bins) # d = np.cumsum(ht[0])/float(data.size) # lmin = 0; lmax=bins-1 # while lmin<bins: # if d[lmin]>=s: # break # lmin+=1 # while lmax>=0: # if d[lmax]<=1-s: # break # lmax-=1 # return np.clip((data-ht[1][lmin])/(ht[1][lmax]-ht[1][lmin]), 0,1) # #根据半径计算权重参数矩阵 # g_para = {} # def getPara(radius = 5): # global g_para # m = g_para.get(radius, None) # if m is not None: # return m # size = radius*2+1 # m = np.zeros((size, size)) # for h in range(-radius, radius+1): # for w in range(-radius, radius+1): # if h==0 and w==0: # continue # m[radius+h, radius+w] = 1.0/math.sqrt(h**2+w**2) # m /= m.sum() # g_para[radius] = m # return m # #常规的ACE实现 # def zmIce(I, ratio=4, radius=300): # para = getPara(radius) # height,width = I.shape # zh = [] # zw = [] # n = 0 # while n < radius: # zh.append(0) # zw.append(0) # n += 1 # for n in range(height): # zh.append(n) # for n in range(width): # zw.append(n) # n = 0 # while n < radius: # zh.append(height-1) # zw.append(width-1) # n += 1 # #print(zh) # #print(zw) # Z = I[np.ix_(zh, zw)] # res = np.zeros(I.shape) # for h in range(radius*2+1): # for w in range(radius*2+1): # if para[h][w] == 0: # continue # res += (para[h][w] * np.clip((I-Z[h:h+height, w:w+width])*ratio, -1, 1)) # return res # #单通道ACE快速增强实现 # def zmIceFast(I, ratio, radius): # # print(I) # height, width = I.shape[:2] # if min(height, width) <=2: # return np.zeros(I.shape)+0.5 # Rs = cv2.resize(I, (int((width+1)/2), int((height+1)/2))) # Rf = zmIceFast(Rs, ratio, radius) #递归调用 # Rf = cv2.resize(Rf, (width, height)) # Rs = cv2.resize(Rs, (width, height)) # return Rf+zmIce(I,ratio, radius)-zmIce(Rs,ratio,radius) # #rgb三通道分别增强 ratio是对比度增强因子 radius是卷积模板半径 # def zmIceColor(I, ratio=4, radius=3): # res = np.zeros(I.shape) # for k in range(3): # res[:,:,k] = stretchImage(zmIceFast(I[:,:,k], ratio, radius)) # return res class AutoDriveDataset(Dataset): """ A general Dataset for some common function """ def __init__(self, cfg, is_train, inputsize=640, transform=None): """ initial all the characteristic Inputs: -cfg: configurations -is_train(bool): whether train set or not -transform: ToTensor and Normalize Returns: None """ self.is_train = is_train self.cfg = cfg self.transform = transform self.inputsize = inputsize self.Tensor = transforms.ToTensor() img_root = Path(cfg.DATASET.DATAROOT) label_root = Path(cfg.DATASET.LABELROOT) mask_root = Path(cfg.DATASET.MASKROOT) lane_root = Path(cfg.DATASET.LANEROOT) if is_train: indicator = cfg.DATASET.TRAIN_SET else: indicator = cfg.DATASET.TEST_SET self.img_root = img_root / indicator self.label_root = label_root / indicator self.mask_root = mask_root / indicator self.lane_root = lane_root / indicator # self.label_list = self.label_root.iterdir() self.mask_list = self.mask_root.iterdir() self.db = [] self.data_format = cfg.DATASET.DATA_FORMAT self.scale_factor = cfg.DATASET.SCALE_FACTOR self.rotation_factor = cfg.DATASET.ROT_FACTOR self.flip = cfg.DATASET.FLIP self.color_rgb = cfg.DATASET.COLOR_RGB # self.target_type = cfg.MODEL.TARGET_TYPE self.shapes = np.array(cfg.DATASET.ORG_IMG_SIZE) def _get_db(self): """ finished on children Dataset(for dataset which is not in Bdd100k format, rewrite children Dataset) """ raise NotImplementedError def evaluate(self, cfg, preds, 
output_dir): """ finished on children dataset """ raise NotImplementedError def __len__(self,): """ number of objects in the dataset """ return len(self.db) def __getitem__(self, idx): """ Get input and groud-truth from database & add data augmentation on input Inputs: -idx: the index of image in self.db(database)(list) self.db(list) [a,b,c,...] a: (dictionary){'image':, 'information':} Returns: -image: transformed image, first passed the data augmentation in __getitem__ function(type:numpy), then apply self.transform -target: ground truth(det_gt,seg_gt) function maybe useful cv2.imread cv2.cvtColor(data, cv2.COLOR_BGR2RGB) cv2.warpAffine """ data = self.db[idx] data_label = data["label"] id_image = int(data["image"].split('/')[-1][:-4]) # 获取图片序号 img = cv2.imread(data["image"], cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION) # cv2.imshow("img",img) # 原图像 # cv2.waitKey(5000) # print("img = zmIceColor(img/255.0)*255") # img = zmIceColor(img/255.0)*255 # cv2.imshow("img",img/255) # ACE自动色彩均衡快速算法 # cv2.waitKey(5000) # Only Mascio Enhancement 数据增强 for line in data_label: idx_0 = line[0] x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720) x1, y1, x2, y2 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2) random.seed(idx) if self.is_train and int(idx_0) == 9 and random.random() > 1: # 只增强Straight or Right Turn Arrow # if self.is_train: # if True: c_y = 10 # 偏移间隙 c_x = 0 x_c_new = x_c+c_x y_c_new = y_c+h_c+c_y x1_new, y1_new, x2_new, y2_new = x1+c_x, y1+h_c+c_y, x2+c_x, y2+h_c+c_y if (x1_new >=0 and x2_new <=1280 and y1_new>=0 and y2_new <=720): # 向下重叠一次 Is_add = True for line0 in data_label: x1_0, y1_0, x2_0, y2_0 = line0[1]*1280-line0[3]*1280/2, line0[2]*1280-line0[4]*720/2, line0[1]*1280+line0[3]*1280/2, line0[2]*1280+line0[4]*720/2 if (x1_new>x1_0 and y1_new>y1_0 and x1_new<x2_0 and y1_new<y2_0) or (x2_new>x1_0 and y2_new>y1_0 and x2_new<x2_0 and y2_new<y2_0) or (x1_new<x1_0 and y1_new<y1_0 and x2_new>x2_0 and y2_new>y2_0): Is_add = False break if Is_add: try: cropped_line = [[idx_0, x_c_new, y_c_new, w_c, h_c]] data_label = np.append(data_label, cropped_line, axis=0) img[int(y1_new):int(y2_new), int(x1_new):int(x2_new)] = img[int(y1):int(y2), int(x1):int(x2)] except: Is_add = True # cv2.imshow("img",img) # cv2.waitKey(10000) # Specific Mascio Enhancement数据增强 cropped_path0 = '/home/xingchen/Study/dataset/SDExpressway/traffic_object_cropped/' f=open('/home/xingchen/Study/dataset/SDExpressway/traffic_object_cropped.txt','r') lines=f.readlines() f.close() c_c = 10 p = 0.8 # 数据增强概率 # Only_day = True Only_day = False #只加强白天的图片 # if self.is_train: # 限定只有训练的时候增强 # if True: if False: random.seed(idx) if random.random() > p-0.1 : # Straight or Right Turn Arrow增强 Is_add = True if id_image >= 3294 and Only_day: # 只加强白天的图片 Is_add = False cropped_path = cropped_path0+'Straight or Right Turn Arrow/' fileList = os.listdir(cropped_path) cropped_id = random.randint(0,len(fileList)-1) txt_id = int(fileList[cropped_id].split('_')[0]) txt_line = lines[txt_id-1].split(' ') x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5]) if x1>x2: x1,x2 = x2,x1 if y1>y2: y1,y2 = y2,y1 for line in data_label: idx_0 = line[0] x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720) x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2) if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 
and y1<y1_0 and x2>x2_0 and y2>y2_0): Is_add = False break if Is_add: try: cropped = cv2.imread(cropped_path+fileList[cropped_id]) img[int(y1):int(y2), int(x1):int(x2)] = cropped cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]] data_label = np.append(data_label, cropped_line, axis=0) except: Is_add = True if random.random() > p-0.1 : # Straight Ahead Arrow增强 Is_add = True if id_image >= 3294 and Only_day: # 只加强白天的图片 Is_add = False cropped_path = cropped_path0+'Straight Ahead Arrow/' fileList = os.listdir(cropped_path) cropped_id = random.randint(0,len(fileList)-1) txt_id = int(fileList[cropped_id].split('_')[0]) txt_line = lines[txt_id-1].split(' ') x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5]) if x1>x2: x1,x2 = x2,x1 if y1>y2: y1,y2 = y2,y1 for line in data_label: idx_0 = line[0] x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720) x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2) if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0): Is_add = False break if Is_add: try: cropped = cv2.imread(cropped_path+fileList[cropped_id]) img[int(y1):int(y2), int(x1):int(x2)] = cropped cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]] data_label = np.append(data_label, cropped_line, axis=0) except: Is_add = True if random.random() > p : # Speed Limit Sign增强 Is_add = True if id_image >= 3294 and Only_day: # 只加强白天的图片 Is_add = False cropped_path = cropped_path0+'Speed Limit Sign/' fileList = os.listdir(cropped_path) cropped_id = random.randint(0,len(fileList)-1) txt_id = int(fileList[cropped_id].split('_')[0]) txt_line = lines[txt_id-1].split(' ') x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5]) if x1>x2: x1,x2 = x2,x1 if y1>y2: y1,y2 = y2,y1 for line in data_label: idx_0 = line[0] x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720) x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2) if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0): Is_add = False break if Is_add: try: cropped = cv2.imread(cropped_path+fileList[cropped_id]) img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]] data_label = np.append(data_label, cropped_line, axis=0) except: Is_add = True if random.random() > p : # Emergency Telephone Sign增强 Is_add = True if id_image >= 3294 and Only_day: # 只加强白天的图片 Is_add = False cropped_path = cropped_path0+'Emergency Telephone Sign/' fileList = os.listdir(cropped_path) cropped_id = random.randint(0,len(fileList)-1) txt_id = int(fileList[cropped_id].split('_')[0]) txt_line = lines[txt_id-1].split(' ') x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5]) if x1>x2: x1,x2 = x2,x1 if y1>y2: y1,y2 = y2,y1 for line in data_label: idx_0 = line[0] x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720) x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2) if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 
and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0): Is_add = False break if Is_add: try: cropped = cv2.imread(cropped_path+fileList[cropped_id]) img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]] data_label = np.append(data_label, cropped_line, axis=0) except: Is_add = True if random.random() > p : # Warning Sign增强 Is_add = True if id_image >= 3294 and Only_day: # 只加强白天的图片 Is_add = False cropped_path = cropped_path0+'Warning Sign/' fileList = os.listdir(cropped_path) cropped_id = random.randint(0,len(fileList)-1) txt_id = int(fileList[cropped_id].split('_')[0]) txt_line = lines[txt_id-1].split(' ') x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5]) if x1>x2: x1,x2 = x2,x1 if y1>y2: y1,y2 = y2,y1 for line in data_label: idx_0 = line[0] x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720) x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2) if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0): Is_add = False break if Is_add: try: cropped = cv2.imread(cropped_path+fileList[cropped_id]) img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]] data_label = np.append(data_label, cropped_line, axis=0) except: Is_add = True if random.random() > p : # Directional Sign增强 Is_add = True if id_image >= 3294 and Only_day: # 只加强白天的图片 Is_add = False cropped_path = cropped_path0+'Directional Sign/' fileList = os.listdir(cropped_path) cropped_id = random.randint(0,len(fileList)-1) txt_id = int(fileList[cropped_id].split('_')[0]) txt_line = lines[txt_id-1].split(' ') x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5]) if x1>x2: x1,x2 = x2,x1 if y1>y2: y1,y2 = y2,y1 for line in data_label: idx_0 = line[0] x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720) x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2) if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0): Is_add = False break if Is_add: try: cropped = cv2.imread(cropped_path+fileList[cropped_id]) img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]] data_label = np.append(data_label, cropped_line, axis=0) except: Is_add = True if random.random() > p : # Pending Sign增强 Is_add = True if id_image >= 3294 and Only_day: # 只加强白天的图片 Is_add = False cropped_path = cropped_path0+'Pending Sign/' fileList = os.listdir(cropped_path) cropped_id = random.randint(0,len(fileList)-1) txt_id = int(fileList[cropped_id].split('_')[0]) txt_line = lines[txt_id-1].split(' ') x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5]) if x1>x2: x1,x2 = x2,x1 if y1>y2: y1,y2 = y2,y1 for line in data_label: idx_0 = line[0] x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720) x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), 
int(x_c+w_c/2), int(y_c+h_c/2) if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0): Is_add = False break if Is_add: try: cropped = cv2.imread(cropped_path+fileList[cropped_id]) img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]] data_label = np.append(data_label, cropped_line, axis=0) except: Is_add = True if random.random() > p : # Guidance Sign增强 Is_add = True if id_image >= 3294 and Only_day: # 只加强白天的图片 Is_add = False cropped_path = cropped_path0+'Guidance Sign/' fileList = os.listdir(cropped_path) cropped_id = random.randint(0,len(fileList)-1) txt_id = int(fileList[cropped_id].split('_')[0]) txt_line = lines[txt_id-1].split(' ') x1, y1, x2, y2, idxx = int(txt_line[1]), int(txt_line[2]), int(txt_line[3]), int(txt_line[4]), int(txt_line[5]) if x1>x2: x1,x2 = x2,x1 if y1>y2: y1,y2 = y2,y1 for line in data_label: idx_0 = line[0] x_c, y_c, w_c, h_c = int(line[1]*1280), int(line[2]*720), int(line[3]*1280), int(line[4]*720) x1_0, y1_0, x2_0, y2_0 = int(x_c-w_c/2), int(y_c-h_c/2), int(x_c+w_c/2), int(y_c+h_c/2) if (x1>x1_0 and y1>y1_0 and x1<x2_0 and y1<y2_0) or (x2>x1_0 and y2>y1_0 and x2<x2_0 and y2<y2_0) or (x1<x1_0 and y1<y1_0 and x2>x2_0 and y2>y2_0): Is_add = False break if Is_add: try: cropped = cv2.imread(cropped_path+fileList[cropped_id]) img[max(0,int(y1-c_c)):min(720,int(y2+c_c)), max(0,int(x1-c_c)):min(1280,int(x2+c_c))] = cropped cropped_line = [[idxx, (x1+x2)/2/1280, (y1+y2)/2/720, (x2-x1)/1280, (y2-y1)/720]] data_label = np.append(data_label, cropped_line, axis=0) except: Is_add = True data["label"] = data_label # cv2.imshow("img",img) # cv2.waitKey(10000) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # cv2.imshow("img",img) # 图像颜色空间转换 # cv2.waitKey(10000) # seg_label = cv2.imread(data["mask"], 0) if self.cfg.num_seg_class == 3: seg_label = cv2.imread(data["mask"]) else: seg_label = cv2.imread(data["mask"], 0) lane_label = cv2.imread(data["lane"], 0) #print(lane_label.shape) # print(seg_label.shape) # print(lane_label.shape) # print(seg_label.shape) resized_shape = self.inputsize if isinstance(resized_shape, list): resized_shape = max(resized_shape) h0, w0 = img.shape[:2] # orig hw r = resized_shape / max(h0, w0) # resize image to img_size if r != 1: # always resize down, only resize up if training with augmentation interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp) # cv2.imshow("img",img) # 图像缩小到640*360 # cv2.waitKey(10000) seg_label = cv2.resize(seg_label, (int(w0 * r), int(h0 * r)), interpolation=interp) lane_label = cv2.resize(lane_label, (int(w0 * r), int(h0 * r)), interpolation=interp) h, w = img.shape[:2] (img, seg_label, lane_label), ratio, pad = letterbox((img, seg_label, lane_label), resized_shape, auto=True, scaleup=self.is_train) shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling # ratio = (w / w0, h / h0) # print(resized_shape) det_label = data["label"] labels=[] if det_label.size > 0: # Normalized xywh to pixel xyxy format labels = det_label.copy() labels[:, 1] = ratio[0] * w * (det_label[:, 1] - det_label[:, 3] / 2) + pad[0] # pad width labels[:, 2] = ratio[1] * h * (det_label[:, 2] - det_label[:, 4] / 2) + pad[1] # pad height labels[:, 3] = ratio[0] * w * (det_label[:, 1] + det_label[:, 3] / 2) + pad[0] labels[:, 4] = ratio[1] * h * (det_label[:, 2] + 
det_label[:, 4] / 2) + pad[1] if self.is_train: combination = (img, seg_label, lane_label) (img, seg_label, lane_label), labels = random_perspective( combination=combination, targets=labels, degrees=self.cfg.DATASET.ROT_FACTOR, translate=self.cfg.DATASET.TRANSLATE, scale=self.cfg.DATASET.SCALE_FACTOR, shear=self.cfg.DATASET.SHEAR ) #print(labels.shape)
augment_hsv(img, hgain=self.cfg.DATASET.HSV_H, sgain=self.cfg.DATASET.HSV_S, vgain=self.cfg.DATASET.HSV_V)
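The detection labels in the record above are stored as normalized [class, x_center, y_center, width, height] and, after the letterbox resize, are mapped to pixel xyxy coordinates using the resize ratio and padding. A minimal sketch of that conversion under the same convention; the function name, default arguments, and the toy example are illustrative assumptions, not part of the original file:

import numpy as np

def xywhn_to_xyxy(det_label, w, h, ratio=(1.0, 1.0), pad=(0.0, 0.0)):
    """Map normalized [cls, xc, yc, bw, bh] rows to pixel [cls, x1, y1, x2, y2]
    on a resized image; `ratio` and `pad` come from the letterbox step."""
    labels = det_label.copy()
    labels[:, 1] = ratio[0] * w * (det_label[:, 1] - det_label[:, 3] / 2) + pad[0]  # x1
    labels[:, 2] = ratio[1] * h * (det_label[:, 2] - det_label[:, 4] / 2) + pad[1]  # y1
    labels[:, 3] = ratio[0] * w * (det_label[:, 1] + det_label[:, 3] / 2) + pad[0]  # x2
    labels[:, 4] = ratio[1] * h * (det_label[:, 2] + det_label[:, 4] / 2) + pad[1]  # y2
    return labels

boxes = np.array([[0.0, 0.5, 0.5, 0.25, 0.25]])  # one box centred in the image
print(xywhn_to_xyxy(boxes, w=640, h=360))        # [[  0. 240. 135. 400. 225.]]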
1
2023-10-24 02:08:25+00:00
12k
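The commented-out ACE helpers at the start of this record clip the darkest and brightest 0.5% of pixel values and linearly stretch the remainder to [0, 1]. A rough equivalent written with quantiles instead of the original histogram search; the name stretch_image and the sample input are assumptions for illustration only:

import numpy as np

def stretch_image(data, s=0.005):
    """Drop the darkest/brightest fraction `s` of pixel values and linearly
    rescale what remains to [0, 1] (quantile variant of the ACE stretch)."""
    lo, hi = np.quantile(data, [s, 1.0 - s])
    return np.clip((data - lo) / max(hi - lo, 1e-12), 0.0, 1.0)

img = np.random.rand(64, 64) * 255.0
out = stretch_image(img)
print(out.min(), out.max())  # close to 0.0 and 1.0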
giulio98/functional-diffusion-processes
src/functional_diffusion_processes/losses/mse_loss.py
[ { "identifier": "BaseMAML", "path": "src/functional_diffusion_processes/models/base_maml.py", "snippet": "class BaseMAML(nn.Module, abc.ABC):\n \"\"\"Abstract model class for implementing Model-Agnostic Meta-Learning (MAML).\n\n The Model-Agnostic Meta-Learning (MAML) algorithm is designed to train models\n in a manner that they can be fine-tuned for new tasks with a small number of examples.\n This implementation is based on the MAML algorithm introduced in the paper\n \"Model-Agnostic Meta-Learning for Fast Adaptation of Deep Networks\"\n (https://arxiv.org/abs/1703.03400).\n\n Attributes:\n model_config (DictConfig): Configuration dictionary for the model.\n optimizer_inner (optax.GradientTransformation): Inner optimizer configuration.\n inner_steps (int): Number of inner optimization steps.\n\n Methods:\n __call__(self, inputs: jnp.ndarray) -> jnp.ndarray: Implement the forward pass of the model.\n initialize_model(self, rng: jax.random.PRNGKey, batch_input: jnp.ndarray) -> FrozenDict[str, Mapping[str, Any]]: Initialize the model with dummy inputs.\n initialize_input(self, shape: Tuple[int, ...]) -> jnp.ndarray: Create input tensor for the model based on the specified shape.\n make_update_params_fn(self) -> Callable[..., Tuple[jax.random.PRNGKey, jnp.ndarray, jnp.ndarray]]: Create a function to update the model parameters.\n make_update_inner_fn(self, optimizer_inner: optax.GradientTransformation, n_steps: int) -> Callable[..., Tuple[jnp.ndarray, jnp.ndarray]]: Create a function to update model parameters for inner optimization.\n make_predict_fn(self) -> Callable[..., jnp.ndarray]: Creates a function for making predictions with the model.\n \"\"\"\n\n model_config: DictConfig\n optimizer_inner: optax.GradientTransformation\n inner_steps: int\n\n @abc.abstractmethod\n @nn.compact\n def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Implement the forward pass of the model.\n\n Args:\n inputs (jnp.ndarray): Input tensor to the model.\n\n Returns:\n jnp.ndarray: Output tensor from the model.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the __call__ method.\")\n\n def initialize_model(self, rng: jax.random.PRNGKey, batch_input: jnp.ndarray) -> FrozenDict[str, Mapping[str, Any]]:\n \"\"\"Initialize the model with dummy inputs.\n\n This method initializes the model parameters by passing a batch of dummy inputs\n through the model. 
This is a common practice to infer the dimensions of the model's\n parameters.\n\n Args:\n rng (jax.random.PRNGKey): A random key for generating initial model parameters.\n batch_input (jnp.ndarray): A batch of dummy inputs for initializing the model.\n\n Returns:\n FrozenDict[str, Mapping[str, Any]]: The initialized model parameters.\n \"\"\"\n self.optimizer_inner = hydra.utils.instantiate(self.optimizer_inner)\n return self.init(rng, batch_input)\n\n def initialize_input(self, shape: Tuple[int, ...]) -> jnp.ndarray:\n \"\"\"Create input tensor for the model based on the specified shape.\n\n Args:\n shape (Tuple[int, ...]): Shape of the input tensor.\n\n Returns:\n jnp.ndarray: Initialized input tensor.\n \"\"\"\n batch_size = shape[0]\n num_channels = shape[-1]\n grid_size = shape[1:-1]\n if not self.model_config.y_input:\n num_channels = None\n coordinates = make_coordinates(batch_size, grid_size, num_channels)\n return coordinates\n\n def make_update_params_fn(self) -> Callable[..., Tuple[jax.random.PRNGKey, jnp.ndarray, jnp.ndarray]]:\n \"\"\"Create a function to update the model parameters.\n\n This method creates a function that performs the forward pass of the model\n and updates the model parameters.\n\n Returns:\n Callable[..., Tuple[jax.random.PRNGKey, jnp.ndarray, jnp.ndarray]]: Function to update model parameters.\n \"\"\"\n update_inner_fn = self.make_update_inner_fn(\n optimizer_inner=self.optimizer_inner,\n n_steps=self.inner_steps,\n )\n\n def apply_forward(\n rng: jax.random.PRNGKey,\n params: Params,\n batch_input: jnp.ndarray,\n batch_corrupted: jnp.ndarray,\n psm: jnp.ndarray,\n ) -> Tuple[jax.random.PRNGKey, jnp.ndarray, jnp.ndarray]:\n \"\"\"Apply the (outer) forward pass and update the model parameters.\n\n Args:\n rng (jax.random.PRNGKey): Random key.\n params (Params): Initial model parameters.\n batch_input (jnp.ndarray): Input tensor to the model.\n batch_corrupted (jnp.ndarray): Corrupted version of the output tensor.\n psm (jnp.ndarray): Power special matrix.\n\n Returns:\n Tuple[jax.random.PRNGKey, jnp.ndarray, jnp.ndarray]: A tuple containing a new random key, the model output, and the inner loss.\n \"\"\"\n params_adapted, loss_inner = update_inner_fn(params, batch_input, batch_corrupted, psm)\n model_output = jax.vmap(self.apply)(params_adapted, batch_input)\n\n return rng, model_output, loss_inner\n\n return apply_forward\n\n def make_update_inner_fn(\n self, optimizer_inner: optax.GradientTransformation, n_steps: int\n ) -> Callable[[Params, jnp.ndarray, jnp.ndarray, jnp.ndarray], Tuple[jnp.ndarray, jnp.ndarray]]:\n \"\"\"Create a function to update model parameters for inner optimization.\n\n This method creates a function that performs the inner optimization updates\n during the meta-training phase, which is a key component of the MAML algorithm.\n\n Args:\n optimizer_inner (optax.GradientTransformation): The optimizer used for inner optimization.\n n_steps (int): The number of optimization steps.\n\n Returns:\n Callable[..., Tuple[jnp.ndarray, jnp.ndarray]]: Function to update model parameters for inner optimization.\n \"\"\"\n\n @partial(jax.vmap, in_axes=0)\n @partial(jax.grad, has_aux=True)\n def loss_inner_fn(params_i: Params, batch_input: T, y_corrupted: T, psm: T) -> T:\n \"\"\"Computes the loss for inner optimization.\n\n This inner method computes the loss for inner optimization by comparing\n the model's output against the corrupted batch using mean square error.\n The method is vectorized using JAX's vmap function for efficiency.\n\n 
Args:\n params_i (Params): Model parameters.\n batch_input (T): Input batch.\n y_corrupted (T): Corrupted batch.\n psm (T): Power special matrix.\n\n Returns:\n T: Loss value.\n \"\"\"\n c = y_corrupted.shape[-1]\n model_output = self.apply(params_i, batch_input)\n if len(psm.shape) == 3:\n model_output_freq = jnp.fft.fft2(model_output.reshape(*psm.shape[:-1], c), norm=\"ortho\", axes=(0, 1))\n y_corrupted_freq = jnp.fft.fft2(y_corrupted.reshape(*psm.shape[:-1], c), norm=\"ortho\", axes=(0, 1))\n else:\n model_output_freq = jnp.fft.fft(model_output.reshape(*psm.shape[:-1], c), norm=\"ortho\", axis=0)\n y_corrupted_freq = jnp.fft.fft(y_corrupted.reshape(*psm.shape[:-1], c), norm=\"ortho\", axis=0)\n mse = mean_square_error(\n y_corrupted_freq.reshape(-1, c),\n model_output_freq.reshape(-1, c),\n psm.reshape(-1, 1),\n )\n loss: jnp.ndarray = jnp.mean(mse)\n\n return loss, loss\n\n def apply_inner_forward(\n params: Params, batch_input: jnp.ndarray, batch_corrupted: jnp.ndarray, psm: jnp.ndarray\n ):\n \"\"\"Applies inner forward pass for updating model parameters.\n\n Args:\n params (Params): Model parameters.\n batch_input (jnp.ndarray): Input batch.\n batch_corrupted (jnp.ndarray): Corrupted batch.\n psm (jnp.ndarray): Power special matrix.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Updated model parameters and inner loss.\n \"\"\"\n\n def inner_opt_loop(\n carry: Tuple[Params, jnp.ndarray, int, Any, jnp.ndarray], _: None\n ) -> Tuple[Tuple[Params, jnp.ndarray, int, Any, jnp.ndarray], None]:\n \"\"\"Inner optimization loop for updating model parameters.\n\n Args:\n carry (Tuple[Params, jnp.ndarray, int, optax.OptState, jnp.ndarray]): Tuple containing model parameters,\n loss vector, iteration index, optimizer state, and corrupted batch.\n _ (None): A throwaway variable as no second argument is used in this function.\n\n Returns:\n Tuple[Params, jnp.ndarray, int, optax.OptState, jnp.ndarray]: Updated tuple with new model parameters,\n updated loss vector, incremented iteration index, updated optimizer state, and corrupted batch.\n \"\"\"\n params_i, loss_inner_vec, it, opt_inner_state_params, batch_corrupted_i = carry\n\n grad_params, (loss) = loss_inner_fn(params_i, batch_input, batch_corrupted_i, psm)\n loss_inner_vec = loss_inner_vec.at[it].set(jnp.mean(loss))\n\n if self.model_config.use_dense_lr:\n # separate learning rates from grad_params\n grad_params_true, _ = separate_learning_rates(unfreeze(grad_params))\n\n # separate learning rates from params_i\n params_i_true, learning_rates = separate_learning_rates(unfreeze(params_i))\n\n # calculate updates using meta-sgd\n updates_params = jax.tree_map(\n lambda g, lr: -jnp.clip(lr, 0, 1) * g,\n grad_params_true,\n learning_rates,\n )\n\n # merge updates_params and learning_rates\n merged_updates = merge_learning_rates(unfreeze(updates_params), unfreeze(learning_rates))\n params_i1 = optax.apply_updates(params_i, merged_updates)\n\n # after update of params clip learning rates to [0, 1]\n params_i1 = clip_learning_rates(params_i1)\n else:\n updates_params, opt_state = optimizer_inner.update(grad_params, opt_inner_state_params, params_i)\n params_i1 = optax.apply_updates(params_i, updates_params)\n return (\n params_i1,\n loss_inner_vec,\n it + 1,\n opt_inner_state_params,\n batch_corrupted,\n ), _\n\n base_params = jax.tree_map(\n lambda base_param: jnp.stack(\n [\n base_param,\n ]\n * batch_input.shape[0],\n axis=0,\n ),\n params,\n )\n loss_inner = jnp.zeros((n_steps,))\n i = 0\n initial_state = (\n base_params,\n loss_inner,\n 
i,\n optimizer_inner.init(base_params),\n batch_corrupted,\n )\n params_adapted, loss_inner, *_ = jax.lax.scan(inner_opt_loop, initial_state, xs=None, length=n_steps)[0]\n return params_adapted, loss_inner\n\n return apply_inner_forward\n\n def make_predict_fn(\n self,\n ) -> Callable[\n [Params, jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray, Optional[jnp.ndarray]], jnp.ndarray\n ]:\n \"\"\"Creates a function for making predictions with the model.\n\n This method creates a function that can be used to make predictions with the model.\n\n Returns:\n Callable[..., jnp.ndarray]: A function for making predictions with the model.\n \"\"\"\n update_inner_fn = self.make_update_inner_fn(\n optimizer_inner=self.optimizer_inner,\n n_steps=self.inner_steps,\n )\n\n def predict(\n params: Params,\n batch_corrupted: jnp.ndarray,\n batch_input: jnp.ndarray,\n time: jnp.ndarray,\n psm: jnp.ndarray,\n shape: jnp.ndarray,\n ) -> jnp.ndarray:\n \"\"\"Make predictions using the model.\n\n Args:\n params (Params): Model parameters.\n batch_corrupted (jnp.ndarray): Corrupted version of the output tensor.\n batch_input (jnp.ndarray): Input tensor to the model.\n time (jnp.ndarray): Time tensor.\n psm (jnp.ndarray): Power special matrix.\n shape (jnp.ndarray): Shape of the input tensor.\n\n Returns:\n jnp.ndarray: Reconstructed output tensor.\n \"\"\"\n b, g, c = batch_corrupted.shape\n t_aux = jnp.reshape(time, (b, 1, 1))\n t_aux = jnp.broadcast_to(t_aux, (b, g, 1)) * 2 - 1\n batch_input = batch_input.at[:, :, -1:].set(t_aux)\n if self.model_config.y_input:\n batch_input = batch_input.at[:, :, len(shape) : len(shape) + c].set(batch_corrupted)\n params_adapted, _ = update_inner_fn(params, batch_input, batch_corrupted, psm)\n batch_reconstructed = jax.vmap(self.apply)(params_adapted, batch_input)\n\n return batch_reconstructed\n\n return predict\n\n def make_super_resolution_fn(\n self,\n ) -> Callable[\n [Params, jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray, Optional[jnp.ndarray]], jnp.ndarray\n ]:\n \"\"\"Creates a function for making super resolution output with the model.\n\n This method creates a function that can be used to make super resolution task with the model.\n\n Returns:\n Callable[..., jnp.ndarray]: A function for making super resolution output with the model.\n \"\"\"\n update_inner_fn = self.make_update_inner_fn(\n optimizer_inner=self.optimizer_inner,\n n_steps=self.inner_steps,\n )\n\n def super_resolution_fn(\n params: Params,\n batch_corrupted: jnp.ndarray,\n batch_input: jnp.ndarray,\n time: jnp.ndarray,\n psm: jnp.ndarray,\n shape: jnp.ndarray,\n target_shape: Optional[jnp.ndarray] = None,\n ) -> jnp.ndarray:\n \"\"\"Make last prediction for super resolution task using the model.\n\n Args:\n params (Params): Model parameters.\n batch_corrupted (jnp.ndarray): Corrupted version of the output tensor.\n batch_input (jnp.ndarray): Input tensor to the model.\n time (jnp.ndarray): Time tensor.\n psm (jnp.ndarray): Power special matrix.\n shape (jnp.ndarray): Shape of the input tensor.\n target_shape (Optional[jnp.ndarray]): Target shape of the output tensor.\n\n Returns:\n jnp.ndarray: Reconstructed output tensor at super-resolution.\n \"\"\"\n b, g, c = batch_corrupted.shape\n t_aux = jnp.reshape(time, (b, 1, 1))\n t_aux = jnp.broadcast_to(t_aux, (b, g, 1)) * 2 - 1\n batch_input = batch_input.at[:, :, -1:].set(t_aux)\n if self.model_config.y_input:\n batch_input = batch_input.at[:, :, len(shape) : len(shape) + c].set(batch_corrupted)\n params_adapted, _ = 
update_inner_fn(params, batch_input, batch_corrupted, psm)\n if self.model_config.y_input:\n batch_reconstructed = jax.vmap(self.apply)(params_adapted, batch_input)\n batch_input = batch_input.at[:, :, len(shape) : len(shape) + c].set(batch_reconstructed)\n batch_input = batch_input.reshape((b, *shape, -1))\n\n new_h, new_w = target_shape\n\n batch_input_new = jax.image.resize(batch_input, (b, new_h, new_w, batch_input.shape[-1]), method=\"bilinear\")\n batch_input_new = batch_input_new.reshape((b, new_h * new_w, -1))\n batch_reconstructed = jax.vmap(self.apply)(params_adapted, batch_input_new)\n\n return batch_reconstructed\n\n return super_resolution_fn" }, { "identifier": "BaseViT", "path": "src/functional_diffusion_processes/models/base_vit.py", "snippet": "class BaseViT(nn.Module, abc.ABC):\n \"\"\"Abstract base class for Vision Transformer (ViT) models.\n\n Introduced in the paper \"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale\" (https://arxiv.org/abs/2010.11929).\n\n Attributes:\n model_config (DictConfig): Configuration dictionary for the model.\n \"\"\"\n\n model_config: DictConfig\n\n @abc.abstractmethod\n @nn.compact\n def __call__(self, inputs: jnp.ndarray, train: bool) -> jnp.ndarray:\n \"\"\"Performs the forward pass of the model.\n\n Args:\n inputs (jnp.ndarray): Input data.\n train (bool): Indicates whether the model is in training mode.\n\n Returns:\n jnp.ndarray: Model's output.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the __call__ method.\")\n\n def initialize_model(self, rng: jax.random.PRNGKey, batch_input: jnp.ndarray) -> FrozenDict[str, Mapping[str, Any]]:\n \"\"\"Initializes the model with dummy inputs.\n\n Args:\n rng (jax.random.PRNGKey): The random number generator key.\n batch_input (jnp.ndarray): The input data for batch.\n\n Returns:\n FrozenDict[str, Mapping[str, Any]]: The initialized model.\n \"\"\"\n return self.init(rng, batch_input, train=False)\n\n @staticmethod\n def initialize_input(shape: Tuple[int, ...]) -> jnp.ndarray:\n \"\"\"Creates input for the model based on the specified shape.\n\n Args:\n shape (Tuple[int, ...]): The shape of the input.\n\n Returns:\n jnp.ndarray: The created input.\n \"\"\"\n batch_size = shape[0]\n num_channels = shape[-1]\n grid_size = shape[1:-1]\n coordinates = make_coordinates(batch_size, grid_size, num_channels)\n return coordinates\n\n def make_update_params_fn(self) -> Callable:\n \"\"\"Creates a function to update model parameters.\n\n Returns:\n Callable: The created function to update model parameters.\n \"\"\"\n\n def apply_forward(\n rng: jax.random.PRNGKey, params: Params, batch_input: jnp.ndarray, batch_corrupted: jnp.ndarray, psm: Any\n ) -> Tuple[jax.random.PRNGKey, jnp.ndarray, None]: # noqa\n \"\"\"Updates model parameters in a forward pass.\n\n Args:\n rng (jax.random.PRNGKey): The random number generator key.\n params (Params): The model parameters.\n batch_input (jnp.ndarray): The input data for the batch.\n batch_corrupted (jnp.ndarray): The corrupted version of the output tensor.\n psm (Any): Power special matrix.\n\n Returns:\n Tuple[jax.random.PRNGKey, jnp.ndarray, None]: A tuple containing a new random key,\n the model output, and the inner loss (which is None in this case).\n \"\"\"\n _, new_rng = jax.random.split(rng)\n dropout_rng = jax.random.fold_in(rng, jax.lax.axis_index(\"device\"))\n model_output = self.apply(params, 
rngs={\"dropout\": dropout_rng}, inputs=batch_input, train=True)\n loss_inner = None\n return new_rng, model_output, loss_inner\n\n return apply_forward\n\n def make_predict_fn(self) -> Callable:\n \"\"\"Creates a function for making predictions with the model.\n\n Returns:\n Callable: The created function for making predictions.\n \"\"\"\n\n def predict(\n params: Params,\n batch_corrupted: jnp.ndarray,\n batch_input: jnp.ndarray,\n time: jnp.ndarray,\n psm: jnp.ndarray,\n shape: Tuple[int, ...],\n ) -> jnp.ndarray: # noqa\n \"\"\"Makes predictions with the model.\n\n Args:\n params (Params): The model parameters.\n batch_corrupted (jnp.ndarray): The corrupted version of the output tensor.\n batch_input (jnp.ndarray): The input data for the batch.\n time (jnp.ndarray): The time tensor.\n psm (jnp.ndarray): Power special matrix.\n shape (Tuple[int, ...]): The shape of the input tensor.\n\n Returns:\n jnp.ndarray: The model's output.\n \"\"\"\n b, g, c = batch_corrupted.shape\n t_aux = jnp.reshape(time, (b, 1, 1))\n t_aux = jnp.broadcast_to(t_aux, (b, g, 1))\n batch_input = batch_input.at[:, :, -1:].set(t_aux)\n batch_input = batch_input.at[:, :, len(shape) : len(shape) + c].set(batch_corrupted)\n model_output = self.apply(params, batch_input, train=False)\n return model_output\n\n return predict" }, { "identifier": "SDE", "path": "src/functional_diffusion_processes/sdetools/base_sde.py", "snippet": "class SDE(abc.ABC):\n \"\"\"Abstract base class for representing Stochastic Differential Equations (SDEs).\n\n This class provides a structured way to define and work with SDEs, including computing\n Fourier transforms, discretizing the equations, and defining the drift and diffusion terms.\n\n Attributes:\n sde_config (DictConfig): Configuration object containing SDE settings.\n T (float): Total time duration.\n N (int): Number of time steps.\n eps (float): Small constant for numerical stability.\n is_unidimensional (bool): Flag indicating if the SDE is unidimensional.\n \"\"\"\n\n def __init__(self, sde_config: DictConfig) -> None:\n \"\"\"Initializes the SDE with the given configuration.\n\n Args:\n sde_config (DictConfig): Configuration object containing SDE settings.\n \"\"\"\n super().__init__()\n self.sde_config = sde_config\n self.T = self.sde_config.T\n self.N = self.sde_config.N\n self.eps = self.sde_config.eps\n self.is_unidimensional = True if len(self.sde_config.shape) == 1 else False\n\n def fourier_transform(self, state: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the Fourier transform of the given state.\n\n This method can handle both vectorized and non-vectorized input states.\n\n Args:\n state (jnp.ndarray): State whose Fourier transform is to be computed.\n\n Returns:\n jnp.ndarray: Fourier transform of the given state.\n \"\"\"\n return (\n jnp.fft.fft(state, norm=\"ortho\", axis=1)\n if self.is_unidimensional\n else jnp.fft.fft2(state, norm=\"ortho\", axes=(1, 2))\n )\n\n def inverse_fourier_transform(self, state: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the inverse Fourier transform of the given state.\n\n This method can handle both vectorized and non-vectorized input states.\n\n Args:\n state (jnp.ndarray): State whose inverse Fourier transform is to be computed.\n\n Returns:\n jnp.ndarray: Inverse Fourier transform of the given state.\n \"\"\"\n return (\n jnp.fft.ifft(state, norm=\"ortho\", axis=1)\n if self.is_unidimensional\n else jnp.fft.ifft2(state, norm=\"ortho\", axes=(1, 2))\n )\n\n @abc.abstractmethod\n def sde(\n self,\n y_corrupted: jnp.ndarray,\n t: 
jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Abstract method to compute the drift and diffusion terms of the SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Tuple containing the drift and diffusion terms of the SDE.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the sde method.\")\n\n @abc.abstractmethod\n def marginal_prob(\n self,\n rng: PRNGKeyArray,\n x: jnp.ndarray,\n t: jnp.ndarray,\n t0: Optional[jnp.ndarray] = None,\n ) -> Tuple[Any, jnp.ndarray | Any]:\n \"\"\"Computes the marginal probability density at a given time.\n\n This is an abstract method that should be overridden by subclasses to\n compute the marginal probability density based on the state and time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n x (jnp.ndarray): State of the system.\n t (jnp.ndarray): Current time.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n Tuple[Any, jnp.ndarray | Any]: Marginal probability density at the given time.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the marginal_prob method.\")\n\n @abc.abstractmethod\n def diffuse(\n self, rng: PRNGKeyArray, x: jnp.ndarray, t: jnp.ndarray, t0: Optional[jnp.ndarray] = None\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Performs diffusion of the input from time t0 to time t.\n\n This is an abstract method that should be overridden by subclasses to\n implement the diffusion process based on the state and time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n x (jnp.ndarray): Input state.\n t (jnp.ndarray): Current time.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Mean of the corrupted input and the corrupted input.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the diffuse method.\")\n\n @abc.abstractmethod\n def prior_sampling(\n self, rng: PRNGKeyArray, shape: Tuple[int, ...], t0: Optional[jnp.ndarray] = None\n ) -> jnp.ndarray:\n \"\"\"Generates a sample from the prior distribution of the SDE.\n\n This is an abstract method that should be overridden by subclasses to\n implement the prior sampling process based on the shape and initial time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n shape (Tuple[int, ...]): Shape of the sample to be generated.\n t0 (Optional[jnp.ndarray], optional): Initial time. 
Defaults to None.\n\n Returns:\n jnp.ndarray: A sample from the prior distribution of the SDE.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the prior_sampling method.\")\n\n @abc.abstractmethod\n def score_fn(\n self, y_corrupted: jnp.ndarray, y_reconstructed: jnp.ndarray, t: jnp.ndarray, rng: Optional[PRNGKeyArray] = None\n ) -> jnp.ndarray:\n \"\"\"Computes the score function based on the corrupted and reconstructed states.\n\n This is an abstract method that should be overridden by subclasses to\n compute the score function based on the state and time.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n y_reconstructed (jnp.ndarray): Reconstructed state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n\n Returns:\n jnp.ndarray: The score function.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the score_fn method.\")\n\n @abc.abstractmethod\n def get_psm(self, t: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the Power-Special-Matrix(PSM) used as a weighting factor for the loss.\n\n This is an abstract method that should be overridden by subclasses to\n compute the state-dependent diffusion matrix based on the time.\n\n Args:\n t (jnp.ndarray): Current time.\n\n Returns:\n jnp.ndarray: The state-dependent diffusion matrix.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the get_psm method.\")\n\n @abc.abstractmethod\n def get_reverse_noise(self, rng: PRNGKeyArray, shape: Tuple[int, ...]) -> jnp.ndarray:\n \"\"\"Generates noise for the reverse SDE.\n\n This is an abstract method that should be overridden by subclasses to\n generate reverse noise based on the shape.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n shape (Tuple[int, ...]): Shape of the noise to be generated.\n\n Returns:\n jnp.ndarray: The reverse noise.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the get_reverse_noise method.\")\n\n def discretize(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Discretizes the SDE into an iterative update rule.\n\n This method computes the discrete drift and diffusion terms based on the continuous SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. 
Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Tuple containing the discrete drift and diffusion terms.\n \"\"\"\n dt = (self.T - self.eps) / self.N\n drift, diffusion = self.sde(y_corrupted, t, y_reconstructed)\n f = drift * dt\n g = diffusion * jnp.sqrt(dt)\n return f, g\n\n def reverse(self):\n \"\"\"Creates a reverse-time version of the current SDE.\n\n This method defines a nested class for the reverse-time SDE and returns an instance of it.\n\n Returns:\n ReverseSDE: An instance of the reverse-time SDE subclass.\n \"\"\"\n num_time_steps = self.N\n end_t = self.T\n sde_fn = self.sde\n discretize_fn = self.discretize\n score_fn = self.score_fn\n sde_config = self.sde_config\n\n class ReverseSDE(self.__class__, abc.ABC):\n \"\"\"Reverse Stochastic Differential Equation abstract base class.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize the ReverseSDE class.\n\n Inherits the properties from the original SDE class and overrides the relevant methods for the\n reverse-time SDE.\n \"\"\"\n super().__init__(sde_config)\n self.N = num_time_steps\n self.T = end_t\n self.score_fn = score_fn\n\n def sde(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Return the drift and diffusion terms for the reverse-time SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the reverse-time SDE.\n \"\"\"\n drift, diffusion = sde_fn(y_corrupted, t, y_reconstructed)\n score = self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n drift = -drift + batch_mul(diffusion**2, score * (0.5 if self.sde_config.probability_flow else 1.0))\n # Set the diffusion function to zero for ODEs.\n diffusion = jnp.zeros_like(diffusion) if self.sde_config.probability_flow else diffusion\n return drift, diffusion\n\n def discretize(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Discretizes the reverse-time SDE in the form of an iterative update rule.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. 
Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the discretized reverse-time SDE.\n \"\"\"\n f, g = discretize_fn(y_corrupted, t, y_corrupted)\n rev_f = -f + batch_mul(\n g**2,\n self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n * (0.5 if self.sde_config.probability_flow else 1.0),\n )\n rev_g = jnp.zeros_like(g) if self.sde_config.probability_flow else g\n return rev_f, rev_g\n\n def semi_analytic(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Computes the semi-analytic drift and diffusion terms for the reverse-time SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the semi-analytic reverse-time SDE.\n \"\"\"\n _, diffusion = sde_fn(y_corrupted, t, y_reconstructed)\n score = self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n drift = batch_mul(diffusion**2, score * (0.5 if self.sde_config.probability_flow else 1.0))\n diffusion = jnp.zeros_like(diffusion) if self.sde_config.probability_flow else diffusion\n return drift, diffusion\n\n return ReverseSDE()" }, { "identifier": "batch_mul", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def batch_mul(a: jnp.ndarray, b: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Perform element-wise multiplication of two arrays.\n\n Args:\n a: First array.\n b: Second array.\n\n Returns:\n The element-wise multiplication of the two arrays.\n \"\"\"\n return jax.vmap(lambda x, y: x * y)(a, b)" } ]
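The BaseMAML snippet above adapts a per-example copy of the parameters with a few inner optimization steps before the outer forward pass. Below is a generic, simplified sketch of such an inner loop using plain SGD and a toy linear model (no optax, no vmap over the batch, no learning-rate pytree); everything here is illustrative rather than the repository's actual implementation:

import jax
import jax.numpy as jnp

def inner_adapt(params, loss_fn, batch_input, batch_target, lr=1e-2, n_steps=3):
    """MAML-style inner loop: a few SGD steps on a copy of the parameters for
    one task, leaving the original (meta) parameters untouched."""
    grad_fn = jax.grad(loss_fn)
    for _ in range(n_steps):
        grads = grad_fn(params, batch_input, batch_target)
        params = jax.tree_util.tree_map(lambda p, g: p - lr * g, params, grads)
    return params

def mse(params, x, y):
    pred = x @ params["w"] + params["b"]
    return jnp.mean((pred - y) ** 2)

params = {"w": jnp.zeros((3, 1)), "b": jnp.zeros((1,))}
x, y = jnp.ones((8, 3)), jnp.ones((8, 1))
adapted = inner_adapt(params, mse, x, y)
print(mse(adapted, x, y) < mse(params, x, y))  # True: the inner loop reduced the task loss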
import abc import jax import jax.numpy as jnp from functools import partial from typing import Any, Callable, TypeVar, Union from flax.core import FrozenDict from jax.random import PRNGKeyArray from omegaconf import DictConfig from ..models import BaseMAML, BaseViT from ..sdetools import SDE from ..utils.common import batch_mul
8,996
Params = FrozenDict[str, Any] T = TypeVar("T") class MSELoss(abc.ABC): """Abstract class for computing Mean Squared Error (MSE) Loss. Provides a structure for constructing a loss function to compute the MSE loss between model predictions and real data, with potential modifications for different domains (frequency or normal) and scheduling. Attributes: sde (SDE): An instance of stochastic differential equation to be used to calculate the weight factor in loss computation. loss_config (DictConfig): A configuration object holding parameters for loss computation. """
Params = FrozenDict[str, Any] T = TypeVar("T") class MSELoss(abc.ABC): """Abstract class for computing Mean Squared Error (MSE) Loss. Provides a structure for constructing a loss function to compute the MSE loss between model predictions and real data, with potential modifications for different domains (frequency or normal) and scheduling. Attributes: sde (SDE): An instance of stochastic differential equation to be used to calculate the weight factor in loss computation. loss_config (DictConfig): A configuration object holding parameters for loss computation. """
def __init__(self, sde: SDE, loss_config: DictConfig) -> None:
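The MSELoss class above is documented as comparing predictions and data, possibly in the frequency domain, with a weight factor derived from the SDE's power-special matrix (PSM), mirroring the inner loss in the BaseMAML snippet. A hedged sketch of such a PSM-weighted frequency-domain MSE; the function name, shapes, and uniform toy weighting are assumptions, not the module's actual code:

import jax.numpy as jnp

def weighted_freq_mse(y_pred, y_true, psm):
    """Compare two batches after a 2-D FFT and weight the squared error
    per frequency with the power-special matrix (PSM)."""
    pred_f = jnp.fft.fft2(y_pred, norm="ortho", axes=(1, 2))
    true_f = jnp.fft.fft2(y_true, norm="ortho", axes=(1, 2))
    err = jnp.abs(pred_f - true_f) ** 2   # squared magnitude of the complex error
    return jnp.mean(psm * err)            # PSM acts as a per-frequency weight

y_pred = jnp.zeros((2, 8, 8, 1))
y_true = jnp.ones((2, 8, 8, 1))
psm = jnp.ones((1, 8, 8, 1))              # uniform weighting for the toy example
print(weighted_freq_mse(y_pred, y_true, psm))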
2
2023-10-24 22:01:35+00:00
12k
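The batch_mul helper imported by this loss module (and defined verbatim in the context above) multiplies corresponding batch elements via jax.vmap, which is how a per-sample scalar such as a diffusion coefficient scales a whole per-sample field in the reverse-SDE drift. A quick usage sketch; the example shapes are made up for illustration:

import jax
import jax.numpy as jnp

def batch_mul(a, b):
    # same definition as utils.common.batch_mul shown in the context above
    return jax.vmap(lambda x, y: x * y)(a, b)

sigmas = jnp.array([0.1, 2.0])        # one diffusion coefficient per batch element
noise = jnp.ones((2, 4, 4, 1))        # a batch of two 4x4 single-channel fields
scaled = batch_mul(sigmas, noise)
print(scaled.shape, float(scaled[1, 0, 0, 0]))  # (2, 4, 4, 1) 2.0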
R1999RC-official/Reverse1999ResonanceCalculator
python/python_env/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py
[ { "identifier": "Candidate", "path": "python/python_env/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py", "snippet": "class Candidate:\n @property\n def project_name(self) -> NormalizedName:\n \"\"\"The \"project name\" of the candidate.\n\n This is different from ``name`` if this candidate contains extras,\n in which case ``name`` would contain the ``[...]`` part, while this\n refers to the name of the project.\n \"\"\"\n raise NotImplementedError(\"Override in subclass\")\n\n @property\n def name(self) -> str:\n \"\"\"The name identifying this candidate in the resolver.\n\n This is different from ``project_name`` if this candidate contains\n extras, where ``project_name`` would not contain the ``[...]`` part.\n \"\"\"\n raise NotImplementedError(\"Override in subclass\")\n\n @property\n def version(self) -> CandidateVersion:\n raise NotImplementedError(\"Override in subclass\")\n\n @property\n def is_installed(self) -> bool:\n raise NotImplementedError(\"Override in subclass\")\n\n @property\n def is_editable(self) -> bool:\n raise NotImplementedError(\"Override in subclass\")\n\n @property\n def source_link(self) -> Optional[Link]:\n raise NotImplementedError(\"Override in subclass\")\n\n def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:\n raise NotImplementedError(\"Override in subclass\")\n\n def get_install_requirement(self) -> Optional[InstallRequirement]:\n raise NotImplementedError(\"Override in subclass\")\n\n def format_for_error(self) -> str:\n raise NotImplementedError(\"Subclass should override\")" }, { "identifier": "Requirement", "path": "python/python_env/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py", "snippet": "class Requirement:\n @property\n def project_name(self) -> NormalizedName:\n \"\"\"The \"project name\" of a requirement.\n\n This is different from ``name`` if this requirement contains extras,\n in which case ``name`` would contain the ``[...]`` part, while this\n refers to the name of the project.\n \"\"\"\n raise NotImplementedError(\"Subclass should override\")\n\n @property\n def name(self) -> str:\n \"\"\"The name identifying this requirement in the resolver.\n\n This is different from ``project_name`` if this requirement contains\n extras, where ``project_name`` would not contain the ``[...]`` part.\n \"\"\"\n raise NotImplementedError(\"Subclass should override\")\n\n def is_satisfied_by(self, candidate: \"Candidate\") -> bool:\n return False\n\n def get_candidate_lookup(self) -> CandidateLookup:\n raise NotImplementedError(\"Subclass should override\")\n\n def format_for_error(self) -> str:\n raise NotImplementedError(\"Subclass should override\")" }, { "identifier": "Factory", "path": "python/python_env/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py", "snippet": "class Factory:\n def __init__(\n self,\n finder: PackageFinder,\n preparer: RequirementPreparer,\n make_install_req: InstallRequirementProvider,\n wheel_cache: Optional[WheelCache],\n use_user_site: bool,\n force_reinstall: bool,\n ignore_installed: bool,\n ignore_requires_python: bool,\n py_version_info: Optional[Tuple[int, ...]] = None,\n ) -> None:\n self._finder = finder\n self.preparer = preparer\n self._wheel_cache = wheel_cache\n self._python_candidate = RequiresPythonCandidate(py_version_info)\n self._make_install_req_from_spec = make_install_req\n self._use_user_site = use_user_site\n self._force_reinstall = force_reinstall\n self._ignore_requires_python = ignore_requires_python\n\n 
self._build_failures: Cache[InstallationError] = {}\n self._link_candidate_cache: Cache[LinkCandidate] = {}\n self._editable_candidate_cache: Cache[EditableCandidate] = {}\n self._installed_candidate_cache: Dict[str, AlreadyInstalledCandidate] = {}\n self._extras_candidate_cache: Dict[\n Tuple[int, FrozenSet[NormalizedName]], ExtrasCandidate\n ] = {}\n\n if not ignore_installed:\n env = get_default_environment()\n self._installed_dists = {\n dist.canonical_name: dist\n for dist in env.iter_installed_distributions(local_only=False)\n }\n else:\n self._installed_dists = {}\n\n @property\n def force_reinstall(self) -> bool:\n return self._force_reinstall\n\n def _fail_if_link_is_unsupported_wheel(self, link: Link) -> None:\n if not link.is_wheel:\n return\n wheel = Wheel(link.filename)\n if wheel.supported(self._finder.target_python.get_unsorted_tags()):\n return\n msg = f\"{link.filename} is not a supported wheel on this platform.\"\n raise UnsupportedWheel(msg)\n\n def _make_extras_candidate(\n self,\n base: BaseCandidate,\n extras: FrozenSet[str],\n *,\n comes_from: Optional[InstallRequirement] = None,\n ) -> ExtrasCandidate:\n cache_key = (id(base), frozenset(canonicalize_name(e) for e in extras))\n try:\n candidate = self._extras_candidate_cache[cache_key]\n except KeyError:\n candidate = ExtrasCandidate(base, extras, comes_from=comes_from)\n self._extras_candidate_cache[cache_key] = candidate\n return candidate\n\n def _make_candidate_from_dist(\n self,\n dist: BaseDistribution,\n extras: FrozenSet[str],\n template: InstallRequirement,\n ) -> Candidate:\n try:\n base = self._installed_candidate_cache[dist.canonical_name]\n except KeyError:\n base = AlreadyInstalledCandidate(dist, template, factory=self)\n self._installed_candidate_cache[dist.canonical_name] = base\n if not extras:\n return base\n return self._make_extras_candidate(base, extras, comes_from=template)\n\n def _make_candidate_from_link(\n self,\n link: Link,\n extras: FrozenSet[str],\n template: InstallRequirement,\n name: Optional[NormalizedName],\n version: Optional[CandidateVersion],\n ) -> Optional[Candidate]:\n # TODO: Check already installed candidate, and use it if the link and\n # editable flag match.\n\n if link in self._build_failures:\n # We already tried this candidate before, and it does not build.\n # Don't bother trying again.\n return None\n\n if template.editable:\n if link not in self._editable_candidate_cache:\n try:\n self._editable_candidate_cache[link] = EditableCandidate(\n link,\n template,\n factory=self,\n name=name,\n version=version,\n )\n except MetadataInconsistent as e:\n logger.info(\n \"Discarding [blue underline]%s[/]: [yellow]%s[reset]\",\n link,\n e,\n extra={\"markup\": True},\n )\n self._build_failures[link] = e\n return None\n\n base: BaseCandidate = self._editable_candidate_cache[link]\n else:\n if link not in self._link_candidate_cache:\n try:\n self._link_candidate_cache[link] = LinkCandidate(\n link,\n template,\n factory=self,\n name=name,\n version=version,\n )\n except MetadataInconsistent as e:\n logger.info(\n \"Discarding [blue underline]%s[/]: [yellow]%s[reset]\",\n link,\n e,\n extra={\"markup\": True},\n )\n self._build_failures[link] = e\n return None\n base = self._link_candidate_cache[link]\n\n if not extras:\n return base\n return self._make_extras_candidate(base, extras, comes_from=template)\n\n def _iter_found_candidates(\n self,\n ireqs: Sequence[InstallRequirement],\n specifier: SpecifierSet,\n hashes: Hashes,\n prefers_installed: bool,\n incompatible_ids: 
Set[int],\n ) -> Iterable[Candidate]:\n if not ireqs:\n return ()\n\n # The InstallRequirement implementation requires us to give it a\n # \"template\". Here we just choose the first requirement to represent\n # all of them.\n # Hopefully the Project model can correct this mismatch in the future.\n template = ireqs[0]\n assert template.req, \"Candidates found on index must be PEP 508\"\n name = canonicalize_name(template.req.name)\n\n extras: FrozenSet[str] = frozenset()\n for ireq in ireqs:\n assert ireq.req, \"Candidates found on index must be PEP 508\"\n specifier &= ireq.req.specifier\n hashes &= ireq.hashes(trust_internet=False)\n extras |= frozenset(ireq.extras)\n\n def _get_installed_candidate() -> Optional[Candidate]:\n \"\"\"Get the candidate for the currently-installed version.\"\"\"\n # If --force-reinstall is set, we want the version from the index\n # instead, so we \"pretend\" there is nothing installed.\n if self._force_reinstall:\n return None\n try:\n installed_dist = self._installed_dists[name]\n except KeyError:\n return None\n # Don't use the installed distribution if its version does not fit\n # the current dependency graph.\n if not specifier.contains(installed_dist.version, prereleases=True):\n return None\n candidate = self._make_candidate_from_dist(\n dist=installed_dist,\n extras=extras,\n template=template,\n )\n # The candidate is a known incompatibility. Don't use it.\n if id(candidate) in incompatible_ids:\n return None\n return candidate\n\n def iter_index_candidate_infos() -> Iterator[IndexCandidateInfo]:\n result = self._finder.find_best_candidate(\n project_name=name,\n specifier=specifier,\n hashes=hashes,\n )\n icans = list(result.iter_applicable())\n\n # PEP 592: Yanked releases are ignored unless the specifier\n # explicitly pins a version (via '==' or '===') that can be\n # solely satisfied by a yanked release.\n all_yanked = all(ican.link.is_yanked for ican in icans)\n\n def is_pinned(specifier: SpecifierSet) -> bool:\n for sp in specifier:\n if sp.operator == \"===\":\n return True\n if sp.operator != \"==\":\n continue\n if sp.version.endswith(\".*\"):\n continue\n return True\n return False\n\n pinned = is_pinned(specifier)\n\n # PackageFinder returns earlier versions first, so we reverse.\n for ican in reversed(icans):\n if not (all_yanked and pinned) and ican.link.is_yanked:\n continue\n func = functools.partial(\n self._make_candidate_from_link,\n link=ican.link,\n extras=extras,\n template=template,\n name=name,\n version=ican.version,\n )\n yield ican.version, func\n\n return FoundCandidates(\n iter_index_candidate_infos,\n _get_installed_candidate(),\n prefers_installed,\n incompatible_ids,\n )\n\n def _iter_explicit_candidates_from_base(\n self,\n base_requirements: Iterable[Requirement],\n extras: FrozenSet[str],\n ) -> Iterator[Candidate]:\n \"\"\"Produce explicit candidates from the base given an extra-ed package.\n\n :param base_requirements: Requirements known to the resolver. 
The\n requirements are guaranteed to not have extras.\n :param extras: The extras to inject into the explicit requirements'\n candidates.\n \"\"\"\n for req in base_requirements:\n lookup_cand, _ = req.get_candidate_lookup()\n if lookup_cand is None: # Not explicit.\n continue\n # We've stripped extras from the identifier, and should always\n # get a BaseCandidate here, unless there's a bug elsewhere.\n base_cand = as_base_candidate(lookup_cand)\n assert base_cand is not None, \"no extras here\"\n yield self._make_extras_candidate(base_cand, extras)\n\n def _iter_candidates_from_constraints(\n self,\n identifier: str,\n constraint: Constraint,\n template: InstallRequirement,\n ) -> Iterator[Candidate]:\n \"\"\"Produce explicit candidates from constraints.\n\n This creates \"fake\" InstallRequirement objects that are basically clones\n of what \"should\" be the template, but with original_link set to link.\n \"\"\"\n for link in constraint.links:\n self._fail_if_link_is_unsupported_wheel(link)\n candidate = self._make_candidate_from_link(\n link,\n extras=frozenset(),\n template=install_req_from_link_and_ireq(link, template),\n name=canonicalize_name(identifier),\n version=None,\n )\n if candidate:\n yield candidate\n\n def find_candidates(\n self,\n identifier: str,\n requirements: Mapping[str, Iterable[Requirement]],\n incompatibilities: Mapping[str, Iterator[Candidate]],\n constraint: Constraint,\n prefers_installed: bool,\n ) -> Iterable[Candidate]:\n # Collect basic lookup information from the requirements.\n explicit_candidates: Set[Candidate] = set()\n ireqs: List[InstallRequirement] = []\n for req in requirements[identifier]:\n cand, ireq = req.get_candidate_lookup()\n if cand is not None:\n explicit_candidates.add(cand)\n if ireq is not None:\n ireqs.append(ireq)\n\n # If the current identifier contains extras, add requires and explicit\n # candidates from entries from extra-less identifier.\n with contextlib.suppress(InvalidRequirement):\n parsed_requirement = get_requirement(identifier)\n if parsed_requirement.name != identifier:\n explicit_candidates.update(\n self._iter_explicit_candidates_from_base(\n requirements.get(parsed_requirement.name, ()),\n frozenset(parsed_requirement.extras),\n ),\n )\n for req in requirements.get(parsed_requirement.name, []):\n _, ireq = req.get_candidate_lookup()\n if ireq is not None:\n ireqs.append(ireq)\n\n # Add explicit candidates from constraints. We only do this if there are\n # known ireqs, which represent requirements not already explicit. 
If\n # there are no ireqs, we're constraining already-explicit requirements,\n # which is handled later when we return the explicit candidates.\n if ireqs:\n try:\n explicit_candidates.update(\n self._iter_candidates_from_constraints(\n identifier,\n constraint,\n template=ireqs[0],\n ),\n )\n except UnsupportedWheel:\n # If we're constrained to install a wheel incompatible with the\n # target architecture, no candidates will ever be valid.\n return ()\n\n # Since we cache all the candidates, incompatibility identification\n # can be made quicker by comparing only the id() values.\n incompat_ids = {id(c) for c in incompatibilities.get(identifier, ())}\n\n # If none of the requirements want an explicit candidate, we can ask\n # the finder for candidates.\n if not explicit_candidates:\n return self._iter_found_candidates(\n ireqs,\n constraint.specifier,\n constraint.hashes,\n prefers_installed,\n incompat_ids,\n )\n\n return (\n c\n for c in explicit_candidates\n if id(c) not in incompat_ids\n and constraint.is_satisfied_by(c)\n and all(req.is_satisfied_by(c) for req in requirements[identifier])\n )\n\n def _make_requirements_from_install_req(\n self, ireq: InstallRequirement, requested_extras: Iterable[str]\n ) -> Iterator[Requirement]:\n \"\"\"\n Returns requirement objects associated with the given InstallRequirement. In\n most cases this will be a single object but the following special cases exist:\n - the InstallRequirement has markers that do not apply -> result is empty\n - the InstallRequirement has both a constraint and extras -> result is split\n in two requirement objects: one with the constraint and one with the\n extra. This allows centralized constraint handling for the base,\n resulting in fewer candidate rejections.\n \"\"\"\n if not ireq.match_markers(requested_extras):\n logger.info(\n \"Ignoring %s: markers '%s' don't match your environment\",\n ireq.name,\n ireq.markers,\n )\n elif not ireq.link:\n if ireq.extras and ireq.req is not None and ireq.req.specifier:\n yield SpecifierWithoutExtrasRequirement(ireq)\n yield SpecifierRequirement(ireq)\n else:\n self._fail_if_link_is_unsupported_wheel(ireq.link)\n cand = self._make_candidate_from_link(\n ireq.link,\n extras=frozenset(ireq.extras),\n template=ireq,\n name=canonicalize_name(ireq.name) if ireq.name else None,\n version=None,\n )\n if cand is None:\n # There's no way we can satisfy a URL requirement if the underlying\n # candidate fails to build. An unnamed URL must be user-supplied, so\n # we fail eagerly. 
If the URL is named, an unsatisfiable requirement\n # can make the resolver do the right thing, either backtrack (and\n # maybe find some other requirement that's buildable) or raise a\n # ResolutionImpossible eventually.\n if not ireq.name:\n raise self._build_failures[ireq.link]\n yield UnsatisfiableRequirement(canonicalize_name(ireq.name))\n else:\n yield self.make_requirement_from_candidate(cand)\n\n def collect_root_requirements(\n self, root_ireqs: List[InstallRequirement]\n ) -> CollectedRootRequirements:\n collected = CollectedRootRequirements([], {}, {})\n for i, ireq in enumerate(root_ireqs):\n if ireq.constraint:\n # Ensure we only accept valid constraints\n problem = check_invalid_constraint_type(ireq)\n if problem:\n raise InstallationError(problem)\n if not ireq.match_markers():\n continue\n assert ireq.name, \"Constraint must be named\"\n name = canonicalize_name(ireq.name)\n if name in collected.constraints:\n collected.constraints[name] &= ireq\n else:\n collected.constraints[name] = Constraint.from_ireq(ireq)\n else:\n reqs = list(\n self._make_requirements_from_install_req(\n ireq,\n requested_extras=(),\n )\n )\n if not reqs:\n continue\n template = reqs[0]\n if ireq.user_supplied and template.name not in collected.user_requested:\n collected.user_requested[template.name] = i\n collected.requirements.extend(reqs)\n # Put requirements with extras at the end of the root requires. This does not\n # affect resolvelib's picking preference but it does affect its initial criteria\n # population: by putting extras at the end we enable the candidate finder to\n # present resolvelib with a smaller set of candidates to resolvelib, already\n # taking into account any non-transient constraints on the associated base. This\n # means resolvelib will have fewer candidates to visit and reject.\n # Python's list sort is stable, meaning relative order is kept for objects with\n # the same key.\n collected.requirements.sort(key=lambda r: r.name != r.project_name)\n return collected\n\n def make_requirement_from_candidate(\n self, candidate: Candidate\n ) -> ExplicitRequirement:\n return ExplicitRequirement(candidate)\n\n def make_requirements_from_spec(\n self,\n specifier: str,\n comes_from: Optional[InstallRequirement],\n requested_extras: Iterable[str] = (),\n ) -> Iterator[Requirement]:\n \"\"\"\n Returns requirement objects associated with the given specifier. In most cases\n this will be a single object but the following special cases exist:\n - the specifier has markers that do not apply -> result is empty\n - the specifier has both a constraint and extras -> result is split\n in two requirement objects: one with the constraint and one with the\n extra. 
This allows centralized constraint handling for the base,\n resulting in fewer candidate rejections.\n \"\"\"\n ireq = self._make_install_req_from_spec(specifier, comes_from)\n return self._make_requirements_from_install_req(ireq, requested_extras)\n\n def make_requires_python_requirement(\n self,\n specifier: SpecifierSet,\n ) -> Optional[Requirement]:\n if self._ignore_requires_python:\n return None\n # Don't bother creating a dependency for an empty Requires-Python.\n if not str(specifier):\n return None\n return RequiresPythonRequirement(specifier, self._python_candidate)\n\n def get_wheel_cache_entry(\n self, link: Link, name: Optional[str]\n ) -> Optional[CacheEntry]:\n \"\"\"Look up the link in the wheel cache.\n\n If ``preparer.require_hashes`` is True, don't use the wheel cache,\n because cached wheels, always built locally, have different hashes\n than the files downloaded from the index server and thus throw false\n hash mismatches. Furthermore, cached wheels at present have\n nondeterministic contents due to file modification times.\n \"\"\"\n if self._wheel_cache is None:\n return None\n return self._wheel_cache.get_cache_entry(\n link=link,\n package_name=name,\n supported_tags=get_supported(),\n )\n\n def get_dist_to_uninstall(self, candidate: Candidate) -> Optional[BaseDistribution]:\n # TODO: Are there more cases this needs to return True? Editable?\n dist = self._installed_dists.get(candidate.project_name)\n if dist is None: # Not installed, no uninstallation required.\n return None\n\n # We're installing into global site. The current installation must\n # be uninstalled, no matter it's in global or user site, because the\n # user site installation has precedence over global.\n if not self._use_user_site:\n return dist\n\n # We're installing into user site. Remove the user site installation.\n if dist.in_usersite:\n return dist\n\n # We're installing into user site, but the installed incompatible\n # package is in global site. We can't uninstall that, and would let\n # the new user installation to \"shadow\" it. But shadowing won't work\n # in virtual environments, so we error out.\n if running_under_virtualenv() and dist.in_site_packages:\n message = (\n f\"Will not install to the user site because it will lack \"\n f\"sys.path precedence to {dist.raw_name} in {dist.location}\"\n )\n raise InstallationError(message)\n return None\n\n def _report_requires_python_error(\n self, causes: Sequence[\"ConflictCause\"]\n ) -> UnsupportedPythonVersion:\n assert causes, \"Requires-Python error reported with no cause\"\n\n version = self._python_candidate.version\n\n if len(causes) == 1:\n specifier = str(causes[0].requirement.specifier)\n message = (\n f\"Package {causes[0].parent.name!r} requires a different \"\n f\"Python: {version} not in {specifier!r}\"\n )\n return UnsupportedPythonVersion(message)\n\n message = f\"Packages require a different Python. 
{version} not in:\"\n for cause in causes:\n package = cause.parent.format_for_error()\n specifier = str(cause.requirement.specifier)\n message += f\"\\n{specifier!r} (required by {package})\"\n return UnsupportedPythonVersion(message)\n\n def _report_single_requirement_conflict(\n self, req: Requirement, parent: Optional[Candidate]\n ) -> DistributionNotFound:\n if parent is None:\n req_disp = str(req)\n else:\n req_disp = f\"{req} (from {parent.name})\"\n\n cands = self._finder.find_all_candidates(req.project_name)\n skipped_by_requires_python = self._finder.requires_python_skipped_reasons()\n\n versions_set: Set[CandidateVersion] = set()\n yanked_versions_set: Set[CandidateVersion] = set()\n for c in cands:\n is_yanked = c.link.is_yanked if c.link else False\n if is_yanked:\n yanked_versions_set.add(c.version)\n else:\n versions_set.add(c.version)\n\n versions = [str(v) for v in sorted(versions_set)]\n yanked_versions = [str(v) for v in sorted(yanked_versions_set)]\n\n if yanked_versions:\n # Saying \"version X is yanked\" isn't entirely accurate.\n # https://github.com/pypa/pip/issues/11745#issuecomment-1402805842\n logger.critical(\n \"Ignored the following yanked versions: %s\",\n \", \".join(yanked_versions) or \"none\",\n )\n if skipped_by_requires_python:\n logger.critical(\n \"Ignored the following versions that require a different python \"\n \"version: %s\",\n \"; \".join(skipped_by_requires_python) or \"none\",\n )\n logger.critical(\n \"Could not find a version that satisfies the requirement %s \"\n \"(from versions: %s)\",\n req_disp,\n \", \".join(versions) or \"none\",\n )\n if str(req) == \"requirements.txt\":\n logger.info(\n \"HINT: You are attempting to install a package literally \"\n 'named \"requirements.txt\" (which cannot exist). Consider '\n \"using the '-r' flag to install the packages listed in \"\n \"requirements.txt\"\n )\n\n return DistributionNotFound(f\"No matching distribution found for {req}\")\n\n def get_installation_error(\n self,\n e: \"ResolutionImpossible[Requirement, Candidate]\",\n constraints: Dict[str, Constraint],\n ) -> InstallationError:\n assert e.causes, \"Installation error reported with no cause\"\n\n # If one of the things we can't solve is \"we need Python X.Y\",\n # that is what we report.\n requires_python_causes = [\n cause\n for cause in e.causes\n if isinstance(cause.requirement, RequiresPythonRequirement)\n and not cause.requirement.is_satisfied_by(self._python_candidate)\n ]\n if requires_python_causes:\n # The comprehension above makes sure all Requirement instances are\n # RequiresPythonRequirement, so let's cast for convenience.\n return self._report_requires_python_error(\n cast(\"Sequence[ConflictCause]\", requires_python_causes),\n )\n\n # Otherwise, we have a set of causes which can't all be satisfied\n # at once.\n\n # The simplest case is when we have *one* cause that can't be\n # satisfied. 
We just report that case.\n if len(e.causes) == 1:\n req, parent = e.causes[0]\n if req.name not in constraints:\n return self._report_single_requirement_conflict(req, parent)\n\n # OK, we now have a list of requirements that can't all be\n # satisfied at once.\n\n # A couple of formatting helpers\n def text_join(parts: List[str]) -> str:\n if len(parts) == 1:\n return parts[0]\n\n return \", \".join(parts[:-1]) + \" and \" + parts[-1]\n\n def describe_trigger(parent: Candidate) -> str:\n ireq = parent.get_install_requirement()\n if not ireq or not ireq.comes_from:\n return f\"{parent.name}=={parent.version}\"\n if isinstance(ireq.comes_from, InstallRequirement):\n return str(ireq.comes_from.name)\n return str(ireq.comes_from)\n\n triggers = set()\n for req, parent in e.causes:\n if parent is None:\n # This is a root requirement, so we can report it directly\n trigger = req.format_for_error()\n else:\n trigger = describe_trigger(parent)\n triggers.add(trigger)\n\n if triggers:\n info = text_join(sorted(triggers))\n else:\n info = \"the requested packages\"\n\n msg = (\n \"Cannot install {} because these package versions \"\n \"have conflicting dependencies.\".format(info)\n )\n logger.critical(msg)\n msg = \"\\nThe conflict is caused by:\"\n\n relevant_constraints = set()\n for req, parent in e.causes:\n if req.name in constraints:\n relevant_constraints.add(req.name)\n msg = msg + \"\\n \"\n if parent:\n msg = msg + f\"{parent.name} {parent.version} depends on \"\n else:\n msg = msg + \"The user requested \"\n msg = msg + req.format_for_error()\n for key in relevant_constraints:\n spec = constraints[key].specifier\n msg += f\"\\n The user requested (constraint) {key}{spec}\"\n\n msg = (\n msg\n + \"\\n\\n\"\n + \"To fix this you could try to:\\n\"\n + \"1. loosen the range of package versions you've specified\\n\"\n + \"2. remove package versions to allow pip attempt to solve \"\n + \"the dependency conflict\\n\"\n )\n\n logger.info(msg)\n\n return DistributionNotFound(\n \"ResolutionImpossible: for help visit \"\n \"https://pip.pypa.io/en/latest/topics/dependency-resolution/\"\n \"#dealing-with-dependency-conflicts\"\n )" } ]
import contextlib import functools import logging import os from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast from pip._vendor.packaging.utils import canonicalize_name from pip._vendor.resolvelib import BaseReporter, ResolutionImpossible from pip._vendor.resolvelib import Resolver as RLResolver from pip._vendor.resolvelib.structs import DirectedGraph from pip._internal.cache import WheelCache from pip._internal.index.package_finder import PackageFinder from pip._internal.operations.prepare import RequirementPreparer from pip._internal.req.constructors import install_req_extend_extras from pip._internal.req.req_install import InstallRequirement from pip._internal.req.req_set import RequirementSet from pip._internal.resolution.base import BaseResolver, InstallRequirementProvider from pip._internal.resolution.resolvelib.provider import PipProvider from pip._internal.resolution.resolvelib.reporter import ( PipDebuggingReporter, PipReporter, ) from pip._internal.utils.packaging import get_requirement from .base import Candidate, Requirement from .factory import Factory from pip._vendor.resolvelib.resolvers import Result as RLResult
7,464
if TYPE_CHECKING: Result = RLResult[Requirement, Candidate, str] logger = logging.getLogger(__name__) class Resolver(BaseResolver): _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"} def __init__( self, preparer: RequirementPreparer, finder: PackageFinder, wheel_cache: Optional[WheelCache], make_install_req: InstallRequirementProvider, use_user_site: bool, ignore_dependencies: bool, ignore_installed: bool, ignore_requires_python: bool, force_reinstall: bool, upgrade_strategy: str, py_version_info: Optional[Tuple[int, ...]] = None, ): super().__init__() assert upgrade_strategy in self._allowed_strategies
if TYPE_CHECKING: Result = RLResult[Requirement, Candidate, str] logger = logging.getLogger(__name__) class Resolver(BaseResolver): _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"} def __init__( self, preparer: RequirementPreparer, finder: PackageFinder, wheel_cache: Optional[WheelCache], make_install_req: InstallRequirementProvider, use_user_site: bool, ignore_dependencies: bool, ignore_installed: bool, ignore_requires_python: bool, force_reinstall: bool, upgrade_strategy: str, py_version_info: Optional[Tuple[int, ...]] = None, ): super().__init__() assert upgrade_strategy in self._allowed_strategies
self.factory = Factory(
2
2023-10-24 06:48:58+00:00
12k
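To make the row above easier to read end to end: assuming the field names mean what they suggest (an interpretation of the columns, not something stated in the rows themselves), cropped_code is the prefix a model is shown and next_line (here `self.factory = Factory(`) is the single line it is asked to produce. A minimal sketch of exact-match scoring under that assumption, in Python:

def exact_match(prediction: str, next_line: str) -> bool:
    # Compare the predicted line to the reference next_line, ignoring
    # surrounding whitespace (a lenient but common convention).
    return prediction.strip() == next_line.strip()

# Hypothetical usage with the record above:
# exact_match(model_output, "self.factory = Factory(")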
KosinskiLab/pyTME
tme/tests/test_matching_exhaustive.py
[ { "identifier": "scan", "path": "tme/matching_exhaustive.py", "snippet": "@device_memory_handler\ndef scan(\n matching_data: MatchingData,\n matching_setup: Callable,\n matching_score: Callable,\n n_jobs: int = 4,\n callback_class: CallbackClass = None,\n callback_class_args: Dict = {},\n fftargs: Dict = {},\n pad_fourier: bool = True,\n interpolation_order: int = 3,\n jobs_per_callback_class: int = 8,\n **kwargs,\n) -> Tuple:\n \"\"\"\n Perform template matching between target and template and sample\n different rotations of template.\n\n Parameters\n ----------\n matching_data : MatchingData\n Template matching data.\n matching_setup : Callable\n Function pointer to setup function.\n matching_score : Callable\n Function pointer to scoring function.\n n_jobs : int, optional\n Number of parallel jobs. Default is 4.\n callback_class : type, optional\n Analyzer class pointer to operate on computed scores.\n callback_class_args : dict, optional\n Arguments passed to the callback_class. Default is an empty dictionary.\n fftargs : dict, optional\n Arguments for the FFT operations. Default is an empty dictionary.\n pad_fourier: bool, optional\n Whether to pad target and template to the full convolution shape.\n interpolation_order : int, optional\n Order of spline interpolation for rotations.\n jobs_per_callback_class : int, optional\n How many jobs should be processed by a single callback_class instance,\n if ones is provided.\n **kwargs : various\n Additional arguments.\n\n Returns\n -------\n Tuple\n The merged results from callback_class if provided otherwise None.\n \"\"\"\n matching_data.to_backend()\n fourier_pad = matching_data._templateshape\n fourier_shift = backend.zeros(len(fourier_pad))\n if not pad_fourier:\n fourier_pad = backend.full(shape=fourier_shift.shape, fill_value=1, dtype=int)\n\n convolution_shape, fast_shape, fast_ft_shape = backend.compute_convolution_shapes(\n matching_data._target.shape, fourier_pad\n )\n if not pad_fourier:\n fourier_shift = 1 - backend.astype(\n backend.divide(matching_data._templateshape, 2), int\n )\n fourier_shift -= backend.mod(matching_data._templateshape, 2)\n fourier_shift = backend.flip(fourier_shift, axis=(0,))\n shape_diff = backend.subtract(fast_shape, convolution_shape)\n shape_diff = backend.astype(backend.divide(shape_diff, 2), int)\n backend.add(fourier_shift, shape_diff, out=fourier_shift)\n\n callback_class_args[\"fourier_shift\"] = fourier_shift\n rfftn, irfftn = backend.build_fft(\n fast_shape=fast_shape,\n fast_ft_shape=fast_ft_shape,\n real_dtype=matching_data._default_dtype,\n complex_dtype=matching_data._complex_dtype,\n fftargs=fftargs,\n )\n setup = matching_setup(\n rfftn=rfftn,\n irfftn=irfftn,\n template=matching_data.template,\n template_mask=matching_data.template_mask,\n target=matching_data.target,\n target_mask=matching_data.target_mask,\n fast_shape=fast_shape,\n fast_ft_shape=fast_ft_shape,\n real_dtype=matching_data._default_dtype,\n complex_dtype=matching_data._complex_dtype,\n callback_class=callback_class,\n callback_class_args=callback_class_args,\n **kwargs,\n )\n rfftn, irfftn = None, None\n\n template_filter, preprocessor = None, Preprocessor()\n for method, parameters in matching_data.template_filter.items():\n parameters[\"shape\"] = fast_shape\n parameters[\"omit_negative_frequencies\"] = True\n out = preprocessor.apply_method(method=method, parameters=parameters)\n if template_filter is None:\n template_filter = out\n np.multiply(template_filter, out, out=template_filter)\n\n if template_filter is 
None:\n template_filter = backend.full(\n shape=(1,), fill_value=1, dtype=backend._default_dtype\n )\n else:\n template_filter = backend.to_backend_array(template_filter)\n\n template_filter = backend.astype(template_filter, backend._default_dtype)\n template_filter_buffer = backend.arr_to_sharedarr(\n arr=template_filter,\n shared_memory_handler=kwargs.get(\"shared_memory_handler\", None),\n )\n setup[\"template_filter\"] = (\n template_filter_buffer,\n template_filter.shape,\n template_filter.dtype,\n )\n\n callback_class_args[\"translation_offset\"] = backend.astype(\n matching_data._translation_offset, int\n )\n callback_class_args[\"thread_safe\"] = n_jobs > 1\n callback_class_args[\"gpu_index\"] = kwargs.get(\"gpu_index\", -1)\n\n n_callback_classes = max(n_jobs // jobs_per_callback_class, 1)\n callback_class = setup.pop(\"callback_class\", callback_class)\n callback_class_args = setup.pop(\"callback_class_args\", callback_class_args)\n callback_classes = [callback_class for _ in range(n_callback_classes)]\n if callback_class == MaxScoreOverRotations:\n score_space_shape = backend.subtract(\n matching_data.target.shape,\n matching_data._target_pad,\n )\n callback_classes = [\n class_name(\n score_space_shape=score_space_shape,\n score_space_dtype=matching_data._default_dtype,\n shared_memory_handler=kwargs.get(\"shared_memory_handler\", None),\n rotation_space_dtype=backend._default_dtype_int,\n **callback_class_args,\n )\n for class_name in callback_classes\n ]\n\n matching_data._target, matching_data._template = None, None\n matching_data._target_mask, matching_data._template_mask = None, None\n\n setup[\"fftargs\"] = fftargs.copy()\n convolution_mode = \"same\"\n if backend.sum(matching_data._target_pad) > 0:\n convolution_mode = \"valid\"\n setup[\"convolution_mode\"] = convolution_mode\n setup[\"interpolation_order\"] = interpolation_order\n rotation_list = matching_data._split_rotations_on_jobs(n_jobs)\n\n backend.free_cache()\n\n def _run_scoring(backend_name, backend_args, rotations, **kwargs):\n from tme.backends import backend\n\n backend.change_backend(backend_name, **backend_args)\n return matching_score(rotations=rotations, **kwargs)\n\n callbacks = Parallel(n_jobs=n_jobs)(\n delayed(_run_scoring)(\n backend_name=backend._backend_name,\n backend_args=backend._backend_args,\n rotations=rotation,\n callback_class=callback_classes[index % n_callback_classes],\n callback_class_args=callback_class_args,\n **setup,\n )\n for index, rotation in enumerate(rotation_list)\n )\n\n callbacks = [\n tuple(callback)\n for callback in callbacks[0:n_callback_classes]\n if callback is not None\n ]\n backend.free_cache()\n\n merged_callback = None\n if callback_class is not None:\n merged_callback = callback_class.merge(\n callbacks,\n **callback_class_args,\n score_indices=matching_data.indices,\n inner_merge=True,\n )\n\n return merged_callback" }, { "identifier": "scan_subsets", "path": "tme/matching_exhaustive.py", "snippet": "def scan_subsets(\n matching_data: MatchingData,\n matching_score: Callable,\n matching_setup: Callable,\n callback_class: CallbackClass = None,\n callback_class_args: Dict = {},\n job_schedule: Tuple[int] = (1, 1),\n target_splits: Dict = {},\n template_splits: Dict = {},\n pad_target_edges: bool = False,\n pad_fourier: bool = True,\n interpolation_order: int = 3,\n jobs_per_callback_class: int = 8,\n **kwargs,\n) -> Tuple:\n \"\"\"\n Wrapper around :py:meth:`scan` that supports template matching on splits\n of template and target.\n\n Parameters\n ----------\n 
matching_data : MatchingData\n Template matching data.\n matching_func : type\n Function pointer to setup function.\n matching_score : type\n Function pointer to scoring function.\n callback_class : type, optional\n Analyzer class pointer to operate on computed scores.\n callback_class_args : dict, optional\n Arguments passed to the callback_class. Default is an empty dictionary.\n job_schedule : tuple of int, optional\n Schedule of jobs. Default is (1, 1).\n target_splits : dict, optional\n Splits for target. Default is an empty dictionary, i.e. no splits\n template_splits : dict, optional\n Splits for template. Default is an empty dictionary, i.e. no splits.\n pad_target_edges : bool, optional\n Whether to pad the target boundaries by half the template shape\n along each axis.\n pad_fourier: bool, optional\n Whether to pad target and template to the full convolution shape.\n interpolation_order : int, optional\n Order of spline interpolation for rotations.\n jobs_per_callback_class : int, optional\n How many jobs should be processed by a single callback_class instance,\n if ones is provided.\n **kwargs : various\n Additional arguments.\n\n Notes\n -----\n Objects in matching_data might be destroyed during computation.\n\n Returns\n -------\n Tuple\n The merged results from callback_class if provided otherwise None.\n \"\"\"\n target_splits = split_numpy_array_slices(\n matching_data.target.shape, splits=target_splits\n )\n template_splits = split_numpy_array_slices(\n matching_data._templateshape, splits=template_splits\n )\n\n target_pad = np.zeros(len(matching_data.target.shape), dtype=int)\n if pad_target_edges:\n target_pad = np.subtract(\n matching_data._templateshape, np.mod(matching_data._templateshape, 2)\n )\n outer_jobs, inner_jobs = job_schedule\n results = Parallel(n_jobs=outer_jobs)(\n delayed(_run_inner)(\n backend_name=backend._backend_name,\n backend_args=backend._backend_args,\n matching_data=matching_data.subset_by_slice(\n target_slice=target_split,\n target_pad=target_pad,\n template_slice=template_split,\n ),\n matching_score=matching_score,\n matching_setup=matching_setup,\n n_jobs=inner_jobs,\n callback_class=callback_class,\n callback_class_args=callback_class_args,\n interpolation_order=interpolation_order,\n pad_fourier=pad_fourier,\n gpu_index=index % outer_jobs,\n **kwargs,\n )\n for index, (target_split, template_split) in enumerate(\n product(target_splits, template_splits)\n )\n )\n\n matching_data._target, matching_data._template = None, None\n matching_data._target_mask, matching_data._template_mask = None, None\n\n if callback_class is not None:\n candidates = callback_class.merge(\n results, **callback_class_args, inner_merge=False\n )\n return candidates" }, { "identifier": "MATCHING_EXHAUSTIVE_REGISTER", "path": "tme/matching_exhaustive.py", "snippet": "MATCHING_EXHAUSTIVE_REGISTER = {\n \"CC\": (cc_setup, corr_scoring),\n \"LCC\": (lcc_setup, corr_scoring),\n \"CORR\": (corr_setup, corr_scoring),\n \"CAM\": (cam_setup, corr_scoring),\n \"FLCSphericalMask\": (flcSphericalMask_setup, corr_scoring),\n \"FLC\": (flc_setup, flc_scoring),\n \"MCC\": (mcc_setup, mcc_scoring),\n}" }, { "identifier": "register_matching_exhaustive", "path": "tme/matching_exhaustive.py", "snippet": "def register_matching_exhaustive(\n matching: str,\n matching_setup: Callable,\n matching_scoring: Callable,\n memory_class: MatchingMemoryUsage,\n) -> None:\n \"\"\"\n Registers a new matching scheme.\n\n Parameters\n ----------\n matching : str\n Name of the matching method.\n 
matching_setup : Callable\n The setup function associated with the name.\n matching_scoring : Callable\n The scoring function associated with the name.\n memory_class : MatchingMemoryUsage\n The custom memory estimation class extending\n :py:class:`tme.matching_memory.MatchingMemoryUsage`.\n\n Raises\n ------\n ValueError\n If a function with the name ``matching`` already exists in the registry.\n ValueError\n If ``memory_class`` is not a subclass of\n :py:class:`tme.matching_memory.MatchingMemoryUsage`.\n \"\"\"\n\n if matching in MATCHING_EXHAUSTIVE_REGISTER:\n raise ValueError(f\"A method with name '{matching}' is already registered.\")\n if not issubclass(memory_class, MatchingMemoryUsage):\n raise ValueError(f\"{memory_class} is not a subclass of {MatchingMemoryUsage}.\")\n\n MATCHING_EXHAUSTIVE_REGISTER[matching] = (matching_setup, matching_scoring)\n MATCHING_MEMORY_REGISTRY[matching] = memory_class" }, { "identifier": "MatchingData", "path": "tme/matching_data.py", "snippet": "class MatchingData:\n \"\"\"\n Contains data required for template matching.\n\n Parameters\n ----------\n target : np.ndarray or Density\n Target data array for template matching.\n template : np.ndarray or Density\n Template data array for template matching.\n\n \"\"\"\n\n def __init__(self, target: NDArray, template: NDArray):\n self._default_dtype = np.float32\n self._complex_dtype = np.complex64\n\n self._target = target\n self._target_mask = None\n self._template_mask = None\n self._translation_offset = np.zeros(len(target.shape), dtype=int)\n\n self.template = template\n\n self._target_pad = np.zeros(len(target.shape), dtype=int)\n self._template_pad = np.zeros(len(template.shape), dtype=int)\n\n self.template_filter = {}\n self.target_filter = {}\n\n self._invert_target = False\n\n @staticmethod\n def _shape_to_slice(shape: Tuple[int]):\n return tuple(slice(0, dim) for dim in shape)\n\n @classmethod\n def _slice_to_mesh(cls, slice_variable: (slice,), shape: (int,)):\n if slice_variable is None:\n slice_variable = cls._shape_to_slice(shape)\n ranges = [range(slc.start, slc.stop) for slc in slice_variable]\n indices = np.meshgrid(*ranges, sparse=True, indexing=\"ij\")\n return indices\n\n @staticmethod\n def _load_array(arr: NDArray):\n \"\"\"\n Load ``arr``, If ``arr`` type is memmap, reload from disk.\n\n Parameters\n ----------\n arr : NDArray\n Array to load.\n\n Returns\n -------\n NDArray\n Loaded array.\n \"\"\"\n\n if type(arr) == np.memmap:\n return np.memmap(arr.filename, mode=\"r\", shape=arr.shape, dtype=arr.dtype)\n return arr\n\n def subset_array(\n self, arr: NDArray, arr_slice: Tuple[slice], padding: NDArray\n ) -> NDArray:\n \"\"\"\n Extract a subset of the input array according to the given slice and\n apply padding.\n\n Parameters\n ----------\n arr : NDArray\n The input array from which a subset is extracted.\n arr_slice : tuple of slice\n Defines the region of the input array to be extracted.\n padding : NDArray\n Padding values for each dimension. 
If the padding exceeds the array\n dimensions, the extra regions are filled with the mean of the array\n values, otherwise, the\n values in ``arr`` are used.\n\n Returns\n -------\n NDArray\n Subset of the input array with padding applied.\n \"\"\"\n padding = np.maximum(padding, 0)\n\n slice_start = np.array([x.start for x in arr_slice], dtype=int)\n slice_stop = np.array([x.stop for x in arr_slice], dtype=int)\n slice_shape = np.subtract(slice_stop, slice_start)\n\n padding = np.add(padding, np.mod(padding, 2))\n left_pad = right_pad = np.divide(padding, 2).astype(int)\n\n data_voxels_left = np.minimum(slice_start, left_pad)\n data_voxels_right = np.minimum(\n np.subtract(arr.shape, slice_stop), right_pad\n ).astype(int)\n\n ret_shape = np.add(slice_shape, padding)\n arr_start = np.subtract(slice_start, data_voxels_left)\n arr_stop = np.add(slice_stop, data_voxels_right)\n arr_slice = tuple(slice(*pos) for pos in zip(arr_start, arr_stop))\n arr_mesh = self._slice_to_mesh(arr_slice, arr.shape)\n\n subset_start = np.subtract(left_pad, data_voxels_left)\n subset_stop = np.add(subset_start, np.subtract(arr_stop, arr_start))\n subset_slice = tuple(slice(*prod) for prod in zip(subset_start, subset_stop))\n subset_mesh = self._slice_to_mesh(subset_slice, ret_shape)\n\n if type(arr) == Density:\n if type(arr.data) == np.memmap:\n arr = Density.from_file(arr.data.filename, subset=arr_slice).data\n else:\n arr = np.asarray(arr.data[*arr_mesh])\n else:\n if type(arr) == np.memmap:\n arr = np.memmap(\n arr.filename, mode=\"r\", shape=arr.shape, dtype=arr.dtype\n )\n arr = np.asarray(arr[*arr_mesh])\n ret = np.full(\n shape=np.add(slice_shape, padding), fill_value=arr.mean(), dtype=arr.dtype\n )\n ret[*subset_mesh] = arr\n\n return ret\n\n def subset_by_slice(\n self,\n target_slice: Tuple[slice] = None,\n template_slice: Tuple[slice] = None,\n target_pad: NDArray = None,\n template_pad: NDArray = None,\n invert_target: bool = False,\n ) -> \"MatchingData\":\n \"\"\"\n Slice the instance arrays based on the provided slices.\n\n Parameters\n ----------\n target_slice : tuple of slice, optional\n Slices for the target. If not provided, the full shape is used.\n template_slice : tuple of slice, optional\n Slices for the template. If not provided, the full shape is used.\n target_pad : NDArray, optional\n Padding for target. Defaults to zeros. If padding exceeds target,\n pad with mean.\n template_pad : NDArray, optional\n Padding for template. Defaults to zeros. 
If padding exceeds template,\n pad with mean.\n\n Returns\n -------\n MatchingData\n Newly allocated sliced class instance.\n \"\"\"\n target_shape = self.target.shape\n template_shape = self._template.shape\n\n if target_slice is None:\n target_slice = self._shape_to_slice(target_shape)\n if template_slice is None:\n template_slice = self._shape_to_slice(template_shape)\n\n if target_pad is None:\n target_pad = np.zeros(len(self.target.shape), dtype=int)\n if template_pad is None:\n template_pad = np.zeros(len(self.target.shape), dtype=int)\n\n indices = compute_full_convolution_index(\n outer_shape=self._target.shape,\n inner_shape=self._template.shape,\n outer_split=target_slice,\n inner_split=template_slice,\n )\n\n target_subset = self.subset_array(\n arr=self._target, arr_slice=target_slice, padding=target_pad\n )\n if self._invert_target:\n target_subset *= -1\n target_min, target_max = target_subset.min(), target_subset.max()\n target_subset = (target_subset - target_min) / (target_max - target_min)\n template_subset = self.subset_array(\n arr=self._template,\n arr_slice=template_slice,\n padding=template_pad,\n )\n ret = self.__class__(target=target_subset, template=template_subset)\n\n ret._translation_offset = np.add(\n [x.start for x in target_slice],\n [x.start for x in template_slice],\n )\n ret.template_filter = self.template_filter\n\n ret.rotations, ret.indices = self.rotations, indices\n ret._target_pad, ret._template_pad = target_pad, template_pad\n ret._invert_target = self._invert_target\n\n if self._target_mask is not None:\n ret.target_mask = self.subset_array(\n arr=self._target_mask, arr_slice=target_slice, padding=target_pad\n )\n if self._template_mask is not None:\n ret.template_mask = self.subset_array(\n arr=self._template_mask,\n arr_slice=template_slice,\n padding=template_pad,\n )\n\n return ret\n\n def to_backend(self) -> None:\n \"\"\"\n Transfer the class instance's numpy arrays to the current backend.\n \"\"\"\n for attr_name, attr_value in vars(self).items():\n if isinstance(attr_value, np.ndarray):\n converted_array = backend.to_backend_array(attr_value.copy())\n setattr(self, attr_name, converted_array)\n\n self._default_dtype = backend._default_dtype\n self._complex_dtype = backend._complex_dtype\n\n @property\n def rotations(self):\n \"\"\"Return rotation matrices used for fitting.\"\"\"\n return self._rotations\n\n @rotations.setter\n def rotations(self, rotations: NDArray):\n \"\"\"\n Set and reshape the rotation matrices for fitting.\n\n Parameters\n ----------\n rotations : NDArray\n Rotations in shape (3 x 3), (1 x 3 x 3), or (n x k x k).\n \"\"\"\n if rotations.__class__ != np.ndarray:\n raise ValueError(\"Rotation set has to be of type numpy ndarray.\")\n if rotations.ndim == 2:\n print(\"Reshaping rotations array to rank 3.\")\n rotations = rotations.reshape(1, *rotations.shape)\n elif rotations.ndim == 3:\n pass\n else:\n raise ValueError(\"Rotations have to be a rank 2 or 3 array.\")\n self._rotations = rotations.astype(self._default_dtype)\n\n @property\n def target(self):\n \"\"\"Returns the target NDArray.\"\"\"\n if type(self._target) == Density:\n return self._target.data\n return self._target\n\n @property\n def template(self):\n \"\"\"Returns the reversed template NDArray.\"\"\"\n if type(self._template) == Density:\n return backend.reverse(self._template.data)\n return backend.reverse(self._template)\n\n @template.setter\n def template(self, template: NDArray):\n \"\"\"\n Set the template array.\n\n Parameters\n ----------\n 
template : NDArray\n Array to set as the template.\n \"\"\"\n if type(template) == Density:\n template.data = template.data.astype(self._default_dtype, copy=False)\n self._template = template\n self._templateshape = self._template.shape[::-1]\n return None\n self._template = template.astype(self._default_dtype, copy=False)\n self._templateshape = self._template.shape[::-1]\n\n @property\n def target_mask(self):\n \"\"\"Returns the target mask NDArray.\"\"\"\n if type(self._target_mask) == Density:\n return self._target_mask.data\n return self._target_mask\n\n @target_mask.setter\n def target_mask(self, mask: NDArray):\n \"\"\"Sets the target mask.\"\"\"\n if not np.all(self.target.shape == mask.shape):\n raise ValueError(\"Target and its mask have to have the same shape.\")\n\n if type(mask) == Density:\n mask.data = mask.data.astype(self._default_dtype, copy=False)\n self._target_mask = mask\n self._targetmaskshape = self._target_mask.shape[::-1]\n return None\n self._target_mask = mask.astype(self._default_dtype, copy=False)\n self._targetmaskshape = self._target_mask.shape\n\n @property\n def template_mask(self):\n \"\"\"\n Set the template mask array after reversing it.\n\n Parameters\n ----------\n template : NDArray\n Array to set as the template.\n \"\"\"\n if type(self._template_mask) == Density:\n return backend.reverse(self._template_mask.data)\n return backend.reverse(self._template_mask)\n\n @template_mask.setter\n def template_mask(self, mask: NDArray):\n \"\"\"Returns the reversed template mask NDArray.\"\"\"\n if not np.all(self._template.shape == mask.shape):\n raise ValueError(\"Target and its mask have to have the same shape.\")\n\n if type(mask) == Density:\n mask.data = mask.data.astype(self._default_dtype, copy=False)\n self._template_mask = mask\n self._templatemaskshape = self._template_mask.shape[::-1]\n return None\n\n self._template_mask = mask.astype(self._default_dtype, copy=False)\n self._templatemaskshape = self._template_mask.shape[::-1]\n\n def _split_rotations_on_jobs(self, n_jobs: int) -> List[NDArray]:\n \"\"\"\n Split the rotation matrices into parts based on the number of jobs.\n\n Parameters\n ----------\n n_jobs : int\n Number of jobs for splitting.\n\n Returns\n -------\n list of NDArray\n List of split rotation matrices.\n \"\"\"\n nrot_per_job = self.rotations.shape[0] // n_jobs\n rot_list = []\n for n in range(n_jobs):\n init_rot = n * nrot_per_job\n end_rot = init_rot + nrot_per_job\n if n == n_jobs - 1:\n end_rot = None\n rot_list.append(self.rotations[init_rot:end_rot])\n return rot_list" }, { "identifier": "get_rotation_matrices", "path": "tme/matching_utils.py", "snippet": "def get_rotation_matrices(\n angular_sampling: float, dim: int = 3, use_optimized_set: bool = True\n) -> NDArray:\n \"\"\"\n Returns rotation matrices in format k x dim x dim, where k is determined\n by ``angular_sampling``.\n\n Parameters\n ----------\n angular_sampling : float\n The angle in degrees used for the generation of rotation matrices.\n dim : int, optional\n Dimension of the rotation matrices.\n use_optimized_set : bool, optional\n Whether to use pre-computed rotational sets with more optimal sampling.\n Currently only available when dim=3.\n\n Notes\n -----\n For the case of dim = 3 optimized rotational sets are used, otherwise\n QR-decomposition.\n\n Returns\n -------\n NDArray\n Array of shape (k, dim, dim) containing k rotation matrices.\n \"\"\"\n if dim == 3 and use_optimized_set:\n quaternions, *_ = load_quaternions_by_angle(angular_sampling)\n ret = 
quaternion_to_rotation_matrix(quaternions)\n else:\n num_rotations = dim * (dim - 1) // 2\n k = int((360 / angular_sampling) ** num_rotations)\n As = np.random.randn(k, dim, dim)\n ret, _ = np.linalg.qr(As)\n dets = np.linalg.det(ret)\n neg_dets = dets < 0\n ret[neg_dets, :, -1] *= -1\n return ret" }, { "identifier": "MATCHING_MEMORY_REGISTRY", "path": "tme/matching_memory.py", "snippet": "MATCHING_MEMORY_REGISTRY = {\n \"CC\": CCMemoryUsage,\n \"LCC\": LCCMemoryUsage,\n \"CORR\": CORRMemoryUsage,\n \"CAM\": CAMMemoryUsage,\n \"MCC\": MCCMemoryUsage,\n \"FLCSphericalMask\": FLCSphericalMaskMemoryUsage,\n \"FLC\": FLCMemoryUsage,\n \"MaxScoreOverRotations\": MaxScoreOverRotationsMemoryUsage,\n \"PeakCallerMaximumFilter\": PeakCallerMaximumFilterMemoryUsage,\n \"cupy\": CupyBackendMemoryUsage,\n \"pytorch\": CupyBackendMemoryUsage,\n}" } ]
import numpy as np import pytest from tme.matching_exhaustive import ( scan, scan_subsets, MATCHING_EXHAUSTIVE_REGISTER, register_matching_exhaustive, ) from tme.matching_data import MatchingData from tme.matching_utils import get_rotation_matrices from tme.matching_memory import MATCHING_MEMORY_REGISTRY
7,738
class TestMatchExhaustive: def setup_method(self): target = np.zeros((50, 50, 50)) target[20:30, 30:40, 12:17] = 1 self.target = target template = np.zeros((50, 50, 50)) template[15:25, 20:30, 2:7] = 1 self.template = template self.rotations = get_rotation_matrices(60)[0:2,] def teardown_method(self): self.target = None self.template = None self.coordinates = None self.coordinates_weights = None self.rotations = None @pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys())) def test_scan_single_core(self, score): matching_data = MatchingData(target=self.target, template=self.template) matching_data.target_mask = self.target matching_data.template_mask = self.template matching_data.rotations = self.rotations setup, process = MATCHING_EXHAUSTIVE_REGISTER[score] scan(matching_data=matching_data, matching_setup=setup, matching_score=process) @pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys())) def test_scan_single_multi_core(self, score): matching_data = MatchingData(target=self.target, template=self.template) matching_data.target_mask = self.target matching_data.template_mask = self.template matching_data.rotations = self.rotations setup, process = MATCHING_EXHAUSTIVE_REGISTER[score] scan( matching_data=matching_data, matching_setup=setup, matching_score=process, n_jobs=2, ) @pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys())) def test_scan_subsets_single_core(self, score): matching_data = MatchingData(target=self.target, template=self.template) matching_data.target_mask = self.target matching_data.template_mask = self.template matching_data.rotations = self.rotations setup, process = MATCHING_EXHAUSTIVE_REGISTER[score] target_splits = {i: 1 for i in range(self.target.ndim)} template_splits = {i: 1 for i in range(self.target.ndim)} target_splits[0], template_splits[1] = 2, 2 scan_subsets( matching_data=matching_data, matching_setup=setup, matching_score=process, target_splits=target_splits, template_splits=template_splits, job_schedule=(2, 1), ) @pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys())) def test_scan_subsets_single_multi_core(self, score): matching_data = MatchingData(target=self.target, template=self.template) matching_data.target_mask = self.target matching_data.template_mask = self.template matching_data.rotations = self.rotations setup, process = MATCHING_EXHAUSTIVE_REGISTER[score] target_splits = {i: 1 for i in range(self.target.ndim)} template_splits = {i: 1 for i in range(self.target.ndim)} target_splits[0], template_splits[1] = 2, 2 scan_subsets( matching_data=matching_data, matching_setup=setup, matching_score=process, target_splits=target_splits, template_splits=template_splits, job_schedule=(2, 1), ) @pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys())) def test_scan_subsets_single_multi_core_both(self, score): matching_data = MatchingData(target=self.target, template=self.template) matching_data.target_mask = self.target matching_data.template_mask = self.template matching_data.rotations = self.rotations setup, process = MATCHING_EXHAUSTIVE_REGISTER[score] target_splits = {i: 1 for i in range(self.target.ndim)} template_splits = {i: 1 for i in range(self.target.ndim)} target_splits[0], template_splits[1] = 2, 2 scan_subsets( matching_data=matching_data, matching_setup=setup, matching_score=process, target_splits=target_splits, template_splits=template_splits, job_schedule=(2, 2), ) def test_register_matching_exhaustive(self): setup, matching = 
MATCHING_EXHAUSTIVE_REGISTER[ list(MATCHING_EXHAUSTIVE_REGISTER.keys())[0] ]
class TestMatchExhaustive: def setup_method(self): target = np.zeros((50, 50, 50)) target[20:30, 30:40, 12:17] = 1 self.target = target template = np.zeros((50, 50, 50)) template[15:25, 20:30, 2:7] = 1 self.template = template self.rotations = get_rotation_matrices(60)[0:2,] def teardown_method(self): self.target = None self.template = None self.coordinates = None self.coordinates_weights = None self.rotations = None @pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys())) def test_scan_single_core(self, score): matching_data = MatchingData(target=self.target, template=self.template) matching_data.target_mask = self.target matching_data.template_mask = self.template matching_data.rotations = self.rotations setup, process = MATCHING_EXHAUSTIVE_REGISTER[score] scan(matching_data=matching_data, matching_setup=setup, matching_score=process) @pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys())) def test_scan_single_multi_core(self, score): matching_data = MatchingData(target=self.target, template=self.template) matching_data.target_mask = self.target matching_data.template_mask = self.template matching_data.rotations = self.rotations setup, process = MATCHING_EXHAUSTIVE_REGISTER[score] scan( matching_data=matching_data, matching_setup=setup, matching_score=process, n_jobs=2, ) @pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys())) def test_scan_subsets_single_core(self, score): matching_data = MatchingData(target=self.target, template=self.template) matching_data.target_mask = self.target matching_data.template_mask = self.template matching_data.rotations = self.rotations setup, process = MATCHING_EXHAUSTIVE_REGISTER[score] target_splits = {i: 1 for i in range(self.target.ndim)} template_splits = {i: 1 for i in range(self.target.ndim)} target_splits[0], template_splits[1] = 2, 2 scan_subsets( matching_data=matching_data, matching_setup=setup, matching_score=process, target_splits=target_splits, template_splits=template_splits, job_schedule=(2, 1), ) @pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys())) def test_scan_subsets_single_multi_core(self, score): matching_data = MatchingData(target=self.target, template=self.template) matching_data.target_mask = self.target matching_data.template_mask = self.template matching_data.rotations = self.rotations setup, process = MATCHING_EXHAUSTIVE_REGISTER[score] target_splits = {i: 1 for i in range(self.target.ndim)} template_splits = {i: 1 for i in range(self.target.ndim)} target_splits[0], template_splits[1] = 2, 2 scan_subsets( matching_data=matching_data, matching_setup=setup, matching_score=process, target_splits=target_splits, template_splits=template_splits, job_schedule=(2, 1), ) @pytest.mark.parametrize("score", list(MATCHING_EXHAUSTIVE_REGISTER.keys())) def test_scan_subsets_single_multi_core_both(self, score): matching_data = MatchingData(target=self.target, template=self.template) matching_data.target_mask = self.target matching_data.template_mask = self.template matching_data.rotations = self.rotations setup, process = MATCHING_EXHAUSTIVE_REGISTER[score] target_splits = {i: 1 for i in range(self.target.ndim)} template_splits = {i: 1 for i in range(self.target.ndim)} target_splits[0], template_splits[1] = 2, 2 scan_subsets( matching_data=matching_data, matching_setup=setup, matching_score=process, target_splits=target_splits, template_splits=template_splits, job_schedule=(2, 2), ) def test_register_matching_exhaustive(self): setup, matching = 
MATCHING_EXHAUSTIVE_REGISTER[ list(MATCHING_EXHAUSTIVE_REGISTER.keys())[0] ]
memory_class = MATCHING_MEMORY_REGISTRY[
6
2023-10-20 13:46:01+00:00
12k
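The pyTME row above also illustrates how gold_snippet_index appears to relate to the context list: its context has seven entries (scan, scan_subsets, MATCHING_EXHAUSTIVE_REGISTER, register_matching_exhaustive, MatchingData, get_rotation_matrices, MATCHING_MEMORY_REGISTRY), and the index 6 points at the last of them, which is exactly the symbol used in its next_line (`memory_class = MATCHING_MEMORY_REGISTRY[`). A small helper, assuming the context entries are dicts with "identifier", "path" and "snippet" keys as shown and that the index is 0-based:

def gold_snippet(record: dict) -> dict:
    # Return the context entry that gold_snippet_index points at.
    # Assumes record["context"] is the list of {"identifier", "path", "snippet"}
    # entries shown above and that the index is 0-based.
    return record["context"][record["gold_snippet_index"]]

# For the pyTME row this would select the MATCHING_MEMORY_REGISTRY entry,
# matching the symbol that appears in its next_line.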
tonnetonne814/MB-iSTFT-BERT-VITS2-44100-Ja
text/chinese.py
[ { "identifier": "punctuation", "path": "text/symbols.py", "snippet": "" }, { "identifier": "ToneSandhi", "path": "text/tone_sandhi.py", "snippet": "class ToneSandhi:\n def __init__(self):\n self.must_neural_tone_words = {\n \"麻烦\",\n \"麻利\",\n \"鸳鸯\",\n \"高粱\",\n \"骨头\",\n \"骆驼\",\n \"马虎\",\n \"首饰\",\n \"馒头\",\n \"馄饨\",\n \"风筝\",\n \"难为\",\n \"队伍\",\n \"阔气\",\n \"闺女\",\n \"门道\",\n \"锄头\",\n \"铺盖\",\n \"铃铛\",\n \"铁匠\",\n \"钥匙\",\n \"里脊\",\n \"里头\",\n \"部分\",\n \"那么\",\n \"道士\",\n \"造化\",\n \"迷糊\",\n \"连累\",\n \"这么\",\n \"这个\",\n \"运气\",\n \"过去\",\n \"软和\",\n \"转悠\",\n \"踏实\",\n \"跳蚤\",\n \"跟头\",\n \"趔趄\",\n \"财主\",\n \"豆腐\",\n \"讲究\",\n \"记性\",\n \"记号\",\n \"认识\",\n \"规矩\",\n \"见识\",\n \"裁缝\",\n \"补丁\",\n \"衣裳\",\n \"衣服\",\n \"衙门\",\n \"街坊\",\n \"行李\",\n \"行当\",\n \"蛤蟆\",\n \"蘑菇\",\n \"薄荷\",\n \"葫芦\",\n \"葡萄\",\n \"萝卜\",\n \"荸荠\",\n \"苗条\",\n \"苗头\",\n \"苍蝇\",\n \"芝麻\",\n \"舒服\",\n \"舒坦\",\n \"舌头\",\n \"自在\",\n \"膏药\",\n \"脾气\",\n \"脑袋\",\n \"脊梁\",\n \"能耐\",\n \"胳膊\",\n \"胭脂\",\n \"胡萝\",\n \"胡琴\",\n \"胡同\",\n \"聪明\",\n \"耽误\",\n \"耽搁\",\n \"耷拉\",\n \"耳朵\",\n \"老爷\",\n \"老实\",\n \"老婆\",\n \"老头\",\n \"老太\",\n \"翻腾\",\n \"罗嗦\",\n \"罐头\",\n \"编辑\",\n \"结实\",\n \"红火\",\n \"累赘\",\n \"糨糊\",\n \"糊涂\",\n \"精神\",\n \"粮食\",\n \"簸箕\",\n \"篱笆\",\n \"算计\",\n \"算盘\",\n \"答应\",\n \"笤帚\",\n \"笑语\",\n \"笑话\",\n \"窟窿\",\n \"窝囊\",\n \"窗户\",\n \"稳当\",\n \"稀罕\",\n \"称呼\",\n \"秧歌\",\n \"秀气\",\n \"秀才\",\n \"福气\",\n \"祖宗\",\n \"砚台\",\n \"码头\",\n \"石榴\",\n \"石头\",\n \"石匠\",\n \"知识\",\n \"眼睛\",\n \"眯缝\",\n \"眨巴\",\n \"眉毛\",\n \"相声\",\n \"盘算\",\n \"白净\",\n \"痢疾\",\n \"痛快\",\n \"疟疾\",\n \"疙瘩\",\n \"疏忽\",\n \"畜生\",\n \"生意\",\n \"甘蔗\",\n \"琵琶\",\n \"琢磨\",\n \"琉璃\",\n \"玻璃\",\n \"玫瑰\",\n \"玄乎\",\n \"狐狸\",\n \"状元\",\n \"特务\",\n \"牲口\",\n \"牙碜\",\n \"牌楼\",\n \"爽快\",\n \"爱人\",\n \"热闹\",\n \"烧饼\",\n \"烟筒\",\n \"烂糊\",\n \"点心\",\n \"炊帚\",\n \"灯笼\",\n \"火候\",\n \"漂亮\",\n \"滑溜\",\n \"溜达\",\n \"温和\",\n \"清楚\",\n \"消息\",\n \"浪头\",\n \"活泼\",\n \"比方\",\n \"正经\",\n \"欺负\",\n \"模糊\",\n \"槟榔\",\n \"棺材\",\n \"棒槌\",\n \"棉花\",\n \"核桃\",\n \"栅栏\",\n \"柴火\",\n \"架势\",\n \"枕头\",\n \"枇杷\",\n \"机灵\",\n \"本事\",\n \"木头\",\n \"木匠\",\n \"朋友\",\n \"月饼\",\n \"月亮\",\n \"暖和\",\n \"明白\",\n \"时候\",\n \"新鲜\",\n \"故事\",\n \"收拾\",\n \"收成\",\n \"提防\",\n \"挖苦\",\n \"挑剔\",\n \"指甲\",\n \"指头\",\n \"拾掇\",\n \"拳头\",\n \"拨弄\",\n \"招牌\",\n \"招呼\",\n \"抬举\",\n \"护士\",\n \"折腾\",\n \"扫帚\",\n \"打量\",\n \"打算\",\n \"打点\",\n \"打扮\",\n \"打听\",\n \"打发\",\n \"扎实\",\n \"扁担\",\n \"戒指\",\n \"懒得\",\n \"意识\",\n \"意思\",\n \"情形\",\n \"悟性\",\n \"怪物\",\n \"思量\",\n \"怎么\",\n \"念头\",\n \"念叨\",\n \"快活\",\n \"忙活\",\n \"志气\",\n \"心思\",\n \"得罪\",\n \"张罗\",\n \"弟兄\",\n \"开通\",\n \"应酬\",\n \"庄稼\",\n \"干事\",\n \"帮手\",\n \"帐篷\",\n \"希罕\",\n \"师父\",\n \"师傅\",\n \"巴结\",\n \"巴掌\",\n \"差事\",\n \"工夫\",\n \"岁数\",\n \"屁股\",\n \"尾巴\",\n \"少爷\",\n \"小气\",\n \"小伙\",\n \"将就\",\n \"对头\",\n \"对付\",\n \"寡妇\",\n \"家伙\",\n \"客气\",\n \"实在\",\n \"官司\",\n \"学问\",\n \"学生\",\n \"字号\",\n \"嫁妆\",\n \"媳妇\",\n \"媒人\",\n \"婆家\",\n \"娘家\",\n \"委屈\",\n \"姑娘\",\n \"姐夫\",\n \"妯娌\",\n \"妥当\",\n \"妖精\",\n \"奴才\",\n \"女婿\",\n \"头发\",\n \"太阳\",\n \"大爷\",\n \"大方\",\n \"大意\",\n \"大夫\",\n \"多少\",\n \"多么\",\n \"外甥\",\n \"壮实\",\n \"地道\",\n \"地方\",\n \"在乎\",\n \"困难\",\n \"嘴巴\",\n \"嘱咐\",\n \"嘟囔\",\n \"嘀咕\",\n \"喜欢\",\n \"喇嘛\",\n \"喇叭\",\n \"商量\",\n \"唾沫\",\n \"哑巴\",\n \"哈欠\",\n \"哆嗦\",\n \"咳嗽\",\n \"和尚\",\n \"告诉\",\n \"告示\",\n \"含糊\",\n \"吓唬\",\n \"后头\",\n \"名字\",\n \"名堂\",\n \"合同\",\n \"吆喝\",\n \"叫唤\",\n \"口袋\",\n \"厚道\",\n \"厉害\",\n \"千斤\",\n \"包袱\",\n \"包涵\",\n \"匀称\",\n \"勤快\",\n \"动静\",\n \"动弹\",\n \"功夫\",\n \"力气\",\n 
\"前头\",\n \"刺猬\",\n \"刺激\",\n \"别扭\",\n \"利落\",\n \"利索\",\n \"利害\",\n \"分析\",\n \"出息\",\n \"凑合\",\n \"凉快\",\n \"冷战\",\n \"冤枉\",\n \"冒失\",\n \"养活\",\n \"关系\",\n \"先生\",\n \"兄弟\",\n \"便宜\",\n \"使唤\",\n \"佩服\",\n \"作坊\",\n \"体面\",\n \"位置\",\n \"似的\",\n \"伙计\",\n \"休息\",\n \"什么\",\n \"人家\",\n \"亲戚\",\n \"亲家\",\n \"交情\",\n \"云彩\",\n \"事情\",\n \"买卖\",\n \"主意\",\n \"丫头\",\n \"丧气\",\n \"两口\",\n \"东西\",\n \"东家\",\n \"世故\",\n \"不由\",\n \"不在\",\n \"下水\",\n \"下巴\",\n \"上头\",\n \"上司\",\n \"丈夫\",\n \"丈人\",\n \"一辈\",\n \"那个\",\n \"菩萨\",\n \"父亲\",\n \"母亲\",\n \"咕噜\",\n \"邋遢\",\n \"费用\",\n \"冤家\",\n \"甜头\",\n \"介绍\",\n \"荒唐\",\n \"大人\",\n \"泥鳅\",\n \"幸福\",\n \"熟悉\",\n \"计划\",\n \"扑腾\",\n \"蜡烛\",\n \"姥爷\",\n \"照顾\",\n \"喉咙\",\n \"吉他\",\n \"弄堂\",\n \"蚂蚱\",\n \"凤凰\",\n \"拖沓\",\n \"寒碜\",\n \"糟蹋\",\n \"倒腾\",\n \"报复\",\n \"逻辑\",\n \"盘缠\",\n \"喽啰\",\n \"牢骚\",\n \"咖喱\",\n \"扫把\",\n \"惦记\",\n }\n self.must_not_neural_tone_words = {\n \"男子\",\n \"女子\",\n \"分子\",\n \"原子\",\n \"量子\",\n \"莲子\",\n \"石子\",\n \"瓜子\",\n \"电子\",\n \"人人\",\n \"虎虎\",\n }\n self.punc = \":,;。?!“”‘’':,;.?!\"\n\n # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041\n # e.g.\n # word: \"家里\"\n # pos: \"s\"\n # finals: ['ia1', 'i3']\n def _neural_sandhi(self, word: str, pos: str, finals: List[str]) -> List[str]:\n # reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺\n for j, item in enumerate(word):\n if (\n j - 1 >= 0\n and item == word[j - 1]\n and pos[0] in {\"n\", \"v\", \"a\"}\n and word not in self.must_not_neural_tone_words\n ):\n finals[j] = finals[j][:-1] + \"5\"\n ge_idx = word.find(\"个\")\n if len(word) >= 1 and word[-1] in \"吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶\":\n finals[-1] = finals[-1][:-1] + \"5\"\n elif len(word) >= 1 and word[-1] in \"的地得\":\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 走了, 看着, 去过\n # elif len(word) == 1 and word in \"了着过\" and pos in {\"ul\", \"uz\", \"ug\"}:\n # finals[-1] = finals[-1][:-1] + \"5\"\n elif (\n len(word) > 1\n and word[-1] in \"们子\"\n and pos in {\"r\", \"n\"}\n and word not in self.must_not_neural_tone_words\n ):\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 桌上, 地下, 家里\n elif len(word) > 1 and word[-1] in \"上下里\" and pos in {\"s\", \"l\", \"f\"}:\n finals[-1] = finals[-1][:-1] + \"5\"\n # e.g. 上来, 下去\n elif len(word) > 1 and word[-1] in \"来去\" and word[-2] in \"上下进出回过起开\":\n finals[-1] = finals[-1][:-1] + \"5\"\n # 个做量词\n elif (\n ge_idx >= 1\n and (word[ge_idx - 1].isnumeric() or word[ge_idx - 1] in \"几有两半多各整每做是\")\n ) or word == \"个\":\n finals[ge_idx] = finals[ge_idx][:-1] + \"5\"\n else:\n if (\n word in self.must_neural_tone_words\n or word[-2:] in self.must_neural_tone_words\n ):\n finals[-1] = finals[-1][:-1] + \"5\"\n\n word_list = self._split_word(word)\n finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]\n for i, word in enumerate(word_list):\n # conventional neural in Chinese\n if (\n word in self.must_neural_tone_words\n or word[-2:] in self.must_neural_tone_words\n ):\n finals_list[i][-1] = finals_list[i][-1][:-1] + \"5\"\n finals = sum(finals_list, [])\n return finals\n\n def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]:\n # e.g. 看不懂\n if len(word) == 3 and word[1] == \"不\":\n finals[1] = finals[1][:-1] + \"5\"\n else:\n for i, char in enumerate(word):\n # \"不\" before tone4 should be bu2, e.g. 
不怕\n if char == \"不\" and i + 1 < len(word) and finals[i + 1][-1] == \"4\":\n finals[i] = finals[i][:-1] + \"2\"\n return finals\n\n def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]:\n # \"一\" in number sequences, e.g. 一零零, 二一零\n if word.find(\"一\") != -1 and all(\n [item.isnumeric() for item in word if item != \"一\"]\n ):\n return finals\n # \"一\" between reduplication words should be yi5, e.g. 看一看\n elif len(word) == 3 and word[1] == \"一\" and word[0] == word[-1]:\n finals[1] = finals[1][:-1] + \"5\"\n # when \"一\" is ordinal word, it should be yi1\n elif word.startswith(\"第一\"):\n finals[1] = finals[1][:-1] + \"1\"\n else:\n for i, char in enumerate(word):\n if char == \"一\" and i + 1 < len(word):\n # \"一\" before tone4 should be yi2, e.g. 一段\n if finals[i + 1][-1] == \"4\":\n finals[i] = finals[i][:-1] + \"2\"\n # \"一\" before non-tone4 should be yi4, e.g. 一天\n else:\n # \"一\" 后面如果是标点,还读一声\n if word[i + 1] not in self.punc:\n finals[i] = finals[i][:-1] + \"4\"\n return finals\n\n def _split_word(self, word: str) -> List[str]:\n word_list = jieba.cut_for_search(word)\n word_list = sorted(word_list, key=lambda i: len(i), reverse=False)\n first_subword = word_list[0]\n first_begin_idx = word.find(first_subword)\n if first_begin_idx == 0:\n second_subword = word[len(first_subword) :]\n new_word_list = [first_subword, second_subword]\n else:\n second_subword = word[: -len(first_subword)]\n new_word_list = [second_subword, first_subword]\n return new_word_list\n\n def _three_sandhi(self, word: str, finals: List[str]) -> List[str]:\n if len(word) == 2 and self._all_tone_three(finals):\n finals[0] = finals[0][:-1] + \"2\"\n elif len(word) == 3:\n word_list = self._split_word(word)\n if self._all_tone_three(finals):\n # disyllabic + monosyllabic, e.g. 蒙古/包\n if len(word_list[0]) == 2:\n finals[0] = finals[0][:-1] + \"2\"\n finals[1] = finals[1][:-1] + \"2\"\n # monosyllabic + disyllabic, e.g. 纸/老虎\n elif len(word_list[0]) == 1:\n finals[1] = finals[1][:-1] + \"2\"\n else:\n finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]\n if len(finals_list) == 2:\n for i, sub in enumerate(finals_list):\n # e.g. 所有/人\n if self._all_tone_three(sub) and len(sub) == 2:\n finals_list[i][0] = finals_list[i][0][:-1] + \"2\"\n # e.g. 好/喜欢\n elif (\n i == 1\n and not self._all_tone_three(sub)\n and finals_list[i][0][-1] == \"3\"\n and finals_list[0][-1][-1] == \"3\"\n ):\n finals_list[0][-1] = finals_list[0][-1][:-1] + \"2\"\n finals = sum(finals_list, [])\n # split idiom into two words who's length is 2\n elif len(word) == 4:\n finals_list = [finals[:2], finals[2:]]\n finals = []\n for sub in finals_list:\n if self._all_tone_three(sub):\n sub[0] = sub[0][:-1] + \"2\"\n finals += sub\n\n return finals\n\n def _all_tone_three(self, finals: List[str]) -> bool:\n return all(x[-1] == \"3\" for x in finals)\n\n # merge \"不\" and the word behind it\n # if don't merge, \"不\" sometimes appears alone according to jieba, which may occur sandhi error\n def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n last_word = \"\"\n for word, pos in seg:\n if last_word == \"不\":\n word = last_word + word\n if word != \"不\":\n new_seg.append((word, pos))\n last_word = word[:]\n if last_word == \"不\":\n new_seg.append((last_word, \"d\"))\n last_word = \"\"\n return new_seg\n\n # function 1: merge \"一\" and reduplication words in it's left and right, e.g. 
\"听\",\"一\",\"听\" ->\"听一听\"\n # function 2: merge single \"一\" and the word behind it\n # if don't merge, \"一\" sometimes appears alone according to jieba, which may occur sandhi error\n # e.g.\n # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')]\n # output seg: [['听一听', 'v']]\n def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n # function 1\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and word == \"一\"\n and i + 1 < len(seg)\n and seg[i - 1][0] == seg[i + 1][0]\n and seg[i - 1][1] == \"v\"\n ):\n new_seg[i - 1][0] = new_seg[i - 1][0] + \"一\" + new_seg[i - 1][0]\n else:\n if (\n i - 2 >= 0\n and seg[i - 1][0] == \"一\"\n and seg[i - 2][0] == word\n and pos == \"v\"\n ):\n continue\n else:\n new_seg.append([word, pos])\n seg = new_seg\n new_seg = []\n # function 2\n for i, (word, pos) in enumerate(seg):\n if new_seg and new_seg[-1][0] == \"一\":\n new_seg[-1][0] = new_seg[-1][0] + word\n else:\n new_seg.append([word, pos])\n return new_seg\n\n # the first and the second words are all_tone_three\n def _merge_continuous_three_tones(\n self, seg: List[Tuple[str, str]]\n ) -> List[Tuple[str, str]]:\n new_seg = []\n sub_finals_list = [\n lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)\n for (word, pos) in seg\n ]\n assert len(sub_finals_list) == len(seg)\n merge_last = [False] * len(seg)\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and self._all_tone_three(sub_finals_list[i - 1])\n and self._all_tone_three(sub_finals_list[i])\n and not merge_last[i - 1]\n ):\n # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi\n if (\n not self._is_reduplication(seg[i - 1][0])\n and len(seg[i - 1][0]) + len(seg[i][0]) <= 3\n ):\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n merge_last[i] = True\n else:\n new_seg.append([word, pos])\n else:\n new_seg.append([word, pos])\n\n return new_seg\n\n def _is_reduplication(self, word: str) -> bool:\n return len(word) == 2 and word[0] == word[1]\n\n # the last char of first word and the first char of second word is tone_three\n def _merge_continuous_three_tones_2(\n self, seg: List[Tuple[str, str]]\n ) -> List[Tuple[str, str]]:\n new_seg = []\n sub_finals_list = [\n lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)\n for (word, pos) in seg\n ]\n assert len(sub_finals_list) == len(seg)\n merge_last = [False] * len(seg)\n for i, (word, pos) in enumerate(seg):\n if (\n i - 1 >= 0\n and sub_finals_list[i - 1][-1][-1] == \"3\"\n and sub_finals_list[i][0][-1] == \"3\"\n and not merge_last[i - 1]\n ):\n # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi\n if (\n not self._is_reduplication(seg[i - 1][0])\n and len(seg[i - 1][0]) + len(seg[i][0]) <= 3\n ):\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n merge_last[i] = True\n else:\n new_seg.append([word, pos])\n else:\n new_seg.append([word, pos])\n return new_seg\n\n def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n for i, (word, pos) in enumerate(seg):\n if i - 1 >= 0 and word == \"儿\" and seg[i - 1][0] != \"#\":\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n else:\n new_seg.append([word, pos])\n return new_seg\n\n def _merge_reduplication(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n new_seg = []\n for i, (word, pos) in enumerate(seg):\n if new_seg and word == new_seg[-1][0]:\n new_seg[-1][0] = new_seg[-1][0] + seg[i][0]\n else:\n new_seg.append([word, pos])\n 
return new_seg\n\n def pre_merge_for_modify(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:\n seg = self._merge_bu(seg)\n try:\n seg = self._merge_yi(seg)\n except:\n print(\"_merge_yi failed\")\n seg = self._merge_reduplication(seg)\n seg = self._merge_continuous_three_tones(seg)\n seg = self._merge_continuous_three_tones_2(seg)\n seg = self._merge_er(seg)\n return seg\n\n def modified_tone(self, word: str, pos: str, finals: List[str]) -> List[str]:\n finals = self._bu_sandhi(word, finals)\n finals = self._yi_sandhi(word, finals)\n finals = self._neural_sandhi(word, pos, finals)\n finals = self._three_sandhi(word, finals)\n return finals" } ]
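The "不"/"一" tone-change rules implemented in the ToneSandhi snippet above boil down to a small lookahead over per-syllable finals: "不" takes tone 2 before a tone-4 syllable, "一" takes tone 2 before tone 4 and tone 4 otherwise (with number sequences, ordinals and reduplications handled separately). A minimal, self-contained sketch of just that lookahead, assuming finals are strings ending in a tone digit 1-5 (as produced by pypinyin's FINALS_TONE3 style); the function name is illustrative and not part of the snippet's API:

from typing import List

def yi_bu_sandhi(word: str, finals: List[str]) -> List[str]:
    # finals[i] is the pinyin final of word[i], ending in a tone digit 1-5,
    # e.g. "一定" -> ["i1", "ing4"].
    finals = list(finals)
    for i, char in enumerate(word):
        if i + 1 >= len(word):
            continue
        next_tone = finals[i + 1][-1]
        if char == "不":
            # "不" becomes tone 2 before a tone-4 syllable: 不怕 -> bu2 pa4
            if next_tone == "4":
                finals[i] = finals[i][:-1] + "2"
        elif char == "一":
            # reduplications like 看一看 (neutral tone) are handled elsewhere;
            # here: tone 2 before tone 4 (一段 -> yi2 duan4),
            # otherwise tone 4 (一天 -> yi4 tian1)
            finals[i] = finals[i][:-1] + ("2" if next_tone == "4" else "4")
    return finals

if __name__ == "__main__":
    print(yi_bu_sandhi("不怕", ["u4", "a4"]))    # ['u2', 'a4']
    print(yi_bu_sandhi("一定", ["i1", "ing4"]))  # ['i2', 'ing4']
    print(yi_bu_sandhi("一天", ["i1", "ian1"]))  # ['i4', 'ian1']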
import os import re import cn2an import jieba.posseg as psg from pypinyin import lazy_pinyin, Style from text.symbols import punctuation from text.tone_sandhi import ToneSandhi from text import chinese_bert from text.chinese_bert import get_bert_feature
7,663
current_file_path = os.path.dirname(__file__) pinyin_to_symbol_map = { line.split("\t")[0]: line.strip().split("\t")[1] for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines() } rep_map = { ":": ",", ";": ",", ",": ",", "。": ".", "!": "!", "?": "?", "\n": ".", "·": ",", "、": ",", "...": "…", "$": ".", "“": "'", "”": "'", "‘": "'", "’": "'", "(": "'", ")": "'", "(": "'", ")": "'", "《": "'", "》": "'", "【": "'", "】": "'", "[": "'", "]": "'", "—": "-", "~": "-", "~": "-", "「": "'", "」": "'", } tone_modifier = ToneSandhi() def replace_punctuation(text): text = text.replace("嗯", "恩").replace("呣", "母") pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys())) replaced_text = pattern.sub(lambda x: rep_map[x.group()], text) replaced_text = re.sub(
current_file_path = os.path.dirname(__file__) pinyin_to_symbol_map = { line.split("\t")[0]: line.strip().split("\t")[1] for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines() } rep_map = { ":": ",", ";": ",", ",": ",", "。": ".", "!": "!", "?": "?", "\n": ".", "·": ",", "、": ",", "...": "…", "$": ".", "“": "'", "”": "'", "‘": "'", "’": "'", "(": "'", ")": "'", "(": "'", ")": "'", "《": "'", "》": "'", "【": "'", "】": "'", "[": "'", "]": "'", "—": "-", "~": "-", "~": "-", "「": "'", "」": "'", } tone_modifier = ToneSandhi() def replace_punctuation(text): text = text.replace("嗯", "恩").replace("呣", "母") pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys())) replaced_text = pattern.sub(lambda x: rep_map[x.group()], text) replaced_text = re.sub(
r"[^\u4e00-\u9fa5" + "".join(punctuation) + r"]+", "", replaced_text
0
2023-10-16 10:04:32+00:00
12k
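The cropped chinese.py code in this record normalizes punctuation by compiling one alternation regex over rep_map and then stripping every character outside the CJK range plus the kept punctuation. A minimal sketch of that normalization, assuming a reduced rep_map and a hard-coded punctuation set instead of the project's text.symbols import:

import re

# reduced stand-in for the record's rep_map and punctuation set
rep_map = {":": ",", ";": ",", ",": ",", "。": ".", "!": "!", "?": "?", "、": ","}
punctuation = [",", ".", "!", "?", "…", "'", "-"]

def replace_punctuation(text: str) -> str:
    # map full-width punctuation to its replacement in one pass
    pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys()))
    replaced = pattern.sub(lambda m: rep_map[m.group()], text)
    # drop every character that is neither a CJK ideograph nor kept punctuation
    keep = "".join(re.escape(p) for p in punctuation)
    return re.sub(r"[^\u4e00-\u9fa5" + keep + r"]+", "", replaced)

if __name__ == "__main__":
    print(replace_punctuation("你好,世界!Hello"))  # -> 你好,世界!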
cfs-energy/cfspopcon
cfspopcon/formulas/radiated_power/mavrin_noncoronal.py
[ { "identifier": "Impurity", "path": "cfspopcon/named_options.py", "snippet": "class Impurity(Enum):\n \"\"\"Enum of possible impurity elements.\n\n The enum value represents the element's atomic number (Z).\n \"\"\"\n\n Helium = 2\n Lithium = 3\n Beryllium = 4\n Carbon = 6\n Nitrogen = 7\n Oxygen = 8\n Neon = 10\n Argon = 18\n Krypton = 36\n Xenon = 54\n Tungsten = 74" }, { "identifier": "wraps_ufunc", "path": "cfspopcon/unit_handling/decorator.py", "snippet": "def wraps_ufunc( # noqa: PLR0915\n input_units: dict[str, Union[str, Unit, None]],\n return_units: dict[str, Union[str, Unit, None]],\n pass_as_kwargs: tuple = (),\n # kwargs for apply_ufunc\n input_core_dims: Optional[Sequence[Sequence]] = None,\n output_core_dims: Optional[Sequence[Sequence]] = ((),),\n exclude_dims: Set = frozenset(),\n vectorize: bool = True,\n join: str = \"exact\",\n dataset_join: str = \"exact\",\n keep_attrs: str = \"drop_conflicts\",\n dask: str = \"forbidden\",\n output_dtypes: Optional[Sequence] = None,\n output_sizes: Optional[Mapping[Any, int]] = None,\n dask_gufunc_kwargs: Optional[dict[str, Any]] = None,\n) -> FunctionType:\n \"\"\"Decorator for functions to add in unit and dimension handling.\n\n input_units and return_units must be provided, as dictionaries giving\n a mapping between the function arguments/returns and their units.\n\n pass_as_kwargs can be used to optionally declare that specific arguments\n should be pass directly into the function, rather than vectorized.\n\n The remaining arguments for the wrapper correspond to arguments for\n xr.apply_ufunc.\n https://docs.xarray.dev/en/stable/examples/apply_ufunc_vectorize_1d.html\n \"\"\"\n input_units = _check_units(input_units)\n return_units = _check_units(return_units)\n\n ufunc_kwargs: dict[str, Any] = dict(\n input_core_dims=input_core_dims,\n output_core_dims=output_core_dims,\n exclude_dims=exclude_dims,\n vectorize=vectorize,\n join=join,\n dataset_join=dataset_join,\n keep_attrs=keep_attrs,\n dask=dask,\n output_dtypes=output_dtypes,\n output_sizes=output_sizes,\n dask_gufunc_kwargs=dask_gufunc_kwargs,\n )\n input_keys = list(input_units.keys())\n\n if not isinstance(pass_as_kwargs, tuple):\n raise ValueError(f\"pass_as_kwargs must be passed as a tuple of keys, not {str(type(pass_as_kwargs))[1:-1]}\")\n\n pass_as_positional_args = [key for key in input_keys if key not in pass_as_kwargs]\n for arg in pass_as_kwargs:\n kwarg_position = input_keys.index(arg)\n if kwarg_position < len(pass_as_positional_args):\n raise ValueError(f\"Argument {arg} in pass_as_kwargs appears before the positional args {pass_as_positional_args}\")\n\n if input_core_dims is not None:\n if not len(input_core_dims) == len(pass_as_positional_args):\n raise ValueError(\n f\"input_core_dims (len {len(input_core_dims)}) must the same length as positional_args ({pass_as_positional_args}, len {len(pass_as_positional_args)})\"\n )\n else:\n input_core_dims = len(pass_as_positional_args) * [()]\n\n def _wraps_ufunc(func: FunctionType) -> FunctionType:\n\n func_signature = signature(func)\n func_parameters = func_signature.parameters\n\n if not list(input_units.keys()) == list(func_parameters.keys()):\n raise ValueError(\n f\"Keys for input_units {input_units.keys()} did not match func_parameters {func_parameters.keys()} (n.b. 
order matters!)\"\n )\n\n default_values = {key: val.default for key, val in func_parameters.items() if val.default is not Parameter.empty}\n\n @functools.wraps(func)\n def popcon_ufunc_wrapped_call(*args: Any, **kwargs: Any) -> Any: # noqa: PLR0912\n \"\"\"Transform args and kwargs, then call the inner function.\"\"\"\n # if anything goes wrong we can do some extra work to provide a better error below\n try:\n args_dict = dict(zip(input_keys, args))\n\n if not set(args_dict.keys()).isdisjoint(kwargs.keys()):\n raise RuntimeError(\n f\"{func.__name__} was called with repeat arguments. Input was interpreted as args={args_dict}, kwargs={kwargs}\"\n )\n\n args_dict = {**args_dict, **kwargs}\n args_dict = {**args_dict, **{key: val for key, val in default_values.items() if key not in args_dict.keys()}}\n\n args_dict = _return_magnitude_in_specified_units(args_dict, input_units)\n\n positional_args = []\n for i, key in enumerate(pass_as_positional_args):\n arg = args_dict[key]\n if not isinstance(arg, xr.DataArray):\n positional_args.append(xr.DataArray(arg).expand_dims(input_core_dims[i]))\n else:\n positional_args.append(arg)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", category=UnitStrippedWarning)\n function_return = xr.apply_ufunc(\n func,\n *positional_args,\n kwargs={key: args_dict[key] for key in pass_as_kwargs},\n **ufunc_kwargs,\n )\n\n if len(return_units) == 0:\n # Assume that the function return None\n return function_return.item()\n\n function_return = _convert_return_to_quantities(function_return, return_units)\n\n function_return = list(function_return.values())\n\n if len(function_return) > 1:\n return tuple(function_return)\n else:\n return function_return[0]\n\n except Exception as e:\n # the below checks if we are inside FunctionWrapper being called from another FunctionWrapper\n # if that is the case we try and give a more helpful error\n # if anything goes wrong in our frame inspection or we find that we aren't in a chained\n # call we raise the previous exception\n err = \"\"\n try:\n import inspect\n\n frames = inspect.getouterframes(inspect.currentframe())\n # the first entry is the current call so check if any of the earlier callees are a __call__ from a FunctionWrapper\n for frame in frames[1:]:\n if frame.function == \"popcon_ufunc_wrapped_call\":\n f = frames[1]\n err = \"Calling `wraps_ufunc` decorated function from within `wraps_ufunc` decorated function is not allowed!\\n\"\n err += f\"Error at {f.filename}:{f.lineno}\\n\"\n err += \"\\n\".join(f.code_context) if f.code_context else \"\"\n err += f\"Try using `{frames[0].frame.f_locals['func'].__name__}.unitless_func(...)` instead.\"\n break\n except Exception:\n # error while determining if we are withing a chained FunctionWrapper so re-raise original error\n raise e from None\n\n # if err is not empty we have determined we are within a chained call so we raise a better error\n if err:\n raise RuntimeError(err) from None\n else:\n raise e\n\n # more meaningfull alias to the scalar non-unit version of the function\n popcon_ufunc_wrapped_call.unitless_func = popcon_ufunc_wrapped_call.__wrapped__ # type:ignore[attr-defined]\n popcon_ufunc_wrapped_call.__signature__ = _make_new_sig(func_signature, input_units, return_units) # type:ignore[attr-defined]\n return popcon_ufunc_wrapped_call\n\n return _wraps_ufunc" }, { "identifier": "Quantity", "path": "cfspopcon/unit_handling/setup_unit_handling.py", "snippet": "def suppress_downcast_warning(func: Callable[Params, Ret]) -> Callable[Params, 
Ret]:\n def wrapper(*args: Params.args, **kwargs: Params.kwargs) -> Ret:\ndef convert_units(array: xr.DataArray, units: Union[str, pint.Unit]) -> xr.DataArray:\ndef convert_units(array: pint.Quantity, units: Union[str, pint.Unit]) -> pint.Quantity:\ndef convert_units(array: Union[xr.DataArray, pint.Quantity], units: Any) -> Union[xr.DataArray, pint.Quantity]:\ndef magnitude(array: Union[xr.DataArray, pint.Quantity]) -> Union[npt.NDArray[np.float32], float]:\ndef dimensionless_magnitude(array: Union[xr.DataArray, pint.Quantity]) -> Union[npt.NDArray[np.float32], float]:" }, { "identifier": "integrate_profile_over_volume", "path": "cfspopcon/formulas/helpers.py", "snippet": "def integrate_profile_over_volume(\n array_per_m3: NDArray[float64],\n rho: NDArray[float64],\n plasma_volume: float,\n) -> float:\n \"\"\"Approximate the volume integral of a profile given as a function of rho.\n\n Args:\n array_per_m3: a profile of values [units * m^-3]\n rho: [~] :term:`glossary link<rho>`\n plasma_volume: [m^3] :term:`glossary link<plasma_volume>`\n\n Returns:\n volume_integrated_value [units]\n \"\"\"\n drho = rho[1] - rho[0]\n return float(np.sum(array_per_m3 * 2.0 * rho * drho) * plasma_volume)" } ]
import warnings import numpy as np from numpy import float64 from numpy.typing import NDArray from ...named_options import Impurity from ...unit_handling import Quantity, ureg, wraps_ufunc from ..helpers import integrate_profile_over_volume
8,147
] ) elif impurity_Z == 7: # Nitrogen temperature_bin_borders = np.array([1.0, 10.0, 30.0, 100.0, 300.0, 1000.0, 15000.0]) radc = np.array( [ [-3.5312e01, -5.8692e01, -2.0301e01, -7.7571e01, -2.9401e01, -2.7201e01], [7.1926e00, 6.8148e01, -8.8594e00, 5.0488e01, -3.8191e-01, -4.4640e00], [7.8200e-03, 3.6209e-01, 6.0500e00, -6.5889e00, 3.5270e00, 7.6960e-01], [-3.5696e00, -5.4257e01, -2.7129e00, -1.8187e01, -1.0347e00, 9.2450e-01], [-1.2800e-02, 1.4835e-01, -7.6700e00, 6.8691e00, -2.4192e00, -6.7720e-01], [1.1180e-02, -1.4700e-03, 1.0705e-01, 8.3119e-01, 3.2269e-01, 2.6185e-01], [3.5812e-01, 1.3476e01, 1.9691e00, 2.0259e00, 2.2501e-01, -5.6280e-02], [-2.5100e-03, -2.9646e-01, 2.3943e00, -1.7572e00, 3.9511e-01, 1.2014e-01], [-2.2020e-02, 2.2706e-01, 1.4088e-01, -2.9376e-01, 2.6510e-02, 4.6870e-02], [-1.0000e-03, 5.4220e-02, 4.7450e-02, 1.7200e-02, 7.8930e-02, 7.9250e-02], ] ) elif impurity_Z == 8: # Oxygen temperature_bin_borders = np.array([1.0, 10.0, 30.0, 100.0, 300.0, 1000.0, 15000.0]) radc = np.array( [ [-3.6208e01, -2.9057e01, -2.9370e01, -4.4120e-02, -3.7073e01, -2.5037e01], [7.5487e00, -1.5228e01, 8.7451e00, -5.4918e01, 7.8826e00, -5.7568e00], [2.3340e-02, -3.1460e00, 6.3827e00, -9.5003e00, 3.7999e00, 1.2973e00], [-2.1983e00, 2.0826e01, -1.2357e01, 2.8883e01, -3.8006e00, 1.2040e00], [-1.0131e-01, 5.9427e00, -7.6451e00, 8.5536e00, -2.2619e00, -9.1955e-01], [8.0600e-03, 1.0610e-01, -2.2230e-02, 5.5336e-01, 5.0270e-01, 2.8988e-01], [-6.5108e-01, -8.0843e00, 3.4958e00, -4.8731e00, 5.2144e-01, -7.6780e-02], [8.4570e-02, -2.6827e00, 2.2661e00, -1.9172e00, 3.0219e-01, 1.4568e-01], [-2.1710e-02, 1.0350e-02, 2.5727e-01, -1.5709e-01, -6.6330e-02, 3.9250e-02], [-2.1200e-03, 2.6480e-02, 7.7800e-02, 1.6370e-02, 6.1140e-02, 8.3010e-02], ] ) elif impurity_Z == 10: # Neon temperature_bin_borders = np.array([1.0, 10.0, 70.0, 300.0, 1000.0, 3000.0, 15000.0]) radc = np.array( [ [-3.8610e01, -3.6822e01, -6.6901e00, -1.1261e02, -2.6330e02, -1.1174e02], [1.2606e01, 4.9706e00, -2.4212e01, 8.5765e01, 2.1673e02, 6.1907e01], [1.7866e-01, -1.5334e00, 7.3589e00, -2.1093e00, 1.2973e00, 4.7967e00], [-1.0213e01, 1.1973e00, 5.7352e00, -3.0372e01, -6.7799e01, -1.6289e01], [-7.7051e-01, 2.7279e00, -7.4602e00, 2.2928e00, -7.3310e-01, -2.5731e00], [2.7510e-02, 9.0090e-02, -7.9030e-02, 7.7055e-01, 4.4883e-01, 4.2620e-01], [4.3390e00, -1.3992e00, -8.5020e-02, 3.5346e00, 7.0398e00, 1.4263e00], [6.4207e-01, -1.1084e00, 1.8679e00, -5.6062e-01, 9.3190e-02, 3.3443e-01], [-3.3560e-02, 1.3620e-02, 2.2507e-01, -1.8569e-01, -1.5390e-02, -9.3734e-04], [-1.3333e-04, 2.4300e-02, 7.1420e-02, 3.7550e-02, 7.7660e-02, 8.4220e-02], ] ) elif impurity_Z == 18: # Argon temperature_bin_borders = np.array([1.0, 10.0, 50.0, 150.0, 500.0, 1500.0, 10000.0]) radc = np.array( [ [-3.6586e01, -4.8732e01, -2.3157e01, -6.8134e01, 5.5851e01, -6.2758e01], [1.2841e01, 3.8185e01, -8.5132e00, 3.6408e01, -7.8618e01, 2.5163e01], [2.3080e-02, -7.0622e-01, 1.5617e00, -7.3868e00, 1.0520e01, -7.4717e-01], [-1.2087e01, -2.5859e01, 1.5478e00, -1.0735e01, 2.2871e01, -6.8170e00], [-9.8000e-03, 1.2850e00, -1.8880e00, 6.8800e00, -7.7061e00, 6.9486e-01], [-2.4600e-03, -6.8710e-02, 2.2830e-01, 3.1142e-01, -1.8530e-01, 4.6946e-01], [4.8823e00, 5.4372e00, 2.8279e-01, 8.0440e-01, -2.1616e00, 5.9969e-01], [-3.7470e-02, -5.2157e-01, 5.5767e-01, -1.5740e00, 1.4123e00, -1.3487e-01], [1.1100e-03, 1.4016e-01, -9.9600e-02, -9.9180e-02, 1.8409e-01, -8.1380e-02], [1.1100e-03, 1.9120e-02, -1.5280e-02, 9.4500e-03, 6.7470e-02, 2.5840e-02], ] ) else: raise RuntimeError("This should never 
happen, please ensure all impurity cases in zimp array are covered!") # If trying to evaluate for a temperature outside of the given range, assume nearest neighbor # and throw a warning if any(electron_temp_profile < temperature_bin_borders[0]) or any( electron_temp_profile > temperature_bin_borders[-1] ): # pragma: no cover warnings.warn( f"Mavrin 2017 line radiation calculation is only valid between {temperature_bin_borders[0]}eV-{temperature_bin_borders[-1]}eV. Using nearest neighbor extrapolation.", stacklevel=3, ) electron_temp_profile = np.maximum(electron_temp_profile, temperature_bin_borders[0]) electron_temp_profile = np.minimum(electron_temp_profile, temperature_bin_borders[-1]) # solve for radiated power ne_tau_i_per_m3 = electron_density_profile * tau_i X_vals = np.log10(electron_temp_profile) Y_vals = np.log10(ne_tau_i_per_m3 / 1e19) if np.any(Y_vals > 0.0): # pragma: no cover warnings.warn("Warning: treating points with ne_tau_i_per_m3 > 1e19 m^-3 s as coronal.", stacklevel=3) Y_vals = np.minimum(Y_vals, 0.0) log10_Lz = np.zeros(electron_temp_profile.size) for i, Te_test in enumerate(electron_temp_profile): X, Y = X_vals[i], Y_vals[i] for j in range(temperature_bin_borders.size - 1): Te_min, Te_max = temperature_bin_borders[j], temperature_bin_borders[j + 1] if Te_min <= Te_test <= Te_max: log10_Lz[i] = ( radc[0, j] + radc[1, j] * X + radc[2, j] * Y + radc[3, j] * X**2 + radc[4, j] * X * Y + radc[5, j] * Y**2 + radc[6, j] * X**3 + radc[7, j] * X**2 * Y + radc[8, j] * X * Y**2 + radc[9, j] * Y**3 ) continue radrate = 10.0**log10_Lz qRad = radrate * electron_density_profile * electron_density_profile * impurity_concentration # W / (m^3 s)
"""Calculate the radiated power due to impurities, according to an analytical fitted curve from Mavrin 2017.""" @wraps_ufunc( return_units=dict(radiated_power=ureg.MW), input_units=dict( rho=ureg.dimensionless, electron_temp_profile=ureg.keV, electron_density_profile=ureg.n19, tau_i=ureg.s, impurity_concentration=ureg.dimensionless, impurity_species=None, plasma_volume=ureg.m**3, ), input_core_dims=[("dim_rho",), ("dim_rho",), ("dim_rho",), (), (), (), ()], ) def calc_impurity_radiated_power_mavrin_noncoronal( # noqa: PLR0912 rho: NDArray[float64], electron_temp_profile: NDArray[float64], electron_density_profile: NDArray[float64], tau_i: Quantity, impurity_concentration: float, impurity_species: Impurity, plasma_volume: float, ) -> float: """Calculation of radiated power, using fits from A.A. Mavrin's 2017 paper. "Radiative Cooling Rates for Low-Z Impurities in Non-coronal Equilibrium State." :cite:`mavrin_radiative_2017` Args: rho: [~] :term:`glossary link<rho>` electron_temp_profile: [keV] :term:`glossary link<electron_temp_profile>` electron_density_profile: [1e19 m^-3] :term:`glossary link<electron_density_profile>` tau_i: [s] :term:`glossary link<tau_i>` impurity_concentration: [~] :term:`glossary link<impurity_concentration>` impurity_species: [] :term:`glossary link<impurity_species>` plasma_volume: [m^3] :term:`glossary link<plasma_volume>` Returns: [MW] Estimated radiation power due to this impurity """ impurity_Z = impurity_species.value # He, Li, Be, C, N, O, Ne, Ar zimp = np.array([2, 3, 4, 6, 7, 8, 10, 18]) if impurity_Z not in zimp: # pragma: no cover warnings.warn(f"Mavrin 2017 line radiation calculation not supported for impurity with Z={impurity_Z}", stacklevel=3) return np.nan # L_z coefficients for the 11 supported impurities if impurity_Z == 2: # Helium temperature_bin_borders = np.array([1.0, 3.0, 10.0, 30.0, 100.0, 15000.0]) radc = np.array( [ [-3.9341e01, -2.7185e01, -3.4950e01, -3.1299e01, -3.3203e01], [2.2742e01, -3.4465e01, 5.5957e00, -4.4749e00, -2.3306e00], [-8.5940e-02, 3.2223e-01, 2.1542e00, 2.9614e-01, -5.3911e-01], [-2.5420e01, 5.0933e01, -7.4762e00, 1.5259e00, 7.2592e-01], [1.8843e00, 1.0589e-01, -3.7391e00, -6.1433e-01, 9.7550e-02], [-3.5681e-01, 1.1632e-01, 1.4444e-01, 3.2651e-01, 2.6917e-01], [-3.2771e00, -2.3641e01, 2.4534e00, -1.6652e-01, -6.6110e-02], [-4.9766e00, -7.4782e-01, 1.5000e00, 1.5704e-01, 8.9900e-03], [1.9730e-02, -7.6200e-03, 2.1307e-01, -8.0601e-04, 2.9240e-02], [-7.4260e-02, 2.1030e-02, 7.6590e-02, 5.0330e-02, 5.1180e-02], ] ) elif impurity_Z == 3: # Lithium temperature_bin_borders = np.array([1.0, 7.0, 30.0, 60.0, 100.0, 1000.0, 10000.0]) radc = np.array( [ [-3.5752e01, -3.1170e01, -3.6558e01, -3.0560e01, -3.0040e01, -3.4199e01], [-1.6780e00, -1.6918e01, 9.4272e00, -2.4680e00, -4.2963e00, -8.5686e-01], [9.5500e-03, 1.1481e-01, 3.5299e00, 1.7912e00, 2.7407e-01, -6.3246e-01], [-6.1560e00, 2.0492e01, -8.1056e00, -2.8659e-01, 1.1569e00, 2.4968e-01], [-1.5027e00, 2.6136e-01, -4.4113e00, -1.9929e00, -4.5453e-01, 9.9930e-02], [2.5568e-01, 2.4870e-01, 5.1430e-02, 2.8150e-01, 3.0616e-01, 2.5080e-01], [1.1009e01, -7.0035e00, 1.9427e00, 2.3898e-01, -9.1510e-02, -1.7230e-02], [2.1169e00, -3.3910e-01, 1.3459e00, 5.0412e-01, 9.7550e-02, 1.4410e-02], [-9.6420e-02, -3.5570e-02, 2.3865e-01, 5.8550e-02, 1.6540e-02, 3.7030e-02], [1.3460e-02, 4.1910e-02, 8.6850e-02, 6.7410e-02, 5.4690e-02, 5.5670e-02], ] ) elif impurity_Z == 4: # Beryllium temperature_bin_borders = np.array([0.2, 0.7, 3.0, 11.0, 45.0, 170.0, 10000.0]) radc = np.array( [ [-3.0242e01, 
-3.2152e01, -3.0169e01, -3.7201e01, -4.0868e01, -2.8539e01], [2.1405e01, 3.1572e00, -8.9830e00, -2.5643e00, 1.4625e01, -5.0020e00], [1.0117e-01, 1.4168e-01, 6.3656e-01, -4.0467e00, 3.3373e00, 3.1089e-01], [2.7450e01, -1.4617e01, 4.5232e00, 7.1732e00, -8.8128e00, 1.3149e00], [8.8367e-01, 1.4646e-01, -1.5126e00, 5.8147e00, -3.1064e00, -4.0022e-01], [-6.6110e-02, 1.4683e-01, 4.0756e-01, 4.0114e-01, 2.4343e-01, 3.1788e-01], [3.0202e01, 4.3653e00, -3.7497e-01, -2.5926e00, 1.5996e00, -1.0780e-01], [1.2175e00, -1.1290e00, 7.2552e-01, -2.0708e00, 6.8069e-01, 7.3280e-02], [-1.4883e-01, 3.4914e-01, -2.9810e-02, -1.4775e-01, 6.0120e-02, 1.7320e-02], [4.8900e-03, 4.1730e-02, 5.5620e-02, 2.1900e-02, 6.8350e-02, 6.1360e-02], ] ) elif impurity_Z == 6: # Carbon temperature_bin_borders = np.array([1.0, 7.0, 20.0, 70.0, 200.0, 700.0, 15000.0]) radc = np.array( [ [-3.4509e01, -4.9228e01, -1.9100e01, -6.7743e01, -2.4016e01, -2.8126e01], [6.7599e00, 5.3922e01, -1.5476e01, 4.1606e01, -7.3974e00, -4.1679e00], [-1.7140e-02, 8.4584e-01, 4.2962e00, -5.3665e00, 2.9707e00, 4.9937e-01], [-4.0337e00, -5.1128e01, 2.1893e00, -1.5734e01, 1.6859e00, 9.0578e-01], [1.5517e-01, -8.9366e-01, -6.1658e00, 6.1760e00, -2.1965e00, -5.3687e-01], [2.1110e-02, -2.2710e-02, 1.6098e-01, 7.8010e-01, 3.0521e-01, 2.5962e-01], [6.5977e-01, 1.4758e01, 1.1021e00, 1.7905e00, -1.1147e-01, -5.8310e-02], [-1.7392e-01, 1.6371e-01, 2.1568e00, -1.7320e00, 3.8653e-01, 1.0420e-01], [-2.9270e-02, 2.9362e-01, 1.1101e-01, -2.7897e-01, 3.8970e-02, 4.6610e-02], [1.7600e-03, 5.5880e-02, 4.2700e-02, 2.3450e-02, 7.8690e-02, 7.3950e-02], ] ) elif impurity_Z == 7: # Nitrogen temperature_bin_borders = np.array([1.0, 10.0, 30.0, 100.0, 300.0, 1000.0, 15000.0]) radc = np.array( [ [-3.5312e01, -5.8692e01, -2.0301e01, -7.7571e01, -2.9401e01, -2.7201e01], [7.1926e00, 6.8148e01, -8.8594e00, 5.0488e01, -3.8191e-01, -4.4640e00], [7.8200e-03, 3.6209e-01, 6.0500e00, -6.5889e00, 3.5270e00, 7.6960e-01], [-3.5696e00, -5.4257e01, -2.7129e00, -1.8187e01, -1.0347e00, 9.2450e-01], [-1.2800e-02, 1.4835e-01, -7.6700e00, 6.8691e00, -2.4192e00, -6.7720e-01], [1.1180e-02, -1.4700e-03, 1.0705e-01, 8.3119e-01, 3.2269e-01, 2.6185e-01], [3.5812e-01, 1.3476e01, 1.9691e00, 2.0259e00, 2.2501e-01, -5.6280e-02], [-2.5100e-03, -2.9646e-01, 2.3943e00, -1.7572e00, 3.9511e-01, 1.2014e-01], [-2.2020e-02, 2.2706e-01, 1.4088e-01, -2.9376e-01, 2.6510e-02, 4.6870e-02], [-1.0000e-03, 5.4220e-02, 4.7450e-02, 1.7200e-02, 7.8930e-02, 7.9250e-02], ] ) elif impurity_Z == 8: # Oxygen temperature_bin_borders = np.array([1.0, 10.0, 30.0, 100.0, 300.0, 1000.0, 15000.0]) radc = np.array( [ [-3.6208e01, -2.9057e01, -2.9370e01, -4.4120e-02, -3.7073e01, -2.5037e01], [7.5487e00, -1.5228e01, 8.7451e00, -5.4918e01, 7.8826e00, -5.7568e00], [2.3340e-02, -3.1460e00, 6.3827e00, -9.5003e00, 3.7999e00, 1.2973e00], [-2.1983e00, 2.0826e01, -1.2357e01, 2.8883e01, -3.8006e00, 1.2040e00], [-1.0131e-01, 5.9427e00, -7.6451e00, 8.5536e00, -2.2619e00, -9.1955e-01], [8.0600e-03, 1.0610e-01, -2.2230e-02, 5.5336e-01, 5.0270e-01, 2.8988e-01], [-6.5108e-01, -8.0843e00, 3.4958e00, -4.8731e00, 5.2144e-01, -7.6780e-02], [8.4570e-02, -2.6827e00, 2.2661e00, -1.9172e00, 3.0219e-01, 1.4568e-01], [-2.1710e-02, 1.0350e-02, 2.5727e-01, -1.5709e-01, -6.6330e-02, 3.9250e-02], [-2.1200e-03, 2.6480e-02, 7.7800e-02, 1.6370e-02, 6.1140e-02, 8.3010e-02], ] ) elif impurity_Z == 10: # Neon temperature_bin_borders = np.array([1.0, 10.0, 70.0, 300.0, 1000.0, 3000.0, 15000.0]) radc = np.array( [ [-3.8610e01, -3.6822e01, -6.6901e00, -1.1261e02, -2.6330e02, 
-1.1174e02], [1.2606e01, 4.9706e00, -2.4212e01, 8.5765e01, 2.1673e02, 6.1907e01], [1.7866e-01, -1.5334e00, 7.3589e00, -2.1093e00, 1.2973e00, 4.7967e00], [-1.0213e01, 1.1973e00, 5.7352e00, -3.0372e01, -6.7799e01, -1.6289e01], [-7.7051e-01, 2.7279e00, -7.4602e00, 2.2928e00, -7.3310e-01, -2.5731e00], [2.7510e-02, 9.0090e-02, -7.9030e-02, 7.7055e-01, 4.4883e-01, 4.2620e-01], [4.3390e00, -1.3992e00, -8.5020e-02, 3.5346e00, 7.0398e00, 1.4263e00], [6.4207e-01, -1.1084e00, 1.8679e00, -5.6062e-01, 9.3190e-02, 3.3443e-01], [-3.3560e-02, 1.3620e-02, 2.2507e-01, -1.8569e-01, -1.5390e-02, -9.3734e-04], [-1.3333e-04, 2.4300e-02, 7.1420e-02, 3.7550e-02, 7.7660e-02, 8.4220e-02], ] ) elif impurity_Z == 18: # Argon temperature_bin_borders = np.array([1.0, 10.0, 50.0, 150.0, 500.0, 1500.0, 10000.0]) radc = np.array( [ [-3.6586e01, -4.8732e01, -2.3157e01, -6.8134e01, 5.5851e01, -6.2758e01], [1.2841e01, 3.8185e01, -8.5132e00, 3.6408e01, -7.8618e01, 2.5163e01], [2.3080e-02, -7.0622e-01, 1.5617e00, -7.3868e00, 1.0520e01, -7.4717e-01], [-1.2087e01, -2.5859e01, 1.5478e00, -1.0735e01, 2.2871e01, -6.8170e00], [-9.8000e-03, 1.2850e00, -1.8880e00, 6.8800e00, -7.7061e00, 6.9486e-01], [-2.4600e-03, -6.8710e-02, 2.2830e-01, 3.1142e-01, -1.8530e-01, 4.6946e-01], [4.8823e00, 5.4372e00, 2.8279e-01, 8.0440e-01, -2.1616e00, 5.9969e-01], [-3.7470e-02, -5.2157e-01, 5.5767e-01, -1.5740e00, 1.4123e00, -1.3487e-01], [1.1100e-03, 1.4016e-01, -9.9600e-02, -9.9180e-02, 1.8409e-01, -8.1380e-02], [1.1100e-03, 1.9120e-02, -1.5280e-02, 9.4500e-03, 6.7470e-02, 2.5840e-02], ] ) else: raise RuntimeError("This should never happen, please ensure all impurity cases in zimp array are covered!") # If trying to evaluate for a temperature outside of the given range, assume nearest neighbor # and throw a warning if any(electron_temp_profile < temperature_bin_borders[0]) or any( electron_temp_profile > temperature_bin_borders[-1] ): # pragma: no cover warnings.warn( f"Mavrin 2017 line radiation calculation is only valid between {temperature_bin_borders[0]}eV-{temperature_bin_borders[-1]}eV. Using nearest neighbor extrapolation.", stacklevel=3, ) electron_temp_profile = np.maximum(electron_temp_profile, temperature_bin_borders[0]) electron_temp_profile = np.minimum(electron_temp_profile, temperature_bin_borders[-1]) # solve for radiated power ne_tau_i_per_m3 = electron_density_profile * tau_i X_vals = np.log10(electron_temp_profile) Y_vals = np.log10(ne_tau_i_per_m3 / 1e19) if np.any(Y_vals > 0.0): # pragma: no cover warnings.warn("Warning: treating points with ne_tau_i_per_m3 > 1e19 m^-3 s as coronal.", stacklevel=3) Y_vals = np.minimum(Y_vals, 0.0) log10_Lz = np.zeros(electron_temp_profile.size) for i, Te_test in enumerate(electron_temp_profile): X, Y = X_vals[i], Y_vals[i] for j in range(temperature_bin_borders.size - 1): Te_min, Te_max = temperature_bin_borders[j], temperature_bin_borders[j + 1] if Te_min <= Te_test <= Te_max: log10_Lz[i] = ( radc[0, j] + radc[1, j] * X + radc[2, j] * Y + radc[3, j] * X**2 + radc[4, j] * X * Y + radc[5, j] * Y**2 + radc[6, j] * X**3 + radc[7, j] * X**2 * Y + radc[8, j] * X * Y**2 + radc[9, j] * Y**3 ) continue radrate = 10.0**log10_Lz qRad = radrate * electron_density_profile * electron_density_profile * impurity_concentration # W / (m^3 s)
radiated_power = integrate_profile_over_volume(qRad, rho, plasma_volume) # [W]
3
2023-10-19 16:58:23+00:00
12k
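The non-coronal radiation fit in this record reduces to two numerical pieces: a bivariate cubic in X = log10(Te) and Y = log10(ne*tau/1e19) evaluated with the coefficient column of the matching temperature bin, and a volume integration of the resulting power density over rho. A minimal sketch of both, assuming Te is given in the units of the tabulated bin borders (eV) and using placeholder coefficients and a toy profile rather than the Mavrin 2017 values:

import numpy as np

def log10_cooling_rate(coeffs, Te_eV, ne_tau):
    # Bivariate cubic fit:
    # log10(Lz) = c0 + c1*X + c2*Y + c3*X^2 + c4*X*Y + c5*Y^2
    #           + c6*X^3 + c7*X^2*Y + c8*X*Y^2 + c9*Y^3
    X = np.log10(Te_eV)
    Y = min(np.log10(ne_tau / 1e19), 0.0)  # treat ne*tau > 1e19 m^-3 s as coronal
    c = coeffs
    return (c[0] + c[1]*X + c[2]*Y + c[3]*X**2 + c[4]*X*Y + c[5]*Y**2
            + c[6]*X**3 + c[7]*X**2*Y + c[8]*X*Y**2 + c[9]*Y**3)

def integrate_profile_over_volume(array_per_m3, rho, plasma_volume):
    # Approximate volume integral of a profile on a uniform rho grid,
    # using dV ~= 2 * rho * drho * V (as in the record's helper).
    drho = rho[1] - rho[0]
    return float(np.sum(array_per_m3 * 2.0 * rho * drho) * plasma_volume)

if __name__ == "__main__":
    print(log10_cooling_rate(np.zeros(10), Te_eV=100.0, ne_tau=1e18))  # 0.0 with placeholder coefficients
    rho = np.linspace(0.0, 1.0, 50)
    # a flat unit profile integrates to approximately plasma_volume
    print(integrate_profile_over_volume(np.ones_like(rho), rho, plasma_volume=100.0))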
GXimingLu/IPA
main.py
[ { "identifier": "get_args", "path": "arguments.py", "snippet": "def get_args():\n parser = argparse.ArgumentParser(description='RL')\n\n # dataset\n parser.add_argument(\n '--output-dir', type=str, default=f'{HOME_PATH}/commonGen')\n parser.add_argument(\n '--dataset-train', type=str, default=f'{HOME_PATH}/data/commongen/train.json',\n help='JSON file containing train prompts. Each item contains \"prompt\", \"response\".')\n parser.add_argument(\n '--dataset-val', type=str, default=f'{HOME_PATH}/data/commongen/val.json',\n help='JSON file containing dev prompts. Each item contains \"prompt\", \"response\".')\n\n # reward\n parser.add_argument(\n '--n_extra_tokens', type=int, default=5, help='number of reward categorization')\n parser.add_argument(\n '--sample-interval', type=int, default=750, help='step interval to sample from current policy')\n parser.add_argument(\n '--horizon', type=float, default=2500, help='horizon value in adaptive controller')\n parser.add_argument(\n '--reward_batch_size', type=int, default=16, help='batch size')\n parser.add_argument(\n '--binary_coverage', action='store_true', default=False, help='whether to use binary_coverage')\n\n # KL term\n parser.add_argument(\n '--kl_coef', type=float, default=0.0, help='coefficient for KL term in reward')\n parser.add_argument(\n '--adaptive_kl', action='store_true', default=False, help='whether to use adaptive KL controller')\n parser.add_argument(\n '--target_kl', type=float, default=3, help='target value in adaptive KL controller')\n # entropy term\n parser.add_argument(\n '--entropy_coef', type=float, default=0.0, help='coefficient for entropy term in reward')\n parser.add_argument(\n '--adaptive_entropy', action='store_true', default=False, help='whether to use adaptive entropy controller')\n parser.add_argument(\n '--target_entropy', type=float, default=40, help='target value in adaptive entropy controller')\n\n # policy\n parser.add_argument(\n '--base_model_name', type=str, default='gpt2-xl', help='language model as the base policy.')\n parser.add_argument(\n '--base_model_checkpoint', type=str, default=\"PATH_TO_DISTILLED_GPT3\", help='base policy initialization')\n parser.add_argument(\n '--value_model_name', type=str, default='gpt2-large', help='language model as the value function.')\n parser.add_argument(\n '--alpha', type=float, default=1.0, help='co-efficient to combine policy and value model.')\n parser.add_argument(\n '--response-length', type=int, default=64, help='number of tokens to generate for each prompt.')\n parser.add_argument(\n '--temperature', type=float, default=1.0, help='temperature for sampling policy.')\n parser.add_argument(\n '--gpt3_calibrate', action='store_true', default=False, help='calibrate to adapt gpt3 logprobs')\n\n # training\n parser.add_argument(\n '--total-episodes', type=int, default=2000000, help='total number of episodes')\n parser.add_argument(\n '--batch_size', type=int, default=64, help='batch size')\n parser.add_argument(\n '--grad_accum', type=int, default=2, help='gradient accumulation steps')\n parser.add_argument(\n '--lr', type=float, default=1e-5, help='learning rate')\n parser.add_argument(\n '--num_warmup_steps', type=int, default=500, help='number of warmup steps in lr scheduler')\n parser.add_argument(\n '--clip_grad', action='store_true', default=False, help='whether to clip gradient')\n parser.add_argument(\n '--max-grad-norm', type=float, default=0.5, help='maximum norm of gradients ')\n\n # generation\n parser.add_argument(\n '--num-samples', type=int, 
default=1, help='number of samples to generate for each prompt.')\n parser.add_argument(\n '--top-p', type=float, default=0.6, help='hyperparameter for nucleus sampling')\n parser.add_argument(\n '--hard_prob', type=float, default=0.75, help='whether to use hard constraint in decoding')\n parser.add_argument(\n '--force_eos', action='store_true', default=False, help='not to generate eos until all constraints satisfied')\n\n # other\n parser.add_argument(\n '--seed', type=int, default=1, help='random seed (default: 1)')\n parser.add_argument(\n '--log-interval', type=int, default=200, help='step interval to print out logs')\n parser.add_argument(\n '--save-interval', type=int, default=500, help='step interval to save model checkpoints')\n parser.add_argument(\n '--min_save_step', type=int, default=8000, help='minimal steps before saving model checkpoints')\n parser.add_argument(\n '--max_save_step', type=int, default=15000, help='maximal steps for saving model checkpoints')\n parser.add_argument(\n '--eval-interval', type=int, default=500, help='step interval to do evaluation')\n parser.add_argument(\n '--cuda-deterministic', action='store_false', default=True,\n help=\"sets flags for determinism when using CUDA (potentially slow!)\")\n\n parser.add_argument(\n '--resume', type=str, default=None, help='directory to resume generation')\n\n args = parser.parse_args()\n args.cuda = torch.cuda.is_available()\n\n return args" }, { "identifier": "Policy", "path": "policy.py", "snippet": "class Policy:\n def __init__(self, base_model_name, base_model_checkpoint, value_model_name, device, tree_tokens,\n alpha, calibrate, force_eos):\n self.device = device\n self.base_model = GPT2LMHeadModel.from_pretrained(base_model_name)\n self.base_model.load_state_dict(base_model_checkpoint)\n self.value_model = GPT2LMHeadModel.from_pretrained(value_model_name)\n\n self.tokenizer = GPT2Tokenizer.from_pretrained(base_model_name, pad_token=\"<|endoftext|>\")\n self.base_model.config.pad_token_id = self.tokenizer.pad_token_id\n self.value_model.config.pad_token_id = self.tokenizer.pad_token_id\n\n self.tokenizer.add_tokens(tree_tokens, special_tokens=True)\n\n weights = self.value_model.get_input_embeddings().weight.detach().numpy()\n mean_weights, std_weights = np.mean(weights, axis=0), np.std(weights, axis=0)\n new_inits = np.vstack([np.random.normal(loc=mean_weights, scale=std_weights) for _ in tree_tokens])\n\n self.base_model.resize_token_embeddings(len(self.tokenizer))\n self.value_model.resize_token_embeddings(len(self.tokenizer))\n with torch.no_grad():\n new_inits = torch.tensor(new_inits)\n self.value_model.get_input_embeddings().weight[-len(tree_tokens):, :] = new_inits\n\n self.base_model = self.base_model.to(self.device)\n self.base_model.parallelize()\n self.value_model = self.value_model.to(self.device)\n self.value_model.parallelize()\n\n self.best_cat = tree_tokens[0]\n self.best_cat_id = self.tokenizer.convert_tokens_to_ids(self.best_cat)\n\n self.alpha = alpha\n self.base_model.eval()\n for param in self.base_model.parameters():\n param.requires_grad = False\n self.calibrate = calibrate\n\n self.eos_tokens = None\n if force_eos:\n self.eos_tokens = self.tokenizer.convert_tokens_to_ids(['.', 'Ġ.', '!', 'Ġ!'])\n\n def sample(self,\n prompts: Union[str, List[str]] = None,\n input_ids: torch.Tensor = None,\n attention_mask: torch.Tensor = None,\n constraints: List[ConstrainedHypothesis] = None,\n max_len: int = 64,\n min_len: int = 16,\n sample: bool = True,\n top_k: int = None,\n top_p: float = 
None,\n temperature: float = None,\n use_control_code: bool = False) -> Dict[str, Union[torch.Tensor, List[str]]]:\n\n use_constraints = constraints is not None\n if use_constraints:\n constraints = init_batch([json.loads(x) for x in constraints], self.eos_tokens)\n\n if prompts is not None:\n assert input_ids is None and attention_mask is None, 'repeated input'\n if isinstance(prompts, str):\n prompts = [prompts]\n\n encodings_dict = self.tokenizer(prompts, return_tensors=\"pt\", padding=True)\n input_ids = encodings_dict['input_ids'].to(self.device)\n attention_mask = encodings_dict['attention_mask'].to(self.device)\n\n else:\n input_ids = input_ids.to(self.device)\n attention_mask = attention_mask.to(self.device)\n\n model_kwargs = {'attention_mask': attention_mask}\n batch_size, input_seq_len = input_ids.shape\n\n value_input_ids, value_attention_mask = add_control_code(input_ids, attention_mask, self.best_cat_id)\n value_model_kwargs = {'attention_mask': value_attention_mask}\n\n logits_warper = self.base_model._get_logits_warper(\n top_k=top_k, top_p=top_p, temperature=temperature, num_beams=1\n )\n\n unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=self.device)\n output_logprob = torch.zeros([batch_size, 0], dtype=torch.float, device=self.device)\n output_mask = torch.ones([batch_size, 0], dtype=torch.long, device=self.device)\n\n self.value_model.eval()\n with torch.no_grad():\n for step in range(max_len):\n\n outputs, next_token_logits = get_model_output(self.base_model, step, input_ids, attention_mask, model_kwargs)\n\n # get logit from value model\n if use_control_code:\n value_outputs, value_next_token_logits = get_model_output(self.value_model, step, value_input_ids,\n value_attention_mask, value_model_kwargs)\n if self.calibrate:\n next_token_logits = F.log_softmax(next_token_logits)\n next_token_logits = next_token_logits + self.alpha * value_next_token_logits\n\n if step < min_len:\n next_token_logits[:, self.base_model.config.eos_token_id] = float('-inf')\n if use_constraints:\n for i, constraint in enumerate(constraints):\n for bad_word in constraint.avoid():\n next_token_logits[i, bad_word] = float('-inf')\n log_prob = F.log_softmax(next_token_logits, dim=-1)\n\n if sample:\n # Temperature (higher temperature => more likely to sample low probability tokens)\n next_token_scores = logits_warper(input_ids, next_token_logits)\n probs = F.softmax(next_token_scores, dim=-1)\n next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)\n else:\n # Greedy decoding\n next_tokens = torch.argmax(next_token_logits, dim=-1)\n\n # finished sentences should have their next token be a padding token\n next_tokens = next_tokens * unfinished_sequences + self.tokenizer.pad_token_id * (1 - unfinished_sequences)\n\n # update output mask\n output_mask = torch.cat([output_mask, unfinished_sequences[:, None]], dim=-1)\n # update output log probability\n token_logprob = torch.gather(log_prob, 1, next_tokens[:, None]).squeeze(1)\n token_logprob = token_logprob * unfinished_sequences + NEGATIVE_INF * (1 - unfinished_sequences)\n output_logprob = torch.cat([output_logprob, token_logprob[:, None]], dim=-1)\n\n # update generated ids, model inputs for next step\n input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)\n model_kwargs = self.base_model._update_model_kwargs_for_generation(\n outputs, model_kwargs, is_encoder_decoder=self.base_model.config.is_encoder_decoder\n )\n\n if use_constraints:\n constraints = [c.advance(t) for c, t in zip(constraints, 
next_tokens.tolist())]\n\n if use_control_code:\n value_input_ids = torch.cat([value_input_ids, next_tokens[:, None]], dim=-1)\n value_model_kwargs = self.value_model._update_model_kwargs_for_generation(\n value_outputs, value_model_kwargs, is_encoder_decoder=self.value_model.config.is_encoder_decoder\n )\n\n # if eos_token was found in one sentence, set sentence to finished\n unfinished_sequences = unfinished_sequences.mul((next_tokens != self.tokenizer.eos_token_id).long())\n\n if unfinished_sequences.max() == 0:\n break\n\n response_ids = input_ids[:, input_seq_len:]\n response_text = [self.tokenizer.decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=True)\n for output in response_ids]\n response_text = [process_generation(t) for t in response_text]\n\n prompt_ids = input_ids[:, :input_seq_len]\n if prompts is None:\n prompts = [self.tokenizer.decode(query, skip_special_tokens=True, clean_up_tokenization_spaces=True)\n for query in prompt_ids]\n\n return {\n 'query/input_ids': prompt_ids,\n 'query/text': prompts,\n 'query/mask': attention_mask,\n 'response/input_ids': response_ids,\n 'response/text': response_text,\n 'response/mask': output_mask,\n 'response/log_prob': output_logprob,\n }\n\n def forward_pass(self,\n query_input_ids: torch.Tensor,\n query_mask: torch.Tensor,\n response_input_ids: torch.Tensor,\n response_mask: torch.Tensor,\n use_control_code: bool = False):\n\n query_input_ids = query_input_ids.to(self.device)\n query_mask = query_mask.to(self.device)\n response_input_ids = response_input_ids.to(self.device)\n response_mask = response_mask.to(self.device)\n\n if use_control_code:\n value_query_input_ids, value_query_mask = query_input_ids, query_mask\n query_input_ids, query_mask = remove_control_code(query_input_ids, query_mask)\n\n logits = get_response_logits(self.base_model, query_input_ids, response_input_ids, query_mask, response_mask)\n\n if use_control_code:\n value_logits = get_response_logits(self.value_model, value_query_input_ids, response_input_ids,\n value_query_mask, response_mask)\n logits = logits + self.alpha * value_logits\n\n log_prob = F.log_softmax(logits, dim=-1)\n output_logprob = torch.gather(log_prob, 2, response_input_ids[:, :, None]).squeeze(2)\n output_entropy = logits_to_entropy(logits)\n lm_loss = -1. 
* output_logprob\n\n return {\n 'response/log_prob': mask_pad(output_logprob, response_mask),\n 'response/lm_loss': mask_pad(lm_loss, response_mask),\n 'response/entropy': mask_pad(output_entropy, response_mask),\n 'response/logits': logits,\n }" }, { "identifier": "DataPool", "path": "data_pool.py", "snippet": "class DataPool:\n def __init__(self, tree_tokens, n_extra_tokens):\n self.tree_tokens = tree_tokens\n self.n_extra_tokens = n_extra_tokens\n\n self.cat_tokens = None\n self.prompt_pool, self.response_pool, self.score_pool = [], [], []\n\n def add(self, prompts: List[str], responses: List[str], scores: List[float]):\n self.prompt_pool.extend(prompts)\n self.response_pool.extend(responses)\n self.score_pool.extend(scores)\n\n data = zip(self.prompt_pool, self.response_pool, self.score_pool)\n data = [x for x in data if x[-1] is not None]\n sorted_data = sorted(data, key=lambda x: x[-1], reverse=True)\n self.prompt_pool, self.response_pool, self.score_pool = [list(x) for x in list(zip(*sorted_data))]\n\n cat_pos = [[i] * (len(sorted_data) // self.n_extra_tokens) for i in range(self.n_extra_tokens)]\n cat_pos = [y for x in cat_pos for y in x]\n cat_pos = cat_pos + [self.n_extra_tokens - 1] * (len(sorted_data) - len(cat_pos))\n self.cat_tokens = [self.tree_tokens[i] for i in cat_pos]\n\n def get_data(self):\n return deepcopy(self.prompt_pool), deepcopy(self.response_pool), deepcopy(self.cat_tokens)\n\n def data_to_save(self):\n return {'prompts': self.prompt_pool, 'responses': self.response_pool, 'scores': self.score_pool}" }, { "identifier": "Reward", "path": "reward.py", "snippet": "class Reward:\n def __init__(self, save_path: str, batch_size: int, device: int, params: argparse.Namespace):\n self.path = save_path\n self.batch_size = batch_size\n self.params = params\n self.device = f'cuda:{device}'\n\n cola_model_name = \"textattack/roberta-base-CoLA\"\n self.cola_tokenizer = RobertaTokenizer.from_pretrained(cola_model_name)\n self.cola_model = RobertaForSequenceClassification.from_pretrained(cola_model_name).to(self.device)\n\n def get_reward(self, prompts: List[str], responses: List[str], concepts: List[str], epoch: str) -> Dict[str, List[float]]:\n reward_dict = {'coverage': [], 'cola': []}\n\n for response, concept in tqdm(zip(responses, concepts), total=len(concepts), desc='computing coverage'):\n reward_dict['coverage'].append(self._compute_coverage(response, concept, use_binary=self.params.binary_coverage))\n\n if not self.params.binary_coverage:\n reward_dict['binary_coverage'] = [int(c == 1) for c in reward_dict['coverage']]\n\n for texts in tqdm(batchify(responses, self.batch_size), total=math.ceil(len(responses) // self.batch_size),\n desc='scoring generations'):\n\n texts = [t.strip() for t in texts]\n inputs = self.cola_tokenizer(texts, padding=True, truncation=True, return_tensors=\"pt\").to(self.device)\n with torch.no_grad():\n logits = self.cola_model(**inputs).logits\n probs = logits.softmax(dim=-1)\n scores = probs[:, 1].tolist()\n reward_dict['cola'].extend(scores)\n\n overall_reward = product_rewards([reward_dict['coverage'], reward_dict['cola']])\n reward_dict.update({'reward': overall_reward})\n\n zip_scores = list(zip(reward_dict['coverage'], reward_dict['cola']))\n data = pd.DataFrame.from_dict({'prompt': prompts, 'concepts': concepts})\n collate(data, responses, zip_scores, os.path.join(self.path, f'reward_{epoch}.json'))\n\n return reward_dict\n\n @staticmethod\n def _compute_coverage(output, concept, use_binary=False):\n lematized_concepts = 
[nlp(c.strip())[0].lemma_ for c in concept.split('-')]\n lemmatized_output = []\n for token in output.strip().split():\n lemmatized_output.extend([x.lemma_ for x in nlp(token)])\n\n if use_binary:\n score = 0\n for word in lematized_concepts:\n if word in lemmatized_output:\n score += 1\n\n if score < len(lematized_concepts):\n return 0\n ordered_concept = sorted(lematized_concepts, key=lambda x: lemmatized_output.index(x))\n return int(ordered_concept == lematized_concepts)\n\n else:\n output_keywords = []\n for token in lemmatized_output:\n if token in lematized_concepts and token not in output_keywords:\n output_keywords.append(token)\n assert len(output_keywords) <= len(lematized_concepts), f'concepts: {concept}, keywords: {output_keywords}'\n\n coverage = 0\n for i in range(len(output_keywords)):\n if lematized_concepts[i] == output_keywords[i]:\n coverage += 1\n else:\n break\n return coverage / len(lematized_concepts)" }, { "identifier": "ensure_dir", "path": "utils/utils.py", "snippet": "def ensure_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)" }, { "identifier": "ceil_div", "path": "utils/utils.py", "snippet": "def ceil_div(a, b):\n return (a - 1) // b + 1" }, { "identifier": "reduce_mean", "path": "utils/utils.py", "snippet": "def reduce_mean(value, mask, axis=None):\n if axis is None:\n return torch.sum(value * mask) / torch.sum(mask)\n return reduce_sum(value, mask, axis) / torch.sum(mask, axis)" }, { "identifier": "reduce_sum", "path": "utils/utils.py", "snippet": "def reduce_sum(value, mask, axis=None):\n if axis is None:\n return torch.sum(value * mask)\n return torch.sum(value * mask, axis)" }, { "identifier": "decode", "path": "utils/generation_utils.py", "snippet": "def decode(tokenizer, query_input_ids, response_input_ids=None):\n query = [tokenizer.decode(p, skip_special_tokens=True, clean_up_tokenization_spaces=True)\n for p in query_input_ids]\n\n if response_input_ids is None:\n return query\n\n response = [tokenizer.decode(r, skip_special_tokens=True, clean_up_tokenization_spaces=True)\n for r in response_input_ids]\n return query, response" } ]
import os import torch import json import time import logging import random import argparse import numpy as np import torch.nn.functional as F from typing import List from datetime import datetime from tqdm import tqdm from torch.utils.data import Dataset, DataLoader from torch.optim import Adam, Optimizer from torch.optim.lr_scheduler import LambdaLR from torch.utils.tensorboard import SummaryWriter from transformers import get_linear_schedule_with_warmup from arguments import get_args from policy import Policy from data_pool import DataPool from reward import Reward from utils.utils import ensure_dir, ceil_div, reduce_mean, reduce_sum from utils.generation_utils import decode
7,558
self.kl_ctl = FixedController(self.params.kl_coef) self.kl_loss = torch.nn.KLDivLoss(reduction="none") if self.params.adaptive_entropy: self.entropy_ctl = AdaptiveController(self.params.entropy_coef, self.params.target_entropy, self.params.horizon) else: self.entropy_ctl = FixedController(self.params.entropy_coef) self.tree_tokens = tree_tokens self.best_cat = self.tree_tokens[0] self.best_cat_id = self.policy.tokenizer.convert_tokens_to_ids(self.best_cat) self.sample_dataloader, self.sampler = None, None self.seq_collator = SequenceCollator(tokenizer=policy.tokenizer) if resume: sample_dataset = SequenceDataset(data_pool=self.data_pool) self.sample_dataloader = DataLoader(sample_dataset, batch_size=self.params.batch_size, shuffle=True, drop_last=True, collate_fn=self.seq_collator) self.sampler = iter(self.sample_dataloader) def sample(self, step): if step % self.params.sample_interval != 0: return log.info(f"[step {step}] Sampling ...") concepts, prompts, responses = [], [], [] for i, batch in enumerate(tqdm(self.train_dataloader, total=len(self.train_dataloader), desc='Sampling from current policy')): input_ids, attention_mask, concept, constraints = batch use_constraint = random.choices([1, 0], weights=[self.params.hard_prob, 1 - self.params.hard_prob], k=1)[0] rollouts = self.policy.sample(input_ids=input_ids, attention_mask=attention_mask, constraints=constraints if use_constraint else None, max_len=self.params.response_length, top_p=self.params.top_p, use_control_code=(step > 0)) prompt, response = rollouts['query/text'], rollouts['response/text'] concepts.extend(concept) prompts.extend(prompt) responses.extend(response) scores = self.score_model.get_reward(prompts, responses, concepts, f'step{step}') self.data_pool.add(prompts=prompts, responses=responses, scores=scores['reward']) sample_dataset = SequenceDataset(data_pool=self.data_pool) self.sample_dataloader = DataLoader(sample_dataset, batch_size=self.params.batch_size, shuffle=True, drop_last=True, collate_fn=self.seq_collator) self.sampler = iter(self.sample_dataloader) def step(self, step_num): step_started_at = time.time() self.save(step=step_num) self.eval(step=step_num) self.sample(step=step_num) try: batch = next(self.sampler) assert len(batch[0]) == self.params.batch_size, 'insufficient batch' except (StopIteration, AssertionError): self.sampler = iter(self.sample_dataloader) batch = next(self.sampler) self.policy.value_model.train() ppo_loss, stats = self.loss(step_num, *batch) ppo_loss = ppo_loss / self.params.grad_accum ppo_loss.backward() if self.params.clip_grad: torch.nn.utils.clip_grad_norm_(self.policy.value_model.parameters(), self.params.max_grad_norm) if (step_num + 1) % self.params.grad_accum == 0: self.optimizer.step() self.optimizer.zero_grad() self.scheduler.step() for metric in ['kl', 'entropy']: self.writer.add_scalar(f'Objective/{metric}', stats[f'objective/{metric}'], step_num) for metric in ['lm', 'kl', 'entropy', 'total']: self.writer.add_scalar(f'Loss/{metric}', stats[f'loss/{metric}'], step_num) self.writer.add_scalar(f'Params/lr', self.optimizer.param_groups[0]['lr'], step_num) self.writer.add_scalar(f'Params/kl_coef', self.kl_ctl.value, step_num) self.writer.add_scalar(f'Params/entropy_coef', self.entropy_ctl.value, step_num) self.kl_ctl.update(stats['objective/kl'], self.params.batch_size, True) self.entropy_ctl.update(stats['objective/entropy'], self.params.batch_size, False) step_time = time.time() - step_started_at eps_per_second = float(self.params.batch_size) / step_time log.info(f"[step 
{step_num}] step_time={step_time:.2f}s, eps/s={eps_per_second:.2f}") def loss(self, step, query_input_ids, query_mask, response_input_ids, response_mask): outputs = self.policy.forward_pass(query_input_ids, query_mask, response_input_ids, response_mask, use_control_code=True) lm_loss, logprobs, entropy, logits = outputs['response/lm_loss'], outputs['response/log_prob'], \ outputs['response/entropy'], outputs['response/logits'] masks = response_mask.to(self.policy.device) with torch.no_grad(): ref_outputs = self.policy.forward_pass(query_input_ids[:, 1:], query_mask[:, 1:], response_input_ids, response_mask, use_control_code=False) ref_logprobs, ref_logits = ref_outputs['response/log_prob'], ref_outputs['response/logits'] kl = torch.sum(self.kl_loss(F.log_softmax(ref_logits, dim=-1), F.softmax(logits, dim=-1)), dim=-1) loss = reduce_mean(lm_loss + self.kl_ctl.value * kl - self.entropy_ctl.value * entropy, masks) data = {'logprobs': logprobs, 'ref_logprobs': ref_logprobs, 'masks': masks, 'logits': logits, 'ref_logits': ref_logits, 'lm_loss': reduce_mean(lm_loss, masks), 'kl_loss': reduce_mean(kl, masks), 'entropy': reduce_mean(entropy, masks), 'total_loss': loss} stats = self.record_step_stats(data) queries, responses = decode(self.policy.tokenizer, query_input_ids, response_input_ids) self.print_samples(queries=queries, responses=responses, lm_loss=reduce_mean(lm_loss, masks, axis=1), logprobs=logprobs, ref_logprobs=ref_logprobs, masks=masks, step=step) return loss, stats def record_step_stats(self, data): masks = data['masks'] kl = torch.sum(self.kl_loss(F.log_softmax(data['ref_logits'], dim=-1), F.softmax(data['logits'], dim=-1)), dim=-1)
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
log = logging.getLogger(__name__)


class PromptDataset(Dataset):
    def __init__(self, path, tokenizer):
        data = json.load(open(path, 'r'))
        self.items = [v for k, v in data.items() if v['human_order']]
        self.tokenizer = tokenizer

    def __len__(self):
        return len(self.items)

    def __getitem__(self, idx):
        item = self.items[idx]
        order_words = random.choice(item['human_order'])
        constraint = json.dumps([list(map(lambda x: self.tokenizer.encode(f' {x}'), item['inflection'][w])) for w in order_words.split('-')])
        prompt = 'Generate a sentence including the following keywords in the same order as listed: %s\n\nAnswer:'
        prompt = prompt % ' '.join(order_words.split('-'))

        return {
            'order': order_words,
            'constraint': constraint,
            'prompt': prompt,
        }


class PromptCollator(object):
    def __init__(self, tokenizer):
        self.tokenizer = tokenizer

    def __call__(self, sequences):
        concepts = [sequence['order'] for sequence in sequences]
        prompts = [sequence['prompt'] for sequence in sequences]
        constraints = [sequence['constraint'] for sequence in sequences]

        encodings_dict = self.tokenizer(prompts, return_tensors="pt", padding=True)
        input_ids = encodings_dict['input_ids']
        attention_mask = encodings_dict['attention_mask']

        return input_ids, attention_mask, concepts, constraints


class SequenceDataset(Dataset):
    def __init__(self, data_pool: DataPool):
        self.queries, self.responses, self.cat_tokens = data_pool.get_data()

    def __len__(self):
        return len(self.queries)

    def __getitem__(self, idx):
        return {'query': self.queries[idx],
                'response': self.responses[idx],
                'cat_tokens': self.cat_tokens[idx]
                }


class SequenceCollator(object):
    def __init__(self, tokenizer):
        self.tokenizer = tokenizer

    def __call__(self, sequences):
        queries = [sequence['query'] for sequence in sequences]
        responses = [sequence['response'] + self.tokenizer.eos_token for sequence in sequences]
        cat_ids = [self.tokenizer.convert_tokens_to_ids(sequence['cat_tokens']) for sequence in sequences]

        query_encodings_dict = self.tokenizer(queries, return_tensors="pt", padding=True)
        query_input_ids = query_encodings_dict['input_ids']
        query_mask = query_encodings_dict['attention_mask']

        query_input_ids = torch.cat([query_input_ids.new(cat_ids)[:, None], query_input_ids], dim=1)
        query_mask = torch.cat([query_mask.new([1] * len(query_mask))[:, None], query_mask], dim=1)

        response_encodings_dict = self.tokenizer(responses, return_tensors="pt", padding=True)
        response_input_ids = response_encodings_dict['input_ids']
        response_mask = response_encodings_dict['attention_mask']

        return query_input_ids, query_mask, response_input_ids, response_mask


class FixedController:
    def __init__(self, coef):
        self.value = coef

    def update(self, current, n_steps, lower_bound):
        pass


class AdaptiveController:
    def __init__(self, init_coef, target, horizon):
        self.value = init_coef
        self.target = target
        self.horizon = horizon

    def update(self, current, n_steps, lower_bound):
        proportional_error = np.clip(current / self.target - 1, -0.2, 0.2)
        if lower_bound:
            mult = 1 + proportional_error * n_steps / self.horizon
        else:
            mult = 1 - proportional_error * n_steps / self.horizon
        self.value *= mult


class ConditionTrainer:
    def __init__(self,
                 params: argparse.Namespace,
                 policy: Policy,
                 data_pool: DataPool,
                 score_model: Reward,
                 tree_tokens: List[str],
                 train_dataloader: DataLoader,
                 val_dataloader: DataLoader,
                 optimizer: Optimizer,
                 scheduler: LambdaLR,
                 resume: bool):

        self.params = params
        self.policy = policy
        self.data_pool = data_pool
        self.score_model = score_model
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.train_dataloader = train_dataloader
        self.val_dataloader = val_dataloader
        self.writer = SummaryWriter(log_dir=params.tensorboard_dir)

        if self.params.adaptive_kl:
            self.kl_ctl = AdaptiveController(self.params.kl_coef, self.params.target_kl, self.params.horizon)
        else:
            self.kl_ctl = FixedController(self.params.kl_coef)
        self.kl_loss = torch.nn.KLDivLoss(reduction="none")

        if self.params.adaptive_entropy:
            self.entropy_ctl = AdaptiveController(self.params.entropy_coef, self.params.target_entropy, self.params.horizon)
        else:
            self.entropy_ctl = FixedController(self.params.entropy_coef)

        self.tree_tokens = tree_tokens
        self.best_cat = self.tree_tokens[0]
        self.best_cat_id = self.policy.tokenizer.convert_tokens_to_ids(self.best_cat)

        self.sample_dataloader, self.sampler = None, None
        self.seq_collator = SequenceCollator(tokenizer=policy.tokenizer)

        if resume:
            sample_dataset = SequenceDataset(data_pool=self.data_pool)
            self.sample_dataloader = DataLoader(sample_dataset, batch_size=self.params.batch_size,
                                                shuffle=True, drop_last=True, collate_fn=self.seq_collator)
            self.sampler = iter(self.sample_dataloader)

    def sample(self, step):
        if step % self.params.sample_interval != 0:
            return
        log.info(f"[step {step}] Sampling ...")

        concepts, prompts, responses = [], [], []
        for i, batch in enumerate(tqdm(self.train_dataloader, total=len(self.train_dataloader),
                                       desc='Sampling from current policy')):
            input_ids, attention_mask, concept, constraints = batch
            use_constraint = random.choices([1, 0], weights=[self.params.hard_prob, 1 - self.params.hard_prob], k=1)[0]
            rollouts = self.policy.sample(input_ids=input_ids, attention_mask=attention_mask,
                                          constraints=constraints if use_constraint else None,
                                          max_len=self.params.response_length, top_p=self.params.top_p,
                                          use_control_code=(step > 0))
            prompt, response = rollouts['query/text'], rollouts['response/text']
            concepts.extend(concept)
            prompts.extend(prompt)
            responses.extend(response)

        scores = self.score_model.get_reward(prompts, responses, concepts, f'step{step}')
        self.data_pool.add(prompts=prompts, responses=responses, scores=scores['reward'])

        sample_dataset = SequenceDataset(data_pool=self.data_pool)
        self.sample_dataloader = DataLoader(sample_dataset, batch_size=self.params.batch_size,
                                            shuffle=True, drop_last=True, collate_fn=self.seq_collator)
        self.sampler = iter(self.sample_dataloader)

    def step(self, step_num):
        step_started_at = time.time()
        self.save(step=step_num)
        self.eval(step=step_num)
        self.sample(step=step_num)

        try:
            batch = next(self.sampler)
            assert len(batch[0]) == self.params.batch_size, 'insufficient batch'
        except (StopIteration, AssertionError):
            self.sampler = iter(self.sample_dataloader)
            batch = next(self.sampler)

        self.policy.value_model.train()
        ppo_loss, stats = self.loss(step_num, *batch)
        ppo_loss = ppo_loss / self.params.grad_accum
        ppo_loss.backward()
        if self.params.clip_grad:
            torch.nn.utils.clip_grad_norm_(self.policy.value_model.parameters(), self.params.max_grad_norm)
        if (step_num + 1) % self.params.grad_accum == 0:
            self.optimizer.step()
            self.optimizer.zero_grad()
        self.scheduler.step()

        for metric in ['kl', 'entropy']:
            self.writer.add_scalar(f'Objective/{metric}', stats[f'objective/{metric}'], step_num)
        for metric in ['lm', 'kl', 'entropy', 'total']:
            self.writer.add_scalar(f'Loss/{metric}', stats[f'loss/{metric}'], step_num)
        self.writer.add_scalar(f'Params/lr', self.optimizer.param_groups[0]['lr'], step_num)
        self.writer.add_scalar(f'Params/kl_coef', self.kl_ctl.value, step_num)
        self.writer.add_scalar(f'Params/entropy_coef', self.entropy_ctl.value, step_num)

        self.kl_ctl.update(stats['objective/kl'], self.params.batch_size, True)
        self.entropy_ctl.update(stats['objective/entropy'], self.params.batch_size, False)

        step_time = time.time() - step_started_at
        eps_per_second = float(self.params.batch_size) / step_time
        log.info(f"[step {step_num}] step_time={step_time:.2f}s, eps/s={eps_per_second:.2f}")

    def loss(self, step, query_input_ids, query_mask, response_input_ids, response_mask):
        outputs = self.policy.forward_pass(query_input_ids, query_mask, response_input_ids, response_mask,
                                           use_control_code=True)
        lm_loss, logprobs, entropy, logits = outputs['response/lm_loss'], outputs['response/log_prob'], \
                                             outputs['response/entropy'], outputs['response/logits']
        masks = response_mask.to(self.policy.device)

        with torch.no_grad():
            ref_outputs = self.policy.forward_pass(query_input_ids[:, 1:], query_mask[:, 1:],
                                                   response_input_ids, response_mask,
                                                   use_control_code=False)
            ref_logprobs, ref_logits = ref_outputs['response/log_prob'], ref_outputs['response/logits']

        kl = torch.sum(self.kl_loss(F.log_softmax(ref_logits, dim=-1), F.softmax(logits, dim=-1)), dim=-1)
        loss = reduce_mean(lm_loss + self.kl_ctl.value * kl - self.entropy_ctl.value * entropy, masks)

        data = {'logprobs': logprobs, 'ref_logprobs': ref_logprobs, 'masks': masks,
                'logits': logits, 'ref_logits': ref_logits,
                'lm_loss': reduce_mean(lm_loss, masks), 'kl_loss': reduce_mean(kl, masks),
                'entropy': reduce_mean(entropy, masks), 'total_loss': loss}
        stats = self.record_step_stats(data)

        queries, responses = decode(self.policy.tokenizer, query_input_ids, response_input_ids)
        self.print_samples(queries=queries, responses=responses, lm_loss=reduce_mean(lm_loss, masks, axis=1),
                           logprobs=logprobs, ref_logprobs=ref_logprobs, masks=masks, step=step)

        return loss, stats

    def record_step_stats(self, data):
        masks = data['masks']
        kl = torch.sum(self.kl_loss(F.log_softmax(data['ref_logits'], dim=-1), F.softmax(data['logits'], dim=-1)), dim=-1)
mean_kl = torch.mean(reduce_sum(kl, masks, axis=1))
7
2023-10-20 08:30:18+00:00
12k
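For readability, here is a minimal sketch of how the masked objective in ConditionTrainer.loss above is assembled. The helper reduce_mean is used but not defined in this record, so the masked-mean implementation below is an assumption about its behaviour, and every tensor value and coefficient is illustrative rather than taken from the dataset.

import torch

def reduce_mean(value, mask, axis=None):
    # Assumed behaviour of the helper referenced above: mean over positions where mask == 1.
    if axis is None:
        return (value * mask).sum() / mask.sum()
    return (value * mask).sum(axis) / mask.sum(axis)

# Toy per-token quantities for a batch of 2 responses, 3 tokens each.
lm_loss = torch.tensor([[2.0, 1.5, 0.0], [1.0, 0.5, 0.2]])
kl      = torch.tensor([[0.3, 0.1, 0.0], [0.2, 0.4, 0.1]])
entropy = torch.tensor([[1.2, 1.0, 0.0], [0.9, 1.1, 0.8]])
masks   = torch.tensor([[1., 1., 0.], [1., 1., 1.]])  # third token of sample 0 is padding

kl_coef, entropy_coef = 0.05, 0.01  # hypothetical coefficient values
loss = reduce_mean(lm_loss + kl_coef * kl - entropy_coef * entropy, masks)
print(loss)  # single scalar combining LM loss, KL penalty and entropy bonus over unmasked tokens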
violet-sto/HN-GFN
proxy/proxy.py
[ { "identifier": "Regressor", "path": "proxy/regression.py", "snippet": "class Regressor(nn.Module):\n def __init__(self, args, nhid, nvec, num_out_per_stem, num_out_per_mol, num_conv_steps, version, dropout_rate=0, do_stem_mask=True, do_nblocks=False):\n nn.Module.__init__(self)\n self.args = args\n self.training_steps = 0\n # atomfeats + stem_mask + atom one hot + nblocks\n num_feat = (14 + int(do_stem_mask) +\n len(atomic_numbers) + int(do_nblocks))\n\n self.proxy = MPNNet_v2(\n num_feat=num_feat,\n num_vec=nvec,\n dim=nhid,\n num_out_per_mol=num_out_per_mol,\n num_out_per_stem=num_out_per_stem,\n num_conv_steps=num_conv_steps,\n version=version,\n dropout_rate=dropout_rate)\n\n def fit(self, dataset, opt):\n last_losses = []\n train_losses = []\n test_losses = []\n time_start = time.time()\n time_last_check = time.time()\n best_test_loss = 1000\n mbsize = self.args.proxy_mbsize\n early_stop_tol = self.args.proxy_early_stop_tol\n early_stop_count = 0\n\n self.proxy.train()\n for i in range(self.args.proxy_num_iterations+1):\n s, r = dataset.sample2batch(dataset.sample(mbsize))\n # s.x s.edge_index, s.edge_attr, s.stems: stem_atmidxs\n\n stem_out_s, mol_out_s = self.proxy(s, None, do_stems=False)\n loss = F.mse_loss(mol_out_s, r)\n last_losses.append((loss.item(),))\n train_losses.append((loss.item(),))\n opt.zero_grad()\n loss.backward()\n opt.step()\n self.proxy.training_steps = i + 1\n\n if not i % 50:\n train_loss = [np.round(np.mean(i), 4)\n for i in zip(*last_losses)]\n last_losses = []\n\n total_test_loss = 0\n\n self.proxy.eval()\n for s, r in dataset.iterset(max(mbsize, 64), mode='test'):\n with torch.no_grad():\n stem_o, mol_o = self.proxy(s, None, do_stems=False)\n loss = F.mse_loss(mol_o, r, reduction='sum')\n total_test_loss += loss.item()\n self.proxy.train()\n\n test_loss = total_test_loss / \\\n (len(dataset.test_mols)*len(self.args.objectives))\n test_losses.append(test_loss)\n print('Iter {}: Train Loss {}, Test Loss {}, Time {}'.format(\n i, train_loss[0], round(test_loss, 4), round(time.time() - time_last_check, 3)))\n time_last_check = time.time()\n\n if test_loss < best_test_loss:\n best_test_loss = test_loss\n best_model = deepcopy(self.proxy)\n best_model.to('cpu')\n early_stop_count = 0\n if self.args.save:\n self.save(self.args.log_dir)\n\n else:\n early_stop_count += 1\n print('Early stop count: {}'.format(early_stop_count))\n\n if early_stop_count >= early_stop_tol:\n print('Early stopping! 
Training time: {}, Best test loss: {}'.format(\n time.time()-time_start, best_test_loss))\n break\n\n self.proxy = deepcopy(best_model)\n self.proxy.to(self.args.device)\n\n def forward(self, graph, vec=None, do_stems=True, do_bonds=False, k=None):\n return self.proxy(graph, vec, do_stems=do_stems, do_bonds=do_bonds, k=k)\n\n def posterior(self, x):\n self.proxy.eval()\n with torch.no_grad():\n outputs = self.forward(x, None, do_stems=False)[1].squeeze(0)\n\n posterior = MyPosterior(outputs, torch.zeros_like(outputs))\n\n return posterior\n\n def save(self, checkpoint_dir):\n checkpoint_path = os.path.join(\n checkpoint_dir, f\"proxy_init_checkpoint.pth\")\n torch.save(self.proxy.state_dict(), checkpoint_path)" }, { "identifier": "DropoutRegressor", "path": "proxy/regression.py", "snippet": "class DropoutRegressor(Regressor):\n def __init__(self, args, nhid, nvec, num_out_per_stem, num_out_per_mol, num_conv_steps, version, \\\n dropout_rate=0, num_dropout_samples=25, do_stem_mask=True, do_nblocks=False):\n super().__init__(args, nhid, nvec, num_out_per_stem, num_out_per_mol,\n num_conv_steps, version, dropout_rate, do_stem_mask, do_nblocks)\n self.proxy_num_dropout_samples = num_dropout_samples\n\n def posterior(self, x):\n self.proxy.train()\n with torch.no_grad():\n outputs = torch.cat([self.forward(x, None, do_stems=False)[1].unsqueeze(0)\n for _ in range(self.proxy_num_dropout_samples)])\n\n posterior = MyPosterior(outputs.mean(dim=0), outputs.var(dim=0))\n\n return posterior" }, { "identifier": "EvidentialRegressor", "path": "proxy/regression.py", "snippet": "class EvidentialRegressor(nn.Module):\n def __init__(self, args, nhid, nvec, num_out_per_stem, num_out_per_mol, num_conv_steps, version, dropout_rate=0, do_stem_mask=True, do_nblocks=False):\n nn.Module.__init__(self)\n self.args = args\n self.training_steps = 0\n # atomfeats + stem_mask + atom one hot + nblocks\n num_feat = (14 + int(do_stem_mask) +\n len(atomic_numbers) + int(do_nblocks))\n\n self.proxy = MPNNet_v2(\n num_feat=num_feat,\n num_vec=nvec,\n dim=nhid,\n num_out_per_mol=num_out_per_mol*4,\n num_out_per_stem=num_out_per_stem,\n num_conv_steps=num_conv_steps,\n version=version,\n dropout_rate=dropout_rate)\n\n def fit(self, dataset, opt, mean, std, round_idx):\n self.mean = mean\n self.std = std\n \n last_losses = []\n train_losses = []\n test_losses = []\n time_start = time.time()\n time_last_check = time.time()\n best_test_loss = 1000\n mbsize = self.args.proxy_mbsize\n early_stop_tol = self.args.proxy_early_stop_tol\n early_stop_count = 0\n \n stop_event = threading.Event()\n sampler = dataset.start_samplers(1, mbsize)\n\n def stop_everything():\n stop_event.set()\n print('joining')\n dataset.stop_samplers_and_join()\n\n self.proxy.train()\n for i in range(self.args.proxy_num_iterations+1):\n r = sampler()\n for thread in dataset.sampler_threads:\n if thread.failed:\n stop_event.set()\n stop_everything()\n pdb.post_mortem(thread.exception.__traceback__)\n s, r = r\n r = (r - mean) / std\n # s.x s.edge_index, s.edge_attr, s.stems: stem_atmidxs\n \n # if bounds is not None:\n # r = normalize(r, bounds)\n means, lambdas, alphas, betas = self.forward(s, None, do_stems=False)\n # the larger the lam, the larger the variance\n loss = evidential_loss(means, lambdas, alphas, betas, r, lam=self.args.evidential_lam).mean()\n last_losses.append((loss.item(),))\n train_losses.append((loss.item(),))\n opt.zero_grad()\n loss.backward()\n opt.step()\n self.proxy.training_steps = i + 1\n\n if not i % 50:\n train_loss = 
[np.round(np.mean(i), 4)\n for i in zip(*last_losses)]\n last_losses = []\n\n total_test_loss = 0\n total_normalize_test_loss = 0\n\n self.proxy.eval()\n for s, r in dataset.iterset(max(mbsize, 64), mode='test'):\n with torch.no_grad():\n means, lambdas, alphas, betas = self.forward(s, None, do_stems=False)\n # if bounds is not None:\n # means = unnormalize(means, bounds)\n normalize_loss = F.mse_loss(means, (r-mean)/std, reduction='sum')\n total_normalize_test_loss += normalize_loss.item()\n means = means * std + mean\n loss = F.mse_loss(means, r, reduction='sum')\n total_test_loss += loss.item()\n self.proxy.train()\n\n test_loss = total_test_loss / \\\n (len(dataset.test_mols)*len(self.args.objectives))\n normalize_test_loss = total_normalize_test_loss / \\\n (len(dataset.test_mols)*len(self.args.objectives))\n test_losses.append(test_loss)\n print('Iter {}: Train Loss {}, Test Loss {}, Normalize Test Loss {}, Time {}'.format(\n i, train_loss[0], round(test_loss, 4), round(normalize_test_loss, 4), round(time.time() - time_last_check, 3)))\n time_last_check = time.time()\n\n if normalize_test_loss < best_test_loss:\n best_test_loss = normalize_test_loss\n best_model = deepcopy(self.proxy)\n best_model.to('cpu')\n early_stop_count = 0\n if self.args.save:\n self.save(self.args.log_dir, round_idx)\n\n else:\n early_stop_count += 1\n print('Early stop count: {}'.format(early_stop_count))\n\n if early_stop_count >= early_stop_tol:\n print('Early stopping! Training time: {}, Best test loss: {}'.format(\n time.time()-time_start, best_test_loss))\n break\n \n stop_everything()\n self.proxy = deepcopy(best_model)\n self.proxy.to(self.args.device)\n\n def forward(self, graph, vec=None, do_stems=True, do_bonds=False, k=None):\n _, mol_out_s = self.proxy(graph, vec, do_stems=do_stems,\n do_bonds=do_bonds, k=k)\n min_val = 1e-6\n means, loglambdas, logalphas, logbetas = torch.split(\n mol_out_s, mol_out_s.shape[1]//4, dim=1)\n lambdas = F.softplus(loglambdas) + min_val\n alphas = F.softplus(logalphas) + min_val + 1 # add 1 for numerical contraints of Gamma function\n betas = F.softplus(logbetas) + min_val\n\n return means, lambdas, alphas, betas\n \n def posterior(self, X, posterior_transform=None):\n self.proxy.eval()\n with torch.no_grad():\n means, lambdas, alphas, betas = self.forward(X, None, do_stems=False)\n inverse_evidence = 1. 
/ ((alphas-1) * lambdas)\n vars = betas * inverse_evidence\n \n means = means * self.std + self.mean\n vars = vars * self.std ** 2\n \n # vars = BlockDiagLazyTensor(torch.diag(vars.squeeze()).unsqueeze(0))\n covariance_matrix = lazify(torch.diag(vars.squeeze()))\n mvn = MultitaskMultivariateNormal(means, covariance_matrix)\n \n posterior = GPyTorchPosterior(mvn)\n\n if posterior_transform is not None:\n return posterior_transform(posterior) \n return posterior\n\n def save(self, checkpoint_dir, round_idx):\n checkpoint_path = os.path.join(\n checkpoint_dir, f\"{round_idx}_proxy_checkpoint.pth\")\n torch.save(self.proxy.state_dict(), checkpoint_path)" }, { "identifier": "EnsembleRegressor", "path": "proxy/regression.py", "snippet": "class EnsembleRegressor(nn.Module):\n def __init__(self, args, nhid, nvec, num_out_per_stem, num_out_per_mol, num_conv_steps, version, \\\n dropout_rate=0, num_dropout_samples=5, do_stem_mask=True, do_nblocks=False):\n nn.Module.__init__(self)\n self.training_steps = 0\n # atomfeats + stem_mask + atom one hot + nblocks\n num_feat = (14 + int(do_stem_mask) +\n len(atomic_numbers) + int(do_nblocks))\n self.proxy_num_dropout_samples = num_dropout_samples\n self.args = args\n self.device = args.device\n self.proxy = [MPNNet_v2(\n num_feat=num_feat,\n num_vec=nvec,\n dim=nhid,\n num_out_per_mol=num_out_per_mol,\n num_out_per_stem=num_out_per_stem,\n num_conv_steps=num_conv_steps,\n version=version,\n dropout_rate=dropout_rate).to(self.device) \\\n for _ in range(self.proxy_num_dropout_samples)]\n \n def fit(self, dataset, opt, mean, std, round_idx):\n self.mean = mean\n self.std = std\n \n last_losses = []\n train_losses = []\n test_losses = []\n time_start = time.time()\n time_last_check = time.time()\n best_test_loss = 1000\n mbsize = self.args.proxy_mbsize\n early_stop_tol = self.args.proxy_early_stop_tol\n early_stop_count = 0\n \n for i in range(self.args.proxy_num_iterations+1):\n s, r = dataset.sample2batch(dataset.sample(mbsize))\n r = (r - mean) / std # (batch_size, num_obj)\n mol_out_s = self._call_models_train(s).mean(0)\n \n loss = F.mse_loss(mol_out_s, r)\n last_losses.append((loss.item(),))\n train_losses.append((loss.item(),))\n opt.zero_grad()\n loss.backward()\n opt.step()\n self.training_steps = i + 1\n \n if not i % 50:\n train_loss = [np.round(np.mean(i), 4)\n for i in zip(*last_losses)]\n last_losses = []\n\n total_test_loss = 0\n\n for s, r in dataset.iterset(max(mbsize, 64), mode='test'):\n with torch.no_grad():\n mol_o = self._call_models_eval(s).mean(0)\n loss = F.mse_loss(mol_o, (r-mean)/std, reduction='sum')\n total_test_loss += loss.item()\n\n test_loss = total_test_loss / \\\n (len(dataset.test_mols)*len(self.args.objectives))\n test_losses.append(test_loss)\n print('Iter {}: Train Loss {}, Test Loss {}, Time {}'.format(\n i, train_loss[0], round(test_loss, 4), round(time.time() - time_last_check, 3)))\n time_last_check = time.time()\n\n if test_loss < best_test_loss:\n best_test_loss = test_loss\n # best_model = deepcopy(self.proxy)\n # best_model.to('cpu')\n best_params = [[i.data.cpu().numpy() for i in model.parameters()] for model in self.proxy]\n early_stop_count = 0\n if self.args.save:\n self.save(self.args.log_dir, round_idx)\n\n else:\n early_stop_count += 1\n print('Early stop count: {}'.format(early_stop_count))\n\n if early_stop_count >= early_stop_tol:\n print('Early stopping! 
Training time: {}, Best test loss: {}'.format(\n time.time()-time_start, best_test_loss))\n break\n \n # load best parameters \n for i, model in enumerate(self.proxy):\n for i, besti in zip(model.parameters(), best_params[i]):\n i.data = torch.tensor(besti).to(self.device)\n # self.args.logger.save(self.args.save_path, self.args)\n \n def _call_models_train(self, x):\n for model in self.proxy:\n model.train()\n ys = torch.stack([model(x, None, do_stems=False)[1] for model in self.proxy], dim=0) # (5, 64, 2)\n return ys\n \n def _call_models_eval(self, x):\n for model in self.proxy:\n model.eval()\n ys = torch.stack([model(x, None, do_stems=False)[1] for model in self.proxy], dim=0)\n return ys\n \n def posterior(self, x):\n with torch.no_grad():\n outputs = self._call_models_eval(x)\n posterior = MyPosterior(outputs.mean(dim=0), outputs.var(dim=0))\n posterior.mean = posterior.mean * self.std + self.mean\n posterior.variance = posterior.variance * self.std ** 2\n return posterior\n \n def save(self, checkpoint_dir, round_idx):\n for i, model in enumerate(self.proxy):\n checkpoint_path = os.path.join(checkpoint_dir, f\"{round_idx}_proxy_checkpoint_model_{i}.pth\")\n torch.save(model.state_dict(), checkpoint_path) " }, { "identifier": "GPRegressor", "path": "proxy/regression.py", "snippet": "class GPRegressor(nn.Module):\n def __init__(self, args, nhid, nvec, num_out_per_stem, num_out_per_mol, num_conv_steps, version, \\\n dropout_rate=0, num_dropout_samples=5, do_stem_mask=True, do_nblocks=False):\n nn.Module.__init__(self)\n self.training_steps = 0\n # atomfeats + stem_mask + atom one hot + nblocks\n num_feat = (14 + int(do_stem_mask) +\n len(atomic_numbers) + int(do_nblocks))\n self.proxy_num_dropout_samples = num_dropout_samples\n self.args = args\n self.device = args.device\n # self.objective = AnalyticMultiOutputObjective()\n self.NP_DTYPE = np.float32\n fingerprint_func = functools.partial(\n rdMolDescriptors.GetMorganFingerprintAsBitVect,\n radius=self.args.fp_radius,\n nBits=self.args.fp_nbits,\n )\n self.my_smiles_to_fp_array = functools.partial(\n smiles_to_fp_array, fingerprint_func=fingerprint_func\n )\n \n def fit(self, dataset):\n x_train = np.stack([self.my_smiles_to_fp_array(s) for s in dataset.smis]).astype(self.NP_DTYPE) # (200, 1024)\n y_train = pd.DataFrame.from_dict(dataset.scores).values.astype(self.NP_DTYPE) # (200, num_obj)\n x_train = torch.as_tensor(x_train)\n y_train = torch.as_tensor(y_train)\n self.proxy = self.get_trained_gp(X_train=x_train, y_train=y_train).to(self.device)\n \n def get_trained_gp(self, X_train, y_train):\n models = []\n for i in range(y_train.shape[-1]):\n obj = y_train[:, i]\n models.append(TanimotoGP(train_x=X_train, train_y=obj)) # input should be tensor\n model = ModelListGP(*models)\n \n fit_gp_hyperparameters(model)\n \n return model\n \n def posterior(self, x): \n x = self.my_smiles_to_fp_array(Chem.MolToSmiles(x.mol))\n x = torch.as_tensor(x).unsqueeze(0).to(self.device)\n with torch.no_grad():\n posterior = self.proxy.posterior(x) #! oracle scale\n return posterior" }, { "identifier": "MolMDPExtended", "path": "mol_mdp_ext.py", "snippet": "class MolMDPExtended(MolMDP):\n\n def build_translation_table(self):\n \"\"\"build a symmetry mapping for blocks. Necessary to compute parent transitions\"\"\"\n self.translation_table = {}\n for blockidx in range(len(self.block_mols)):\n # Blocks have multiple ways of being attached. 
By default,\n # a new block is attached to the target stem by attaching\n # it's kth atom, where k = block_rs[new_block_idx][0].\n # When computing a reverse action (from a parent), we may\n # wish to attach the new block to a different atom. In\n # the blocks library, there are duplicates of the same\n # block but with block_rs[block][0] set to a different\n # atom. Thus, for the reverse action we have to find out\n # which duplicate this corresponds to.\n\n # Here, we compute, for block blockidx, what is the index\n # of the duplicate block, if someone wants to attach to\n # atom x of the block.\n # So atom_map[x] == bidx, such that block_rs[bidx][0] == x\n atom_map = {}\n for j in range(len(self.block_mols)):\n if self.block_smi[blockidx] == self.block_smi[j]:\n atom_map[self.block_rs[j][0]] = j\n self.translation_table[blockidx] = atom_map\n\n # We're still missing some \"duplicates\", as some might be\n # symmetric versions of each other. For example, block CC with\n # block_rs == [0,1] has no duplicate, because the duplicate\n # with block_rs [1,0] would be a symmetric version (both C\n # atoms are the \"same\").\n\n # To test this, let's create nonsense molecules by attaching\n # duplicate blocks to a Gold atom, and testing whether they\n # are the same.\n gold = Chem.MolFromSmiles('[Au]')\n # If we find that two molecules are the same when attaching\n # them with two different atoms, then that means the atom\n # numbers are symmetries. We can add those to the table.\n for blockidx in range(len(self.block_mols)):\n for j in self.block_rs[blockidx]:\n if j not in self.translation_table[blockidx]:\n symmetric_duplicate = None\n for atom, block_duplicate in self.translation_table[blockidx].items():\n molA, _ = chem.mol_from_frag(\n jun_bonds=[[0,1,0,j]],\n frags=[gold, self.block_mols[blockidx]])\n molB, _ = chem.mol_from_frag(\n jun_bonds=[[0,1,0,atom]],\n frags=[gold, self.block_mols[blockidx]])\n if (Chem.MolToSmiles(molA) == Chem.MolToSmiles(molB) or\n molA.HasSubstructMatch(molB)):\n symmetric_duplicate = block_duplicate\n break\n if symmetric_duplicate is None:\n raise ValueError('block', blockidx, self.block_smi[blockidx],\n 'has no duplicate for atom', j,\n 'in position 0, and no symmetrical correspondance')\n self.translation_table[blockidx][j] = symmetric_duplicate\n #print('block', blockidx, '+ atom', j,\n # 'in position 0 is a symmetric duplicate of',\n # symmetric_duplicate)\n\n def parents(self, mol=None):\n \"\"\"returns all the possible parents of molecule mol (or the current\n molecule if mol is None.\n\n Returns a list of (BlockMoleculeDataExtended, (block_idx, stem_idx)) pairs such that\n for a pair (m, (b, s)), MolMDPExtended.add_block_to(m, b, s) == mol.\n \"\"\"\n if len(mol.blockidxs) == 1:\n # If there's just a single block, then the only parent is\n # the empty block with the action that recreates that block\n return [(BlockMoleculeDataExtended(), (mol.blockidxs[0], 0))]\n\n # Compute the how many blocks each block is connected to\n blocks_degree = defaultdict(int)\n for a,b,_,_ in mol.jbonds:\n blocks_degree[a] += 1\n blocks_degree[b] += 1\n # Keep only blocks of degree 1 (those are the ones that could\n # have just been added)\n blocks_degree_1 = [i for i, d in blocks_degree.items() if d == 1]\n # Form new molecules without these blocks\n parent_mols = []\n\n for rblockidx in blocks_degree_1:\n new_mol = mol.copy()\n # find which bond we're removing\n removed_bonds = [(jbidx, bond) for jbidx, bond in enumerate(new_mol.jbonds)\n if rblockidx in bond[:2]]\n 
assert len(removed_bonds) == 1\n rjbidx, rbond = removed_bonds[0]\n # Pop the bond\n new_mol.jbonds.pop(rjbidx)\n # Remove the block\n mask = np.ones(len(new_mol.blockidxs), dtype=np.bool)\n mask[rblockidx] = 0\n reindex = new_mol.delete_blocks(mask)\n # reindex maps old blockidx to new blockidx, since the\n # block the removed block was attached to might have its\n # index shifted by 1.\n\n # Compute which stem the bond was using\n stem = ([reindex[rbond[0]], rbond[2]] if rblockidx == rbond[1] else\n [reindex[rbond[1]], rbond[3]])\n # and add it back\n new_mol.stems = [list(i) for i in new_mol.stems] + [stem]\n #new_mol.stems.append(stem)\n # and we have a parent. The stem idx to recreate mol is\n # the last stem, since we appended `stem` in the back of\n # the stem list.\n # We also have to translate the block id to match the bond\n # we broke, see build_translation_table().\n removed_stem_atom = (\n rbond[3] if rblockidx == rbond[1] else rbond[2])\n blockid = mol.blockidxs[rblockidx]\n if removed_stem_atom not in self.translation_table[blockid]:\n raise ValueError('Could not translate removed stem to duplicate or symmetric block.')\n parent_mols.append([new_mol,\n # action = (block_idx, stem_idx)\n (self.translation_table[blockid][removed_stem_atom],\n len(new_mol.stems) - 1)])\n if not len(parent_mols):\n raise ValueError('Could not find any parents')\n return parent_mols\n\n\n def add_block_to(self, mol, block_idx, stem_idx=None, atmidx=None):\n '''out-of-place version of add_block'''\n #assert (block_idx >= 0) and (block_idx <= len(self.block_mols)), \"unknown block\"\n if mol.numblocks == 0:\n stem_idx = None\n new_mol = mol.copy()\n new_mol.add_block(block_idx,\n block=self.block_mols[block_idx],\n block_r=self.block_rs[block_idx],\n stem_idx=stem_idx, atmidx=atmidx)\n return new_mol\n\n def remove_jbond_from(self, mol, jbond_idx=None, atmidx=None):\n new_mol = mol.copy()\n new_mol.remove_jbond(jbond_idx, atmidx)\n return new_mol\n\n def a2mol(self, acts):\n mol = BlockMoleculeDataExtended()\n for i in acts:\n if i[0] >= 0:\n mol = self.add_block_to(mol, *i)\n return mol\n\n def reset(self):\n self.molecule = BlockMoleculeDataExtended()\n return None\n\n\n def post_init(self, device, repr_type, include_bonds=False, include_nblocks=False):\n self.device = device\n self.repr_type = repr_type\n #self.max_bond_atmidx = max([max(i) for i in self.block_rs])\n self.max_num_atm = max(self.block_natm)\n # see model_block.mol2graph\n self.true_block_set = sorted(set(self.block_smi))\n self.stem_type_offset = np.int32([0] + list(np.cumsum([\n max(self.block_rs[self.block_smi.index(i)])+1 for i in self.true_block_set])))\n self.num_stem_types = self.stem_type_offset[-1]\n self.true_blockidx = [self.true_block_set.index(i) for i in self.block_smi]\n self.num_true_blocks = len(self.true_block_set)\n self.include_nblocks = include_nblocks\n self.include_bonds = include_bonds\n #print(self.max_num_atm, self.num_stem_types)\n self.molcache = {}\n\n def mols2batch(self, mols):\n if self.repr_type == 'block_graph':\n return model_block.mols2batch(mols, self)\n elif self.repr_type == 'atom_graph':\n return model_atom.mols2batch(mols, self)\n elif self.repr_type == 'morgan_fingerprint':\n return model_fingerprint.mols2batch(mols, self)\n\n def mol2repr(self, mol=None):\n if mol is None:\n mol = self.molecule\n #molhash = str(mol.blockidxs)+':'+str(mol.stems)+':'+str(mol.jbonds)\n #if molhash in self.molcache:\n # return self.molcache[molhash]\n if self.repr_type == 'block_graph':\n r = 
model_block.mol2graph(mol, self, self.floatX)\n elif self.repr_type == 'atom_graph':\n r = model_atom.mol2graph(mol, self, self.floatX,\n bonds=self.include_bonds,\n nblocks=self.include_nblocks)\n elif self.repr_type == 'morgan_fingerprint':\n r = model_fingerprint.mol2fp(mol, self, self.floatX)\n #self.molcache[molhash] = r\n return r\n\n def get_nx_graph(self, mol: BlockMoleculeData, true_block=False):\n true_blockidx = self.true_blockidx\n\n G = nx.DiGraph()\n blockidxs = [true_blockidx[xx] for xx in mol.blockidxs] if true_block else mol.blockidxs\n\n G.add_nodes_from([(ix, {\"block\": blockidxs[ix]}) for ix in range(len(blockidxs))])\n\n if len(mol.jbonds) > 0:\n edges = []\n for jbond in mol.jbonds:\n edges.append((jbond[0], jbond[1],\n {\"bond\": [jbond[2], jbond[3]]}))\n edges.append((jbond[1], jbond[0],\n {\"bond\": [jbond[3], jbond[2]]}))\n G.add_edges_from(edges)\n return G\n\n def graphs_are_isomorphic(self, g1, g2):\n return nx.algorithms.is_isomorphic(g1, g2, node_match=node_match, edge_match=edge_match)" }, { "identifier": "qUpperConfidenceBound", "path": "utils/acq_func.py", "snippet": "class qUpperConfidenceBound(MCAcquisitionFunction):\n r\"\"\"MC-based batch Upper Confidence Bound.\n\n Uses a reparameterization to extend UCB to qUCB for q > 1 (See Appendix A\n of [Wilson2017reparam].)\n\n `qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)`\n and `f(X)` has distribution `N(mu, Sigma)`.\n\n Example:\n >>> model = SingleTaskGP(train_X, train_Y)\n >>> sampler = SobolQMCNormalSampler(1024)\n >>> qUCB = qUpperConfidenceBound(model, 0.1, sampler)\n >>> qucb = qUCB(test_X)\n \"\"\"\n\n def __init__(\n self,\n model: Model,\n beta: float,\n sampler: Optional[MCSampler] = None,\n objective: Optional[MCAcquisitionObjective] = None,\n posterior_transform: Optional[PosteriorTransform] = None,\n X_pending: Optional[Tensor] = None,\n ) -> None:\n r\"\"\"q-Upper Confidence Bound.\n\n Args:\n model: A fitted model.\n beta: Controls tradeoff between mean and standard deviation in UCB.\n sampler: The sampler used to draw base samples. Defaults to\n `SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`\n objective: The MCAcquisitionObjective under which the samples are\n evaluated. Defaults to `IdentityMCObjective()`.\n posterior_transform: A PosteriorTransform (optional).\n X_pending: A `batch_shape x m x d`-dim Tensor of `m` design points that have\n points that have been submitted for function evaluation but have not yet\n been evaluated. Concatenated into X upon forward call. 
Copied and set to\n have no gradient.\n \"\"\"\n super().__init__(\n model=model,\n sampler=sampler,\n objective=objective,\n posterior_transform=posterior_transform,\n X_pending=X_pending,\n )\n self.beta_prime = math.sqrt(beta * math.pi / 2)\n\n @concatenate_pending_points\n @t_batch_mode_transform()\n def forward(self, X: Tensor) -> Tensor:\n r\"\"\"Evaluate qUpperConfidenceBound on the candidate set `X`.\n\n Args:\n X: A `batch_sahpe x q x d`-dim Tensor of t-batches with `q` `d`-dim design\n points each.\n\n Returns:\n A `batch_shape'`-dim Tensor of Upper Confidence Bound values at the given\n design points `X`, where `batch_shape'` is the broadcasted batch shape of\n model and input `X`.\n \"\"\"\n posterior = self.model.posterior(\n X=X, posterior_transform=self.posterior_transform\n )\n samples = self.sampler(posterior)\n obj = self.objective(samples)\n mean = obj.mean(dim=0)\n ucb_samples = mean + self.beta_prime * (obj - mean).abs()\n return ucb_samples.max(dim=-1)[0].mean(dim=0)" }, { "identifier": "qExpectedImprovement", "path": "utils/acq_func.py", "snippet": "class qExpectedImprovement(MCAcquisitionFunction):\n r\"\"\"MC-based batch Expected Improvement.\n\n This computes qEI by\n (1) sampling the joint posterior over q points\n (2) evaluating the improvement over the current best for each sample\n (3) maximizing over q\n (4) averaging over the samples\n\n `qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)`\n\n Example:\n >>> model = SingleTaskGP(train_X, train_Y)\n >>> best_f = train_Y.max()[0]\n >>> sampler = SobolQMCNormalSampler(1024)\n >>> qEI = qExpectedImprovement(model, best_f, sampler)\n >>> qei = qEI(test_X)\n \"\"\"\n\n def __init__(\n self,\n model: Model,\n best_f: Union[float, Tensor],\n sampler: Optional[MCSampler] = None,\n objective: Optional[MCAcquisitionObjective] = None,\n posterior_transform: Optional[PosteriorTransform] = None,\n X_pending: Optional[Tensor] = None,\n **kwargs: Any,\n ) -> None:\n r\"\"\"q-Expected Improvement.\n\n Args:\n model: A fitted model.\n best_f: The best objective value observed so far (assumed noiseless). Can be\n a `batch_shape`-shaped tensor, which in case of a batched model\n specifies potentially different values for each element of the batch.\n sampler: The sampler used to draw base samples. Defaults to\n `SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`\n objective: The MCAcquisitionObjective under which the samples are evaluated.\n Defaults to `IdentityMCObjective()`.\n posterior_transform: A PosteriorTransform (optional).\n X_pending: A `m x d`-dim Tensor of `m` design points that have been\n submitted for function evaluation but have not yet been evaluated.\n Concatenated into X upon forward call. 
Copied and set to have no\n gradient.\n \"\"\"\n super().__init__(\n model=model,\n sampler=sampler,\n objective=objective,\n posterior_transform=posterior_transform,\n X_pending=X_pending,\n )\n self.register_buffer(\"best_f\", torch.as_tensor(best_f, dtype=float))\n\n @concatenate_pending_points\n @t_batch_mode_transform()\n def forward(self, X: Tensor) -> Tensor:\n r\"\"\"Evaluate qExpectedImprovement on the candidate set `X`.\n\n Args:\n X: A `batch_shape x q x d`-dim Tensor of t-batches with `q` `d`-dim design\n points each.\n\n Returns:\n A `batch_shape'`-dim Tensor of Expected Improvement values at the given\n design points `X`, where `batch_shape'` is the broadcasted batch shape of\n model and input `X`.\n \"\"\"\n posterior = self.model.posterior(\n X=X, posterior_transform=self.posterior_transform\n )\n samples = self.sampler(posterior)\n obj = self.objective(samples)\n obj = (obj - self.best_f.unsqueeze(-1).to(obj)).clamp_min(0)\n q_ei = obj.max(dim=-1)[0].mean(dim=0)\n return q_ei" } ]
import numpy as np
import pandas as pd
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
from proxy.regression import Regressor, DropoutRegressor, EvidentialRegressor, EnsembleRegressor, GPRegressor
from mol_mdp_ext import MolMDPExtended
from botorch.utils.multi_objective.box_decompositions.non_dominated import FastNondominatedPartitioning
from botorch.utils.multi_objective.hypervolume import Hypervolume
from botorch.acquisition.multi_objective.monte_carlo import qExpectedHypervolumeImprovement
from botorch.acquisition.multi_objective.analytic import ExpectedHypervolumeImprovement
from botorch.acquisition.analytic import UpperConfidenceBound, ExpectedImprovement
from botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization
from botorch.utils.transforms import normalize, unnormalize
from botorch.acquisition.objective import GenericMCObjective
from botorch.acquisition.objective import ScalarizedPosteriorTransform
from botorch.utils.multi_objective.pareto import is_non_dominated
from botorch.sampling.samplers import SobolQMCNormalSampler
from sklearn.model_selection import train_test_split
from utils.acq_func import qUpperConfidenceBound, qExpectedImprovement
from copy import copy, deepcopy
9,858
# from botorch.acquisition.monte_carlo import qUpperConfidenceBound, qExpectedImprovement

def make_proxy_model(args, mdp):
    repr_type = args.proxy_repr_type
    nemb = args.proxy_nemb
    num_conv_steps = args.proxy_num_conv_steps
    model_version = args.proxy_model_version
    if args.proxy_uncertainty == "none":
        model = Regressor(args, nhid=nemb, nvec=0, num_out_per_stem=mdp.num_blocks, num_out_per_mol=len(args.objectives), num_conv_steps=num_conv_steps, version=model_version, dropout_rate=args.proxy_dropout)
    if args.proxy_uncertainty == "dropout":
        model = DropoutRegressor(args, nhid=nemb, nvec=0, num_out_per_stem=mdp.num_blocks, num_out_per_mol=len(args.objectives), num_conv_steps=num_conv_steps, version=model_version, dropout_rate=args.proxy_dropout, num_dropout_samples=args.proxy_num_dropout_samples)
    elif args.proxy_uncertainty == 'ensemble':
        model = EnsembleRegressor(args, nhid=nemb, nvec=0, num_out_per_stem=mdp.num_blocks, num_out_per_mol=len(args.objectives), num_conv_steps=num_conv_steps, version=model_version, dropout_rate=args.proxy_dropout, num_dropout_samples=args.proxy_num_dropout_samples)
    elif args.proxy_uncertainty == 'evidential':
        model = EvidentialRegressor(args, nhid=nemb, nvec=0, num_out_per_stem=mdp.num_blocks, num_out_per_mol=len(args.objectives), num_conv_steps=num_conv_steps, version=model_version, dropout_rate=args.proxy_dropout)
    elif args.proxy_uncertainty == 'GP':
        model = GPRegressor(args, nhid=nemb, nvec=0, num_out_per_stem=mdp.num_blocks, num_out_per_mol=len(args.objectives), num_conv_steps=num_conv_steps, version=model_version, dropout_rate=args.proxy_dropout)
    model.to(args.device)

    if args.floatX == 'float64':
        model = model.double()

    return model


def get_proxy(args, bpath, oracle):
    if args.acq_fn.lower() == 'none':
        return NoAF(args, bpath, oracle)
    elif args.acq_fn.lower() == 'ucb':
        return UCB(args, bpath, oracle)
    elif args.acq_fn.lower() == 'ucb_chebyshev':
        return UCB_chebyshev(args, bpath, oracle)
    elif args.acq_fn.lower() == 'ei':
        return EI(args, bpath, oracle)


class Proxy:
    def __init__(self, args, bpath, oracle):
        self.args = args
        self.ref_point = torch.zeros(len(args.objectives)).to(args.device)
        self.oracle = oracle
        self.device = args.device
# from botorch.acquisition.monte_carlo import qUpperConfidenceBound, qExpectedImprovement

def make_proxy_model(args, mdp):
    repr_type = args.proxy_repr_type
    nemb = args.proxy_nemb
    num_conv_steps = args.proxy_num_conv_steps
    model_version = args.proxy_model_version
    if args.proxy_uncertainty == "none":
        model = Regressor(args, nhid=nemb, nvec=0, num_out_per_stem=mdp.num_blocks, num_out_per_mol=len(args.objectives), num_conv_steps=num_conv_steps, version=model_version, dropout_rate=args.proxy_dropout)
    if args.proxy_uncertainty == "dropout":
        model = DropoutRegressor(args, nhid=nemb, nvec=0, num_out_per_stem=mdp.num_blocks, num_out_per_mol=len(args.objectives), num_conv_steps=num_conv_steps, version=model_version, dropout_rate=args.proxy_dropout, num_dropout_samples=args.proxy_num_dropout_samples)
    elif args.proxy_uncertainty == 'ensemble':
        model = EnsembleRegressor(args, nhid=nemb, nvec=0, num_out_per_stem=mdp.num_blocks, num_out_per_mol=len(args.objectives), num_conv_steps=num_conv_steps, version=model_version, dropout_rate=args.proxy_dropout, num_dropout_samples=args.proxy_num_dropout_samples)
    elif args.proxy_uncertainty == 'evidential':
        model = EvidentialRegressor(args, nhid=nemb, nvec=0, num_out_per_stem=mdp.num_blocks, num_out_per_mol=len(args.objectives), num_conv_steps=num_conv_steps, version=model_version, dropout_rate=args.proxy_dropout)
    elif args.proxy_uncertainty == 'GP':
        model = GPRegressor(args, nhid=nemb, nvec=0, num_out_per_stem=mdp.num_blocks, num_out_per_mol=len(args.objectives), num_conv_steps=num_conv_steps, version=model_version, dropout_rate=args.proxy_dropout)
    model.to(args.device)

    if args.floatX == 'float64':
        model = model.double()

    return model


def get_proxy(args, bpath, oracle):
    if args.acq_fn.lower() == 'none':
        return NoAF(args, bpath, oracle)
    elif args.acq_fn.lower() == 'ucb':
        return UCB(args, bpath, oracle)
    elif args.acq_fn.lower() == 'ucb_chebyshev':
        return UCB_chebyshev(args, bpath, oracle)
    elif args.acq_fn.lower() == 'ei':
        return EI(args, bpath, oracle)


class Proxy:
    def __init__(self, args, bpath, oracle):
        self.args = args
        self.ref_point = torch.zeros(len(args.objectives)).to(args.device)
        self.oracle = oracle
        self.device = args.device
self.mdp = MolMDPExtended(bpath)
5
2023-10-24 14:10:35+00:00
12k
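The qUpperConfidenceBound snippet in the record above gives the reparameterized estimator qUCB = E(max(mu + beta' * |Y_tilde - mu|)) with beta' = sqrt(beta * pi / 2). Below is a self-contained sketch of that Monte-Carlo computation; the posterior samples are synthetic random numbers standing in for real model output, and beta is an arbitrary illustrative value.

import math
import torch

torch.manual_seed(0)

beta = 0.1
beta_prime = math.sqrt(beta * math.pi / 2)

# Pretend posterior objective samples for a candidate batch: shape (num_mc_samples, q).
samples = torch.randn(512, 4) * 0.3 + 1.0

mean = samples.mean(dim=0)                           # per-point posterior mean, shape (q,)
ucb_samples = mean + beta_prime * (samples - mean).abs()
qucb = ucb_samples.max(dim=-1).values.mean(dim=0)    # max over the q points, then average over MC samples
print(qucb.item())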
SALT-NLP/Efficient_Unlearning
src/models/transformers/parameter-efficient-finetuning/layer.py
[ { "identifier": "AdapterCompositionBlock", "path": "src/models/transformers/parameter-efficient-finetuning/composition.py", "snippet": "class AdapterCompositionBlock(Sequence):\n def __init__(self, *children):\n self.children = [parse_composition(b, None) for b in children]\n\n def __getitem__(self, key):\n return self.children[key]\n\n def __len__(self):\n return len(self.children)\n\n def __eq__(self, o: object) -> bool:\n if isinstance(o, type(self)):\n return all([c1 == c2 for c1, c2 in zip(self.children, o.children)])\n else:\n return False\n\n def __repr__(self):\n child_repr = \", \".join(map(str, self.children))\n return f\"{self.__class__.__name__}[{child_repr}]\"\n\n def first(self):\n if not isinstance(self.children[0], AdapterCompositionBlock):\n return self.children[0]\n else:\n return self.children[0].first()\n\n def last(self):\n if not isinstance(self.children[-1], AdapterCompositionBlock):\n return self.children[-1]\n else:\n return self.children[-1].last()\n\n @property\n def parallel_channels(self):\n return max([b.parallel_channels if isinstance(b, AdapterCompositionBlock) else 1 for b in self.children])\n\n def flatten(self) -> Set[str]:\n return set(itertools.chain(*[[b] if isinstance(b, str) else b.flatten() for b in self.children]))" }, { "identifier": "BatchSplit", "path": "src/models/transformers/parameter-efficient-finetuning/composition.py", "snippet": "class BatchSplit(AdapterCompositionBlock):\n def __init__(self, *split_adapters: List[Union[AdapterCompositionBlock, str]], batch_sizes: Union[List[int], int]):\n super().__init__(*split_adapters)\n self.batch_sizes = batch_sizes if isinstance(batch_sizes, list) else [batch_sizes] * len(split_adapters)" }, { "identifier": "Fuse", "path": "src/models/transformers/parameter-efficient-finetuning/composition.py", "snippet": "class Fuse(AdapterCompositionBlock):\n def __init__(self, *fuse_stacks: List[Union[AdapterCompositionBlock, str]]):\n super().__init__(*fuse_stacks)\n\n # TODO-V2 pull this up to all block classes?\n @property\n def name(self):\n return \",\".join([c if isinstance(c, str) else c.last() for c in self.children])" }, { "identifier": "Parallel", "path": "src/models/transformers/parameter-efficient-finetuning/composition.py", "snippet": "class Parallel(AdapterCompositionBlock):\n def __init__(self, *parallel_adapters: List[str]):\n \"\"\"\n Can be used to perform inference for multiple tasks (i.e., adapters) in parallel (for the same input).\n\n See AdapterDrop https://arxiv.org/abs/2010.11918\n \"\"\"\n super().__init__(*parallel_adapters)\n\n @property\n def parallel_channels(self):\n return len(self.children)" }, { "identifier": "Split", "path": "src/models/transformers/parameter-efficient-finetuning/composition.py", "snippet": "class Split(AdapterCompositionBlock):\n def __init__(self, left: str, right: str, split_index: int):\n super().__init__(left, right)\n assert split_index > 0\n self.left = left\n self.right = right\n self.split_index = split_index" }, { "identifier": "Stack", "path": "src/models/transformers/parameter-efficient-finetuning/composition.py", "snippet": "class Stack(AdapterCompositionBlock):\n def __init__(self, *stack_layers: List[Union[AdapterCompositionBlock, str]]):\n super().__init__(*stack_layers)" }, { "identifier": "AdapterConfig", "path": "src/models/transformers/parameter-efficient-finetuning/configuration.py", "snippet": "class AdapterConfig(AdapterConfigBase):\n \"\"\"\n Base class that models the architecture of an adapter.\n\n Args:\n mh_adapter (:obj:`bool`): 
If True, add adapter modules after the multi-head attention block of each layer.\n output_adapter (:obj:`bool`): If True, add adapter modules after the output FFN of each layer.\n reduction_factor (:obj:`float` or :obj:`Mapping`):\n Either a scalar float (> 0) specifying the reduction factor for all layers or a mapping specifying the\n reduction_factor for individual layers. If not all layers are represented in the mapping a default value\n should be given e.g. {'1': 8, '6': 32, 'default': 16}. Specifying a reduction factor < 1 will result in an\n up-projection layer.\n non_linearity (:obj:`str`): The activation function to use in the adapter bottleneck.\n original_ln_before (:obj:`bool`, optional):\n If True, apply layer pre-trained normalization and residual connection before the adapter modules. Defaults\n to False. Only applicable if :obj:`is_parallel` is False.\n original_ln_after (:obj:`bool`, optional):\n If True, apply pre-trained layer normalization and residual connection after the adapter modules. Defaults\n to True.\n ln_before (:obj:`bool`, optional): If True, add a new layer normalization before the adapter bottleneck.\n Defaults to False.\n ln_after (:obj:`bool`, optional): If True, add a new layer normalization after the adapter bottleneck.\n Defaults to False.\n init_weights (:obj:`str`, optional): Initialization method for the weights of the adapter modules.\n Currently, this can be either \"bert\" (default) or \"mam_adapter\".\n is_parallel (:obj:`bool`, optional): If True, apply adapter transformations in parallel.\n By default (False), sequential application is used.\n scaling (:obj:`float` or :obj:`str`, optional):\n Scaling factor to use for scaled addition of adapter outputs as done by He et al. (2021). Can bei either a\n constant factor (float) or the string \"learned\", in which case the scaling factor is learned. Defaults to\n 1.0.\n use_gating (:obj:`bool`, optional):\n Place a trainable gating module besides the added parameter module to control module activation. This is\n e.g. used for PEFT. Defaults to False.\n residual_before_ln (:obj:`bool`, optional):\n If True, take the residual connection around the adapter bottleneck before the layer normalization. Only\n applicable if :obj:`original_ln_before` is True.\n adapter_residual_before_ln (:obj:`bool`, optional):\n If True, apply the residual connection around the adapter modules before the new layer normalization within\n the adapter. Only applicable if :obj:`ln_after` is True and :obj:`is_parallel` is False.\n inv_adapter (:obj:`str`, optional):\n If not None (default), add invertible adapter modules after the model embedding layer. Currently, this can\n be either \"nice\" or \"glow\".\n inv_adapter_reduction_factor (:obj:`float`, optional):\n The reduction to use within the invertible adapter modules. Only applicable if :obj:`inv_adapter` is not\n None.\n cross_adapter (:obj:`bool`, optional):\n If True, add adapter modules after the cross attention block of each decoder layer in an encoder-decoder\n model. 
Defaults to False.\n leave_out (:obj:`List[int]`, optional):\n The IDs of the layers (starting at 0) where NO adapter modules should be added.\n phm_layer (:obj:`bool`, optional): If True the down and up projection layers are a PHMLayer.\n Defaults to False\n phm_dim (:obj:`int`, optional): The dimension of the phm matrix.\n Defaults to None.\n shared_phm_rule (:obj:`bool`, optional): Whether the phm matrix is shared across all layers.\n Defaults to True\n factorized_phm_rule (:obj:`bool`, optional):\n Whether the phm matrix is factorized into a left and right matrix. Defaults to False.\n learn_phm (:obj:`bool`, optional): Whether the phm matrix should be learned during training.\n Defaults to True\n factorized_phm_W (:\n obj:`bool`, optional): Whether the weights matrix is factorized into a left and right matrix. Defaults to\n True\n shared_W_phm (:obj:`bool`, optional): Whether the weights matrix is shared across all layers.\n Defaults to False.\n phm_c_init (:obj:`str`, optional): The initialization function for the weights of the phm matrix.\n The possible values are `[\"normal\", \"uniform\"]`. Defaults to `normal`.\n phm_init_range (:obj:`float`, optional): std for initializing phm weights if `phm_c_init=\"normal\"`.\n Defaults to 0.0001.\n hypercomplex_nonlinearity (:obj:`str`, optional):\n This specifies the distribution to draw the weights in the phm layer from. Defaults to `glorot-uniform`.\n phm_rank (:obj:`int`, optional):\n If the weight matrix is factorized this specifies the rank of the matrix. E.g. the left matrix of the down\n projection has the shape (phm_dim, _in_feats_per_axis, phm_rank) and the right matrix (phm_dim, phm_rank,\n _out_feats_per_axis). Defaults to 1\n phm_bias (:obj:`bool`, optional):\n If True the down and up projection PHMLayer has a bias term. If `phm_layer` is False this is ignored.\n Defaults to True\n \"\"\"\n\n # Required options\n mh_adapter: bool\n output_adapter: bool\n\n reduction_factor: Union[float, Mapping]\n non_linearity: str\n\n # Options with defaults\n original_ln_before: bool = False\n original_ln_after: bool = True\n ln_before: bool = False\n ln_after: bool = False\n init_weights: str = \"bert\"\n is_parallel: bool = False\n scaling: Union[float, str] = 1.0\n use_gating: bool = False\n residual_before_ln: bool = True\n adapter_residual_before_ln: bool = False\n inv_adapter: Optional[str] = None\n inv_adapter_reduction_factor: Optional[float] = None\n cross_adapter: bool = False\n leave_out: List[int] = field(default_factory=list)\n phm_layer: bool = False\n phm_dim: int = 4\n factorized_phm_W: Optional[bool] = True\n shared_W_phm: Optional[bool] = False\n shared_phm_rule: Optional[bool] = True\n factorized_phm_rule: Optional[bool] = False\n phm_c_init: Optional[str] = \"normal\"\n phm_init_range: Optional[float] = 0.0001\n learn_phm: Optional[bool] = True\n hypercomplex_nonlinearity: Optional[str] = \"glorot-uniform\"\n phm_rank: Optional[int] = 1\n phm_bias: Optional[bool] = True\n\n # We want to emulate a simple form of immutability while keeping the ability to add custom attributes.\n # Therefore, we don't allow changing attribute values if set once.\n def __setattr__(self, name, value):\n if name in self.__dict__:\n raise FrozenInstanceError()\n elif name == \"invertible_adapter\":\n # This is for backwards compatibility. 
In v1, invertible adapters were specified in a nested config dict.\n # Now, we have two config keys directly in the adapter config.\n if value:\n object.__setattr__(self, \"inv_adapter\", value[\"block_type\"])\n object.__setattr__(self, \"inv_adapter_reduction_factor\", value[\"reduction_factor\"])\n else:\n object.__setattr__(self, name, value)" }, { "identifier": "AdapterSetup", "path": "src/models/transformers/parameter-efficient-finetuning/context.py", "snippet": "class AdapterSetup:\n \"\"\"\n Represents an adapter setup of a model including active adapters and active heads. This class is intended to be\n used as a context manager using the ``with`` statement. The setup defined by the ``AdapterSetup`` context will\n override static adapter setups defined in a model (i.e. setups specified via ``active_adapters``).\n\n Example::\n\n with AdapterSetup(Stack(\"a\", \"b\")):\n # will use the adapter stack \"a\" and \"b\" outputs = model(**inputs)\n\n Note that the context manager is thread-local, i.e. it can be used with different setups in a multi-threaded\n environment.\n \"\"\"\n\n # thread-local storage that holds a stack of active contexts\n storage = threading.local()\n\n def __init__(self, adapter_setup, head_setup=None, ignore_empty: bool = False):\n self.adapter_setup = parse_composition(adapter_setup)\n if head_setup:\n self.head_setup = head_setup\n else:\n self.head_setup = parse_heads_from_composition(self.adapter_setup)\n self._empty = ignore_empty and self.adapter_setup is None and self.head_setup is None\n\n def __enter__(self):\n if not self._empty:\n AdapterSetup.get_contexts().append(self)\n return self\n\n def __exit__(self, type, value, traceback):\n if not self._empty:\n AdapterSetup.get_contexts().pop()\n\n @classmethod\n def get_contexts(cls):\n if not hasattr(cls.storage, \"contexts\"):\n cls.storage.contexts = []\n return cls.storage.contexts\n\n @classmethod\n def get_context(cls):\n try:\n return cls.get_contexts()[-1]\n except IndexError:\n return None\n\n @classmethod\n def get_context_adapter_setup(cls):\n context = cls.get_context()\n if context:\n return context.adapter_setup\n return None\n\n @classmethod\n def get_context_head_setup(cls):\n context = cls.get_context()\n if context:\n return context.head_setup\n return None" }, { "identifier": "ForwardContext", "path": "src/models/transformers/parameter-efficient-finetuning/context.py", "snippet": "class ForwardContext:\n \"\"\"\n Holds context information during a forward pass through a model. 
This class should be used via the\n ``ForwardContext.wrap()`` method.\n\n Note that the context is thread-local.\n \"\"\"\n\n # thread-local storage that holds a stack of active contexts\n storage = threading.local()\n\n context_attributes = [\"adapter_gating_scores\", \"adapter_fusion_attentions\", \"adapter_input_parallelized\"]\n\n def __init__(self, model, *args, **kwargs):\n # If the model has a method ``forward_context()``, use it to create the context.\n if hasattr(model, \"forward_context\"):\n model.forward_context(self, *args, **kwargs)\n\n def __enter__(self):\n ForwardContext.get_contexts().append(self)\n return self\n\n def __exit__(self, type, value, traceback):\n ForwardContext.get_contexts().pop()\n\n @classmethod\n def wrap(cls, f):\n \"\"\"\n Decorator method that wraps a ``forward()`` function of a model class.\n \"\"\"\n\n @functools.wraps(f)\n def wrapper_func(self, *args, **kwargs):\n if self.config.adapters is not None:\n with cls(self, *args, **kwargs) as ctx:\n kwargs = {\n k: v for k, v in kwargs.items() if k.replace(\"output_\", \"\") not in cls.context_attributes\n }\n results = f(self, *args, **kwargs)\n\n # append output attributes\n if isinstance(results, tuple):\n for attr in cls.context_attributes:\n if getattr(ctx, \"output_\" + attr, False):\n results = results + (dict(getattr(ctx, attr)),)\n else:\n for attr in cls.context_attributes:\n if getattr(ctx, \"output_\" + attr, False):\n results[attr] = dict(getattr(ctx, attr))\n return results\n else:\n return f(self, *args, **kwargs)\n\n return wrapper_func\n\n @classmethod\n def get_contexts(cls):\n if not hasattr(cls.storage, \"contexts\"):\n cls.storage.contexts = []\n return cls.storage.contexts\n\n @classmethod\n def get_context(cls):\n try:\n return cls.get_contexts()[-1]\n except IndexError:\n return None" }, { "identifier": "Adapter", "path": "src/models/transformers/parameter-efficient-finetuning/modeling.py", "snippet": "class Adapter(nn.Module):\n \"\"\"\n Implementation of a sequential bottleneck adapter block.\n \"\"\"\n\n def __init__(\n self,\n adapter_name,\n input_size,\n down_sample,\n config: AdapterConfig,\n ):\n super().__init__()\n self.name = adapter_name\n self.input_size = input_size\n self.add_layer_norm_before = config[\"ln_before\"]\n self.add_layer_norm_after = config[\"ln_after\"]\n self.adapter_residual_before_ln = config[\"adapter_residual_before_ln\"]\n self.use_gating = config[\"use_gating\"]\n\n # Params related to input & output of adapter\n self.residual_before_ln = config[\"residual_before_ln\"]\n self.original_ln_before = config[\"original_ln_before\"]\n self.original_ln_after = config[\"original_ln_after\"]\n\n # list for all modules of the adapter, passed into nn.Sequential()\n seq_list = []\n\n # If we want to have a layer norm on input, we add it to seq_list\n if self.add_layer_norm_before:\n self.adapter_norm_before = nn.LayerNorm(self.input_size)\n seq_list.append(self.adapter_norm_before)\n\n # if a downsample size is not passed, we just half the size of the original input\n self.down_sample = down_sample\n if down_sample is None:\n self.down_sample = self.input_size // 2\n\n # ensure that the down sample size is at least 1\n if self.down_sample < 1:\n self.down_sample = 1\n\n if config[\"phm_layer\"]:\n # Linear down projection of the input\n seq_list.append(PHMLayer(adapter_name, self.input_size, self.down_sample, \"down\", config))\n else:\n seq_list.append(nn.Linear(self.input_size, self.down_sample))\n\n # select non-linearity\n self.non_linearity = 
Activation_Function_Class(config[\"non_linearity\"].lower())\n\n seq_list.append(self.non_linearity)\n\n # sequential adapter, first downproject, then non-linearity then upsample. In the forward pass we include the\n # residual connection\n self.adapter_down = nn.Sequential(*seq_list)\n\n # Up projection to input size\n if config[\"phm_layer\"]:\n # Linear down projection of the input\n self.adapter_up = PHMLayer(adapter_name, self.down_sample, self.input_size, \"up\", config)\n else:\n self.adapter_up = nn.Linear(self.down_sample, self.input_size)\n\n # Additional scaling factor (from He et al. (2021))\n if isinstance(config[\"scaling\"], float):\n self.scaling = config[\"scaling\"]\n elif config[\"scaling\"] == \"learned\":\n self.scaling = nn.Parameter(torch.ones(1))\n else:\n raise ValueError(\"Unknown scaling type: {}\".format(config[\"scaling\"]))\n\n # If we want to have a layer norm on output, we apply it later after a separate residual connection\n # This means that we learn a new output layer norm, which replaces another layer norm learned in the bert layer\n if self.add_layer_norm_after:\n self.adapter_norm_after = nn.LayerNorm(self.input_size)\n\n if self.use_gating:\n self.gate = nn.Linear(self.input_size, 1)\n\n # if we want to initialize with the bert strategy then this function is called for all the linear layers\n if config[\"init_weights\"] == \"bert\":\n self.adapter_down.apply(self.init_bert_weights)\n self.adapter_up.apply(self.init_bert_weights)\n if self.use_gating:\n self.gate.apply(self.init_bert_weights)\n elif config[\"init_weights\"] == \"mam_adapter\":\n with torch.no_grad():\n nn.init.kaiming_uniform_(self.adapter_down[0].weight, a=math.sqrt(5))\n nn.init.zeros_(self.adapter_up.weight)\n nn.init.zeros_(self.adapter_down[0].bias)\n nn.init.zeros_(self.adapter_up.bias)\n if self.use_gating:\n self.gate.apply(self.init_bert_weights)\n else:\n raise ValueError(\"Unknown init_weights type: {}\".format(config[\"init_weights\"]))\n\n def pre_forward(\n self,\n hidden_states,\n input_tensor,\n layer_norm,\n fusion_config=None,\n ):\n \"\"\"\n Retrieves the hidden_states, query (for Fusion), and residual connection according to the set configuration.\n\n Args:\n adapter_config: config file according to what the parameters are passed\n hidden_states: output of previous layer\n input_tensor: residual connection before FFN\n\n Returns: hidden_states, query, residual\n\n \"\"\"\n query = None\n\n if self.residual_before_ln:\n residual = hidden_states\n\n if fusion_config is not None and fusion_config[\"query_before_ln\"]:\n query = hidden_states\n\n if self.original_ln_before:\n if layer_norm:\n hidden_states = layer_norm(hidden_states + input_tensor)\n else:\n hidden_states = hidden_states + input_tensor\n\n if not self.residual_before_ln:\n residual = hidden_states\n\n if fusion_config is not None and not fusion_config[\"query_before_ln\"]:\n query = hidden_states\n\n return hidden_states, query, residual\n\n def forward(self, x, residual_input, output_gating=False):\n down = self.adapter_down(x)\n\n up = self.adapter_up(down)\n up = up * self.scaling\n output = up\n\n if self.use_gating:\n # x.shape = (batch_size, seq_len, hidden_size)\n gate = torch.sigmoid(self.gate(x))\n gate = torch.mean(gate, dim=1).unsqueeze(-1)\n output = output * gate\n\n # apply residual connection before layer norm if configured in this way\n if self.adapter_residual_before_ln:\n output = output + residual_input\n\n # apply layer norm if available\n if self.add_layer_norm_after:\n output = 
self.adapter_norm_after(output)\n\n # if residual should be applied after layer norm, apply it here\n if not self.adapter_residual_before_ln:\n output = output + residual_input\n\n if self.use_gating and output_gating:\n return output, down, up, gate\n return output, down, up\n\n def post_forward(self, hidden_states, input_hidden_states, input_tensor, layer_norm):\n \"\"\"\n Performs computations after the forward pass of the adapter block(s). This e.g. includes applying the residual\n connection and layer norm if configured in this way.\n\n Args:\n hidden_states: The hidden states outputted by the adapter block(s).\n input_hidden_states: Residual connection before the adapter block(s).\n input_tensor: Residual connection before the Transformer FFN/ attention layer.\n layer_norm: Transformer LayerNorm.\n\n Returns:\n The modified hidden states.\n \"\"\"\n if self.original_ln_after:\n if layer_norm:\n hidden_states = layer_norm(hidden_states + input_tensor)\n else:\n hidden_states = hidden_states + input_tensor\n\n return hidden_states\n\n # This is copied from the BertPreTrainedModel class to make this a self containing class.\n @staticmethod\n def init_bert_weights(module):\n \"\"\"Initialize the weights.\"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # std defaults to 0.02, this might need to be changed\n module.weight.data.normal_(mean=0.0, std=0.02)\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()" }, { "identifier": "BertFusion", "path": "src/models/transformers/parameter-efficient-finetuning/modeling.py", "snippet": "class BertFusion(nn.Module):\n \"\"\"\n Implementation of an AdapterFusion block.\n \"\"\"\n\n def __init__(\n self,\n config: AdapterFusionConfig,\n dense_size,\n attention_probs_dropout_prob,\n ):\n super(BertFusion, self).__init__()\n # if config.hidden_size % config.num_attention_heads != 0:\n # raise ValueError(\n # \"The hidden size (%d) is not a multiple of the number of attention \"\n # \"heads (%d)\" % (config.hidden_size, config.num_attention_heads))\n self.config = config\n\n self.dense_size = dense_size\n self.dropout = nn.Dropout(attention_probs_dropout_prob)\n\n if not self.config[\"query\"] and not self.config[\"key\"] and not self.config[\"value\"]:\n self.dense = nn.Linear(self.dense_size, 1)\n\n if self.config[\"query\"]:\n self.query = nn.Linear(self.dense_size, self.dense_size)\n self.query.apply(Adapter.init_bert_weights)\n\n if self.config[\"key\"]:\n self.key = nn.Linear(self.dense_size, self.dense_size)\n self.key.apply(Adapter.init_bert_weights)\n\n if self.config[\"value\"]:\n self.value = nn.Linear(self.dense_size, self.dense_size, bias=False)\n self.value.apply(Adapter.init_bert_weights)\n if self.config[\"value_initialized\"]:\n self.value.weight.data = (torch.zeros(self.dense_size, self.dense_size) + 0.000001).fill_diagonal_(1.0)\n\n if self.config[\"temperature\"]:\n self.T = 50.0\n else:\n self.T = 1.0\n self.reduction = self.T / 1000.0\n\n def forward(self, query, key, value, residual, output_attentions: bool = False):\n\n if self.config[\"residual_before\"]:\n value += residual[:, :, None, :].repeat(1, 1, value.size(2), 1)\n\n if self.config[\"query\"]:\n query_layer = self.query(query)\n else:\n query_layer = query\n\n if self.config[\"key\"]:\n key_layer = self.key(key)\n else:\n key_layer = key\n\n if self.config[\"value\"] and self.config[\"value_before_softmax\"]:\n # key/value 
have dims => batch, toks, number-of-adapters, feats\n value_layer = self.value(value)\n else:\n value_layer = value\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.squeeze(torch.matmul(query_layer.unsqueeze(2), key_layer.transpose(-2, -1)), dim=2)\n\n attention_scores = self.dropout(attention_scores)\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores / self.T)\n self.T = max(self.T - self.reduction, 1.0)\n\n context_layer = torch.squeeze(torch.matmul(attention_probs.unsqueeze(2), value_layer), dim=2)\n\n if self.config[\"value\"] and not self.config[\"value_before_softmax\"]:\n # key/value have dims => batch, toks, number-of-adapters, feats\n context_layer = self.value(context_layer)\n else:\n context_layer = context_layer\n\n if not self.config[\"residual_before\"]:\n context_layer += residual\n\n if output_attentions:\n attention_probs = attention_probs.detach().cpu().numpy()\n return context_layer, attention_probs\n else:\n return context_layer" }, { "identifier": "ParallelAdapter", "path": "src/models/transformers/parameter-efficient-finetuning/modeling.py", "snippet": "class ParallelAdapter(Adapter):\n \"\"\"\n Implementation of a parallel bottleneck adapter block.\n \"\"\"\n\n def __init__(self, adapter_name, input_size, down_sample, config: AdapterConfig):\n super().__init__(adapter_name, input_size, down_sample, config)\n\n def pre_forward(\n self,\n hidden_states,\n input_tensor,\n layer_norm,\n fusion_config=None,\n ):\n \"\"\"\n Retrieves the hidden_states, query (for Fusion), and residual connection according to the set configuration.\n\n Args:\n adapter_config: config file according to what the parameters are passed\n hidden_states: output of previous layer\n input_tensor: residual connection before FFN\n\n Returns: hidden_states, query, residual\n\n \"\"\"\n # In case of parallel adapter, return the input tensor as hidden states\n query = None\n if fusion_config is not None:\n query = input_tensor\n return input_tensor, query, input_tensor\n\n def forward(self, x, residual_input, output_gating=False):\n down = self.adapter_down(x)\n\n up = self.adapter_up(down)\n up = up * self.scaling\n\n output = up\n\n if self.use_gating:\n # x.shape = (batch_size, seq_len, hidden_size)\n gate = torch.sigmoid(self.gate(x))\n gate = torch.mean(gate, dim=1).unsqueeze(-1)\n output = output * gate\n\n # apply layer norm if available\n if self.add_layer_norm_after:\n output = self.adapter_norm_after(output)\n\n if self.use_gating and output_gating:\n return output, down, up, gate\n return output, down, up\n\n def post_forward(self, hidden_states, input_hidden_states, input_tensor, layer_norm):\n \"\"\"\n Performs computations after the forward pass of the adapter block(s). This e.g. includes applying the residual\n connection and layer norm if configured in this way.\n\n Args:\n hidden_states: The hidden states outputted by the adapter block(s).\n input_hidden_states: Residual connection before the adapter block(s).\n input_tensor: Residual connection before the Transformer FFN/ attention layer.\n layer_norm: Transformer LayerNorm.\n\n Returns:\n The modified hidden states.\n \"\"\"\n hidden_states = hidden_states + input_hidden_states\n\n if self.original_ln_after:\n if layer_norm:\n hidden_states = layer_norm(hidden_states + input_tensor)\n else:\n hidden_states = hidden_states + input_tensor\n\n return hidden_states" } ]
from abc import ABC, abstractmethod from typing import List, Mapping, Union from torch import nn from .composition import AdapterCompositionBlock, BatchSplit, Fuse, Parallel, Split, Stack from .configuration import AdapterConfig from .context import AdapterSetup, ForwardContext from .modeling import Adapter, BertFusion, ParallelAdapter import numpy as np import torch
8,171
class AdapterLayerBase(ABC): """ Base class for all adaptation methods that require per-layer modules. """ @property def layer_idx(self): return getattr(self, "_layer_idx", -1) @layer_idx.setter def layer_idx(self, layer_idx): idx = getattr(self, "_layer_idx", layer_idx) assert idx == layer_idx setattr(self, "_layer_idx", idx) def get_active_setup(self, module_dict): if getattr(self.config, "is_adaptable", False): # First check current context before falling back to defined setup context = AdapterSetup.get_context() if context is not None: adapter_setup = context.adapter_setup else: adapter_setup = self.config.adapters.active_setup else: adapter_setup = None skip_adapters = adapter_setup is None or ( self.config.adapters.skip_layers is not None and self.layer_idx in self.config.adapters.skip_layers ) if not skip_adapters and (len(set(module_dict.keys()) & adapter_setup.flatten()) > 0): return adapter_setup else: return None def _store_gating_score(self, adapter_name, gating_score): context = ForwardContext.get_context() if context.output_adapter_gating_scores: gating_cache = context.adapter_gating_scores if self.layer_idx not in gating_cache[adapter_name]: gating_cache[adapter_name][self.layer_idx] = {} gating_score = gating_score.detach().squeeze().cpu().numpy() if len(gating_score.shape) == 0: gating_score = np.expand_dims(gating_score, axis=0) cache_score = gating_cache[adapter_name][self.layer_idx].get(self.location_key, None) if cache_score is not None: gating_cache[adapter_name][self.layer_idx][self.location_key] = np.column_stack( (cache_score, gating_score) ) else: gating_cache[adapter_name][self.layer_idx][self.location_key] = gating_score def _store_fusion_attentions(self, fusion_name, attentions): context = ForwardContext.get_context() if context.output_adapter_fusion_attentions: attention_cache = context.adapter_fusion_attentions if self.layer_idx not in attention_cache[fusion_name]: attention_cache[fusion_name][self.layer_idx] = {} attention_cache[fusion_name][self.layer_idx][self.location_key] = attentions @abstractmethod def add_adapter(self, adapter_name: str, layer_idx: int): raise NotImplementedError() @abstractmethod def delete_adapter(self, adapter_name: str): raise NotImplementedError() @abstractmethod def add_fusion_layer(self, adapter_names: Union[List, str]): raise NotImplementedError() @abstractmethod def delete_fusion_layer(self, adapter_names: Union[List, str]): raise NotImplementedError() @abstractmethod def enable_adapters(self, adapter_setup: AdapterCompositionBlock, unfreeze_adapters: bool, unfreeze_fusion: bool): raise NotImplementedError() @abstractmethod def get_adapter(self, adapter_name: str) -> nn.Module: raise NotImplementedError() class AdapterLayer(AdapterLayerBase, nn.Module): def __init__(self, location_key: str, config): super().__init__() self.location_key = location_key self.config = config def _init_adapter_modules(self): self.adapters = nn.ModuleDict(dict()) self.adapter_fusion_layer = nn.ModuleDict(dict()) def add_adapter(self, adapter_name: str, layer_idx: int): self.layer_idx = layer_idx adapter_config = self.config.adapters.match( adapter_name, config_type=AdapterConfig, layer_idx=self.layer_idx, location_key=self.location_key, ) if adapter_config is not None: reduction_factor = adapter_config["reduction_factor"] if isinstance(reduction_factor, Mapping): if str(self.layer_idx) in reduction_factor: reduction_factor = reduction_factor[str(self.layer_idx)] elif "default" in reduction_factor: reduction_factor = reduction_factor["default"] 
else: raise KeyError( "The given reduction factor mapping does not give a default value and does not specify each " "reduction factor individually. You need to provide a default value like this: " '{"1": 16, "default": 16}' ) if adapter_config.is_parallel:
class AdapterLayerBase(ABC): """ Base class for all adaptation methods that require per-layer modules. """ @property def layer_idx(self): return getattr(self, "_layer_idx", -1) @layer_idx.setter def layer_idx(self, layer_idx): idx = getattr(self, "_layer_idx", layer_idx) assert idx == layer_idx setattr(self, "_layer_idx", idx) def get_active_setup(self, module_dict): if getattr(self.config, "is_adaptable", False): # First check current context before falling back to defined setup context = AdapterSetup.get_context() if context is not None: adapter_setup = context.adapter_setup else: adapter_setup = self.config.adapters.active_setup else: adapter_setup = None skip_adapters = adapter_setup is None or ( self.config.adapters.skip_layers is not None and self.layer_idx in self.config.adapters.skip_layers ) if not skip_adapters and (len(set(module_dict.keys()) & adapter_setup.flatten()) > 0): return adapter_setup else: return None def _store_gating_score(self, adapter_name, gating_score): context = ForwardContext.get_context() if context.output_adapter_gating_scores: gating_cache = context.adapter_gating_scores if self.layer_idx not in gating_cache[adapter_name]: gating_cache[adapter_name][self.layer_idx] = {} gating_score = gating_score.detach().squeeze().cpu().numpy() if len(gating_score.shape) == 0: gating_score = np.expand_dims(gating_score, axis=0) cache_score = gating_cache[adapter_name][self.layer_idx].get(self.location_key, None) if cache_score is not None: gating_cache[adapter_name][self.layer_idx][self.location_key] = np.column_stack( (cache_score, gating_score) ) else: gating_cache[adapter_name][self.layer_idx][self.location_key] = gating_score def _store_fusion_attentions(self, fusion_name, attentions): context = ForwardContext.get_context() if context.output_adapter_fusion_attentions: attention_cache = context.adapter_fusion_attentions if self.layer_idx not in attention_cache[fusion_name]: attention_cache[fusion_name][self.layer_idx] = {} attention_cache[fusion_name][self.layer_idx][self.location_key] = attentions @abstractmethod def add_adapter(self, adapter_name: str, layer_idx: int): raise NotImplementedError() @abstractmethod def delete_adapter(self, adapter_name: str): raise NotImplementedError() @abstractmethod def add_fusion_layer(self, adapter_names: Union[List, str]): raise NotImplementedError() @abstractmethod def delete_fusion_layer(self, adapter_names: Union[List, str]): raise NotImplementedError() @abstractmethod def enable_adapters(self, adapter_setup: AdapterCompositionBlock, unfreeze_adapters: bool, unfreeze_fusion: bool): raise NotImplementedError() @abstractmethod def get_adapter(self, adapter_name: str) -> nn.Module: raise NotImplementedError() class AdapterLayer(AdapterLayerBase, nn.Module): def __init__(self, location_key: str, config): super().__init__() self.location_key = location_key self.config = config def _init_adapter_modules(self): self.adapters = nn.ModuleDict(dict()) self.adapter_fusion_layer = nn.ModuleDict(dict()) def add_adapter(self, adapter_name: str, layer_idx: int): self.layer_idx = layer_idx adapter_config = self.config.adapters.match( adapter_name, config_type=AdapterConfig, layer_idx=self.layer_idx, location_key=self.location_key, ) if adapter_config is not None: reduction_factor = adapter_config["reduction_factor"] if isinstance(reduction_factor, Mapping): if str(self.layer_idx) in reduction_factor: reduction_factor = reduction_factor[str(self.layer_idx)] elif "default" in reduction_factor: reduction_factor = reduction_factor["default"] 
else: raise KeyError( "The given reduction factor mapping does not give a default value and does not specify each " "reduction factor individually. You need to provide a default value like this: " '{"1": 16, "default": 16}' ) if adapter_config.is_parallel:
adapter_class = ParallelAdapter
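The record's next_line picks ParallelAdapter when the config requests a parallel block; both the sequential Adapter and the parallel variant in the snippets above reduce to the same bottleneck computation (down-project, non-linearity, up-project, scaled residual). Below is a minimal standalone sketch of that computation in PyTorch, not the library's own class: the name TinyBottleneckAdapter, the fixed ReLU, and the constant scaling factor are illustrative simplifications (no PHM layer, gating, or layer norm options).

import torch
import torch.nn as nn

class TinyBottleneckAdapter(nn.Module):
    """Minimal bottleneck adapter: down-project, non-linearity, up-project, scaled residual."""
    def __init__(self, hidden_size: int, reduction_factor: int = 16, scaling: float = 1.0):
        super().__init__()
        bottleneck = max(hidden_size // reduction_factor, 1)
        self.down = nn.Linear(hidden_size, bottleneck)
        self.act = nn.ReLU()
        self.up = nn.Linear(bottleneck, hidden_size)
        self.scaling = scaling
        # "mam_adapter"-style init: zero the up-projection so the block starts as an identity map
        nn.init.zeros_(self.up.weight)
        nn.init.zeros_(self.up.bias)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Sequential placement: the adapter output is added back onto its own input.
        return hidden_states + self.scaling * self.up(self.act(self.down(hidden_states)))

x = torch.randn(2, 5, 768)            # (batch, seq_len, hidden)
adapter = TinyBottleneckAdapter(768)
print(adapter(x).shape)               # torch.Size([2, 5, 768])

In the parallel placement, the block would instead be fed the sublayer input and its output added to the sublayer output, rather than wrapping its own input as done here.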
11
2023-10-18 18:05:54+00:00
12k
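The BertFusion module captured in this record attends over the per-token outputs of several adapters and mixes them into one hidden state. As a rough sketch of that idea, assuming the adapter outputs are already stacked along a separate axis (the function name and shapes below are hypothetical; temperature annealing, dropout, and the query/key/value and residual options from the original config are omitted):

import torch
import torch.nn.functional as F

def fuse_adapter_outputs(query: torch.Tensor, adapter_outputs: torch.Tensor) -> torch.Tensor:
    """query: (batch, tokens, hidden); adapter_outputs: (batch, tokens, n_adapters, hidden).
    Returns a per-token mixture of the adapter outputs, weighted by attention over the adapter axis."""
    # Raw scores: dot product of the query with each adapter's output -> (batch, tokens, n_adapters)
    scores = torch.einsum("bth,btah->bta", query, adapter_outputs)
    probs = F.softmax(scores, dim=-1)
    # Weighted sum over the adapter axis -> (batch, tokens, hidden)
    return torch.einsum("bta,btah->bth", probs, adapter_outputs)

q = torch.randn(2, 7, 64)
v = torch.randn(2, 7, 3, 64)   # outputs of 3 adapters for the same tokens
print(fuse_adapter_outputs(q, v).shape)   # torch.Size([2, 7, 64])

Keeping the adapter axis explicit makes the softmax a per-token distribution over adapters, which is the quantity AdapterFusion learns.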
justincui03/tesla
distill.py
[ { "identifier": "augment", "path": "utils.py", "snippet": "def augment(images, dc_aug_param, device):\n # This can be sped up in the future.\n\n if dc_aug_param != None and dc_aug_param['strategy'] != 'none':\n scale = dc_aug_param['scale']\n crop = dc_aug_param['crop']\n rotate = dc_aug_param['rotate']\n noise = dc_aug_param['noise']\n strategy = dc_aug_param['strategy']\n\n shape = images.shape\n mean = []\n for c in range(shape[1]):\n mean.append(float(torch.mean(images[:,c])))\n\n def cropfun(i):\n im_ = torch.zeros(shape[1],shape[2]+crop*2,shape[3]+crop*2, dtype=torch.float, device=device)\n for c in range(shape[1]):\n im_[c] = mean[c]\n im_[:, crop:crop+shape[2], crop:crop+shape[3]] = images[i]\n r, c = np.random.permutation(crop*2)[0], np.random.permutation(crop*2)[0]\n images[i] = im_[:, r:r+shape[2], c:c+shape[3]]\n\n def scalefun(i):\n h = int((np.random.uniform(1 - scale, 1 + scale)) * shape[2])\n w = int((np.random.uniform(1 - scale, 1 + scale)) * shape[2])\n tmp = F.interpolate(images[i:i + 1], [h, w], )[0]\n mhw = max(h, w, shape[2], shape[3])\n im_ = torch.zeros(shape[1], mhw, mhw, dtype=torch.float, device=device)\n r = int((mhw - h) / 2)\n c = int((mhw - w) / 2)\n im_[:, r:r + h, c:c + w] = tmp\n r = int((mhw - shape[2]) / 2)\n c = int((mhw - shape[3]) / 2)\n images[i] = im_[:, r:r + shape[2], c:c + shape[3]]\n\n def rotatefun(i):\n im_ = scipyrotate(images[i].cpu().data.numpy(), angle=np.random.randint(-rotate, rotate), axes=(-2, -1), cval=np.mean(mean))\n r = int((im_.shape[-2] - shape[-2]) / 2)\n c = int((im_.shape[-1] - shape[-1]) / 2)\n images[i] = torch.tensor(im_[:, r:r + shape[-2], c:c + shape[-1]], dtype=torch.float, device=device)\n\n def noisefun(i):\n images[i] = images[i] + noise * torch.randn(shape[1:], dtype=torch.float, device=device)\n\n\n augs = strategy.split('_')\n\n for i in range(shape[0]):\n choice = np.random.permutation(augs)[0] # randomly implement one augmentation\n if choice == 'crop':\n cropfun(i)\n elif choice == 'scale':\n scalefun(i)\n elif choice == 'rotate':\n rotatefun(i)\n elif choice == 'noise':\n noisefun(i)\n\n return images" }, { "identifier": "get_dataset", "path": "utils.py", "snippet": "def get_dataset(dataset, data_path, batch_size=1, args=None):\n\n class_map = None\n loader_train_dict = None\n class_map_inv = None\n\n if dataset == 'CIFAR10':\n channel = 3\n im_size = (32, 32)\n num_classes = 10\n mean = [0.4914, 0.4822, 0.4465]\n std = [0.2023, 0.1994, 0.2010]\n if args.zca:\n transform = transforms.Compose([transforms.ToTensor()])\n else:\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])\n dst_train = datasets.CIFAR10(data_path, train=True, download=True, transform=transform) # no augmentation\n dst_test = datasets.CIFAR10(data_path, train=False, download=True, transform=transform)\n class_names = dst_train.classes\n class_map = {x:x for x in range(num_classes)}\n\n\n elif dataset == 'Tiny':\n channel = 3\n im_size = (64, 64)\n num_classes = 200\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n if args.zca:\n transform = transforms.Compose([transforms.ToTensor()])\n else:\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])\n dst_train = datasets.ImageFolder(os.path.join(data_path, \"train\"), transform=transform) # no augmentation\n dst_test = datasets.ImageFolder(os.path.join(data_path, \"val\", \"images\"), transform=transform)\n class_names = dst_train.classes\n class_map = {x:x for x in 
range(num_classes)}\n\n\n elif dataset == 'ImageNet':\n channel = 3\n im_size = (64, 64)\n # im_size = (128, 128)\n # data_path = '/home/justincui/data/' + str(im_size[0])\n num_classes = 1000\n data_path = '/nfs/data/justincui/data/imagenet2012/' + str(im_size[0])\n\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n\n data_transforms = {\n 'train': transforms.Compose([\n # transforms.Resize(im_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'val': transforms.Compose([\n # transforms.Resize(im_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n }\n\n dst_train = datasets.ImageFolder(os.path.join(data_path, \"train\"), transform=data_transforms['train']) # no augmentation\n dst_test = datasets.ImageFolder(os.path.join(data_path, \"val\"), transform=data_transforms['val'])\n class_names = dst_train.classes\n class_map = {x:x for x in range(num_classes)}\n\n elif dataset.startswith('CIFAR100'):\n channel = 3\n im_size = (32, 32)\n num_classes = 100\n mean = [0.4914, 0.4822, 0.4465]\n std = [0.2023, 0.1994, 0.2010]\n\n if args.zca:\n transform = transforms.Compose([transforms.ToTensor()])\n else:\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std), transforms.Resize(im_size)])\n dst_train = datasets.CIFAR100(data_path, train=True, download=True, transform=transform) # no augmentation\n dst_test = datasets.CIFAR100(data_path, train=False, download=True, transform=transform)\n class_names = dst_train.classes\n class_map = {x: x for x in range(num_classes)}\n\n else:\n exit('unknown dataset: %s'%dataset)\n\n if args.zca:\n images = []\n labels = []\n print(\"Train ZCA\")\n for i in tqdm.tqdm(range(len(dst_train))):\n im, lab = dst_train[i]\n images.append(im)\n labels.append(lab)\n images = torch.stack(images, dim=0).to(args.device)\n labels = torch.tensor(labels, dtype=torch.long, device=\"cpu\")\n zca = K.enhance.ZCAWhitening(eps=0.1, compute_inv=True)\n zca.fit(images)\n zca_images = zca(images).to(\"cpu\")\n dst_train = TensorDataset(zca_images, labels)\n\n images = []\n labels = []\n print(\"Test ZCA\")\n for i in tqdm.tqdm(range(len(dst_test))):\n im, lab = dst_test[i]\n images.append(im)\n labels.append(lab)\n images = torch.stack(images, dim=0).to(args.device)\n labels = torch.tensor(labels, dtype=torch.long, device=\"cpu\")\n\n zca_images = zca(images).to(\"cpu\")\n dst_test = TensorDataset(zca_images, labels)\n\n args.zca_trans = zca\n\n\n testloader = torch.utils.data.DataLoader(dst_test, batch_size=128, shuffle=False, num_workers=2)\n\n\n return channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader, loader_train_dict, class_map, class_map_inv" }, { "identifier": "get_network", "path": "utils.py", "snippet": "def get_network(model, channel, num_classes, im_size=(32, 32), dist=True):\n torch.random.manual_seed(int(time.time() * 1000) % 100000)\n net_width, net_depth, net_act, net_norm, net_pooling = get_default_convnet_setting()\n\n if model == 'ConvNet':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD1':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=1, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD2':\n net = 
ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=2, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD3':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=3, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD4':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=4, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD5':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=5, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD6':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=6, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD7':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=7, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n elif model == 'ConvNetD8':\n net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=8, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)\n\n else:\n net = None\n exit('DC error: unknown model')\n\n if dist:\n gpu_num = torch.cuda.device_count()\n if gpu_num>0:\n device = 'cuda'\n if gpu_num>1:\n net = nn.DataParallel(net)\n else:\n device = 'cpu'\n net = net.to(device)\n\n return net" }, { "identifier": "get_eval_pool", "path": "utils.py", "snippet": "def get_eval_pool(eval_mode, model, model_eval):\n if eval_mode == 'M': # multiple architectures\n # model_eval_pool = ['MLP', 'ConvNet', 'AlexNet', 'VGG11', 'ResNet18', 'LeNet']\n model_eval_pool = ['ConvNet', 'AlexNet', 'VGG11', 'ResNet18_AP', 'ResNet18']\n # model_eval_pool = ['MLP', 'ConvNet', 'AlexNet', 'VGG11', 'ResNet18']\n elif eval_mode == 'W': # ablation study on network width\n model_eval_pool = ['ConvNetW32', 'ConvNetW64', 'ConvNetW128', 'ConvNetW256']\n elif eval_mode == 'D': # ablation study on network depth\n model_eval_pool = ['ConvNetD1', 'ConvNetD2', 'ConvNetD3', 'ConvNetD4']\n elif eval_mode == 'A': # ablation study on network activation function\n model_eval_pool = ['ConvNetAS', 'ConvNetAR', 'ConvNetAL']\n elif eval_mode == 'P': # ablation study on network pooling layer\n model_eval_pool = ['ConvNetNP', 'ConvNetMP', 'ConvNetAP']\n elif eval_mode == 'N': # ablation study on network normalization layer\n model_eval_pool = ['ConvNetNN', 'ConvNetBN', 'ConvNetLN', 'ConvNetIN', 'ConvNetGN']\n elif eval_mode == 'S': # itself\n model_eval_pool = [model[:model.index('BN')]] if 'BN' in model else [model]\n elif eval_mode == 'C':\n model_eval_pool = [model, 'ConvNet']\n else:\n model_eval_pool = [model_eval]\n return model_eval_pool" }, { "identifier": "evaluate_synset", "path": "utils.py", "snippet": "def evaluate_synset(it_eval, net, images_train, labels_train, testloader, args, return_loss=False, texture=False):\n net = net.to(args.device)\n images_train = images_train.to(args.device)\n labels_train = labels_train.to(args.device)\n lr = float(args.lr_net)\n Epoch = int(args.epoch_eval_train)\n lr_schedule = [Epoch//2+1]\n optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)\n\n criterion = nn.CrossEntropyLoss().to(args.device)\n\n dst_train = TensorDataset(images_train, labels_train)\n trainloader = 
torch.utils.data.DataLoader(dst_train, batch_size=args.batch_train, shuffle=True, num_workers=0)\n\n start = time.time()\n acc_train_list = []\n loss_train_list = []\n\n for ep in tqdm.tqdm(range(Epoch+1)):\n loss_train, acc_train = epoch('train', trainloader, net, optimizer, criterion, args, aug=True, texture=texture)\n acc_train_list.append(acc_train)\n loss_train_list.append(loss_train)\n if ep == Epoch:\n with torch.no_grad():\n loss_test, acc_test = epoch('test', testloader, net, optimizer, criterion, args, aug=False)\n if ep in lr_schedule:\n lr *= 0.1\n optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)\n\n\n time_train = time.time() - start\n\n print('%s Evaluate_%02d: epoch = %04d train time = %d s train loss = %.6f train acc = %.4f, test acc = %.4f' % (get_time(), it_eval, Epoch, int(time_train), loss_train, acc_train, acc_test))\n\n if return_loss:\n return net, acc_train_list, acc_test, loss_train_list, loss_test\n else:\n return net, acc_train_list, acc_test" }, { "identifier": "get_time", "path": "utils.py", "snippet": "def get_time():\n return str(time.strftime(\"[%Y-%m-%d %H:%M:%S]\", time.localtime()))" }, { "identifier": "DiffAugment", "path": "utils.py", "snippet": "def DiffAugment(x, strategy='', seed = -1, param = None):\n if seed == -1:\n param.batchmode = False\n else:\n param.batchmode = True\n\n param.latestseed = seed\n\n if strategy == 'None' or strategy == 'none':\n return x\n\n if strategy:\n if param.aug_mode == 'M': # original\n for p in strategy.split('_'):\n for f in AUGMENT_FNS[p]:\n x = f(x, param)\n elif param.aug_mode == 'S':\n pbties = strategy.split('_')\n set_seed_DiffAug(param)\n p = pbties[torch.randint(0, len(pbties), size=(1,)).item()]\n for f in AUGMENT_FNS[p]:\n x = f(x, param)\n else:\n exit('Error ZH: unknown augmentation mode.')\n x = x.contiguous()\n return x" }, { "identifier": "DiffAugmentList", "path": "utils.py", "snippet": "def DiffAugmentList(x_list, strategy='', seed = -1, param = None):\n if seed == -1:\n param.batchmode = False\n else:\n param.batchmode = True\n\n param.latestseed = seed\n\n if strategy == 'None' or strategy == 'none':\n return x\n\n if strategy:\n if param.aug_mode == 'M': # original\n for p in strategy.split('_'):\n for f in AUGMENT_FNS[p]:\n for x in x_list:\n x = f(x, param)\n elif param.aug_mode == 'S':\n pbties = strategy.split('_')\n set_seed_DiffAug(param)\n p = pbties[torch.randint(0, len(pbties), size=(1,)).item()]\n for f in AUGMENT_FNS[p]:\n for x in x_list:\n x = f(x, param)\n else:\n exit('Error ZH: unknown augmentation mode.')\n for x in x_list:\n x = x.contiguous()\n return x_list" }, { "identifier": "ParamDiffAug", "path": "utils.py", "snippet": "class ParamDiffAug():\n def __init__(self):\n self.aug_mode = 'S' #'multiple or single'\n self.prob_flip = 0.5\n self.ratio_scale = 1.2\n self.ratio_rotate = 15.0\n self.ratio_crop_pad = 0.125\n self.ratio_cutout = 0.5 # the size would be 0.5x0.5\n self.ratio_noise = 0.05\n self.brightness = 1.0\n self.saturation = 2.0\n self.contrast = 0.5" }, { "identifier": "ReparamModule", "path": "reparam_module.py", "snippet": "class ReparamModule(nn.Module):\n def _get_module_from_name(self, mn):\n if mn == '':\n return self\n m = self\n for p in mn.split('.'):\n m = getattr(m, p)\n return m\n\n def __init__(self, module):\n super(ReparamModule, self).__init__()\n self.module = module\n\n param_infos = [] # (module name/path, param name)\n shared_param_memo = {}\n shared_param_infos = [] # (module name/path, param name, src 
module name/path, src param_name)\n params = []\n param_numels = []\n param_shapes = []\n for mn, m in self.named_modules():\n for n, p in m.named_parameters(recurse=False):\n if p is not None:\n if p in shared_param_memo:\n shared_mn, shared_n = shared_param_memo[p]\n shared_param_infos.append((mn, n, shared_mn, shared_n))\n else:\n shared_param_memo[p] = (mn, n)\n param_infos.append((mn, n))\n params.append(p.detach())\n param_numels.append(p.numel())\n param_shapes.append(p.size())\n\n assert len(set(p.dtype for p in params)) <= 1, \\\n \"expects all parameters in module to have same dtype\"\n\n # store the info for unflatten\n self._param_infos = tuple(param_infos)\n self._shared_param_infos = tuple(shared_param_infos)\n self._param_numels = tuple(param_numels)\n self._param_shapes = tuple(param_shapes)\n\n # flatten\n flat_param = nn.Parameter(torch.cat([p.reshape(-1) for p in params], 0))\n self.register_parameter('flat_param', flat_param)\n self.param_numel = flat_param.numel()\n del params\n del shared_param_memo\n\n # deregister the names as parameters\n for mn, n in self._param_infos:\n delattr(self._get_module_from_name(mn), n)\n for mn, n, _, _ in self._shared_param_infos:\n delattr(self._get_module_from_name(mn), n)\n\n # register the views as plain attributes\n self._unflatten_param(self.flat_param)\n\n # now buffers\n # they are not reparametrized. just store info as (module, name, buffer)\n buffer_infos = []\n for mn, m in self.named_modules():\n for n, b in m.named_buffers(recurse=False):\n if b is not None:\n buffer_infos.append((mn, n, b))\n\n self._buffer_infos = tuple(buffer_infos)\n self._traced_self = None\n\n def trace(self, example_input, **trace_kwargs):\n assert self._traced_self is None, 'This ReparamModule is already traced'\n\n if isinstance(example_input, torch.Tensor):\n example_input = (example_input,)\n example_input = tuple(example_input)\n example_param = (self.flat_param.detach().clone(),)\n example_buffers = (tuple(b.detach().clone() for _, _, b in self._buffer_infos),)\n\n self._traced_self = torch.jit.trace_module(\n self,\n inputs=dict(\n _forward_with_param=example_param + example_input,\n _forward_with_param_and_buffers=example_param + example_buffers + example_input,\n ),\n **trace_kwargs,\n )\n\n # replace forwards with traced versions\n self._forward_with_param = self._traced_self._forward_with_param\n self._forward_with_param_and_buffers = self._traced_self._forward_with_param_and_buffers\n return self\n\n def clear_views(self):\n for mn, n in self._param_infos:\n setattr(self._get_module_from_name(mn), n, None) # This will set as plain attr\n\n def _apply(self, *args, **kwargs):\n if self._traced_self is not None:\n self._traced_self._apply(*args, **kwargs)\n return self\n return super(ReparamModule, self)._apply(*args, **kwargs)\n\n def _unflatten_param(self, flat_param):\n ps = (t.view(s) for (t, s) in zip(flat_param.split(self._param_numels), self._param_shapes))\n for (mn, n), p in zip(self._param_infos, ps):\n setattr(self._get_module_from_name(mn), n, p) # This will set as plain attr\n for (mn, n, shared_mn, shared_n) in self._shared_param_infos:\n setattr(self._get_module_from_name(mn), n, getattr(self._get_module_from_name(shared_mn), shared_n))\n\n @contextmanager\n def unflattened_param(self, flat_param):\n saved_views = [getattr(self._get_module_from_name(mn), n) for mn, n in self._param_infos]\n self._unflatten_param(flat_param)\n yield\n # Why not just `self._unflatten_param(self.flat_param)`?\n # 1. 
because of https://github.com/pytorch/pytorch/issues/17583\n # 2. slightly faster since it does not require reconstruct the split+view\n # graph\n for (mn, n), p in zip(self._param_infos, saved_views):\n setattr(self._get_module_from_name(mn), n, p)\n for (mn, n, shared_mn, shared_n) in self._shared_param_infos:\n setattr(self._get_module_from_name(mn), n, getattr(self._get_module_from_name(shared_mn), shared_n))\n\n @contextmanager\n def replaced_buffers(self, buffers):\n for (mn, n, _), new_b in zip(self._buffer_infos, buffers):\n setattr(self._get_module_from_name(mn), n, new_b)\n yield\n for mn, n, old_b in self._buffer_infos:\n setattr(self._get_module_from_name(mn), n, old_b)\n\n def _forward_with_param_and_buffers(self, flat_param, buffers, *inputs, **kwinputs):\n with self.unflattened_param(flat_param):\n with self.replaced_buffers(buffers):\n return self.module(*inputs, **kwinputs)\n\n def _forward_with_param(self, flat_param, *inputs, **kwinputs):\n with self.unflattened_param(flat_param):\n return self.module(*inputs, **kwinputs)\n\n def forward(self, *inputs, flat_param=None, buffers=None, **kwinputs):\n flat_param = torch.squeeze(flat_param)\n # print(\"PARAMS ON DEVICE: \", flat_param.get_device())\n # print(\"DATA ON DEVICE: \", inputs[0].get_device())\n # flat_param.to(\"cuda:{}\".format(inputs[0].get_device()))\n # self.module.to(\"cuda:{}\".format(inputs[0].get_device()))\n if flat_param is None:\n flat_param = self.flat_param\n if buffers is None:\n return self._forward_with_param(flat_param, *inputs, **kwinputs)\n else:\n return self._forward_with_param_and_buffers(flat_param, tuple(buffers), *inputs, **kwinputs)" } ]
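The ReparamModule listed in this record's context flattens every parameter of a wrapped network into a single vector so that a forward pass can be run with an arbitrary flat parameter tensor, which is what lets the distillation loop differentiate through unrolled student updates. A rough equivalent of that idea using torch.func.functional_call (assumed available in PyTorch 2.x; the small network and variable names below are only illustrative, not the repo's API):

import torch
import torch.nn as nn
from torch.func import functional_call

net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))

# Flatten parameters into one vector, remembering names and shapes so it can be unflattened later.
names = [n for n, _ in net.named_parameters()]
shapes = [p.shape for _, p in net.named_parameters()]
numels = [p.numel() for _, p in net.named_parameters()]
flat = torch.cat([p.detach().reshape(-1) for p in net.parameters()]).requires_grad_(True)

def forward_with_flat(flat_param: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
    # Split the flat vector back into named tensors and run the module with them.
    chunks = torch.split(flat_param, numels)
    params = {n: c.view(s) for n, c, s in zip(names, chunks, shapes)}
    return functional_call(net, params, (x,))

x = torch.randn(3, 4)
out = forward_with_flat(flat, x)
out.sum().backward()          # gradients flow back into the flat parameter vector
print(flat.grad.shape)        # torch.Size([58])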
import os import argparse import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torchvision.utils import wandb import copy import random import warnings from tqdm import tqdm from utils import augment, get_dataset, get_network, get_eval_pool, evaluate_synset, get_time, DiffAugment, DiffAugmentList, ParamDiffAug from reparam_module import ReparamModule from torch.utils.data import Subset from torch.utils.data import DataLoader from PIL import PngImagePlugin
9,617
image_save = args.zca_trans.inverse_transform(image_save) image_save.cpu() torch.save(image_save.cpu(), os.path.join(save_dir, "images_zca_{}.pt".format(it))) upsampled = image_save if args.dataset != "ImageNet": upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=2) upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=3) grid = torchvision.utils.make_grid(upsampled, nrow=10, normalize=True, scale_each=True) wandb.log({"Reconstructed_Images": wandb.Image(torch.nan_to_num(grid.detach().cpu()))}, step=it) wandb.log({'Reconstructed_Pixels': wandb.Histogram(torch.nan_to_num(image_save.detach().cpu()))}, step=it) for clip_val in [2.5]: std = torch.std(image_save) mean = torch.mean(image_save) upsampled = torch.clip(image_save, min=mean - clip_val * std, max=mean + clip_val * std) if args.dataset != "ImageNet": upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=2) upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=3) grid = torchvision.utils.make_grid(upsampled, nrow=10, normalize=True, scale_each=True) wandb.log({"Clipped_Reconstructed_Images/std_{}".format(clip_val): wandb.Image( torch.nan_to_num(grid.detach().cpu()))}, step=it) wandb.log({"Synthetic_LR": syn_lr.detach().cpu()}, step=it) student_net = get_network(args.model, channel, num_classes, im_size, dist=False).to(args.device) # get a random model student_net = ReparamModule(student_net) if args.distributed: student_net = torch.nn.DataParallel(student_net) student_net.train() if not args.random_trajectory: if args.load_all: expert_trajectory = buffer[np.random.randint(0, len(buffer))] else: expert_trajectory = buffer[expert_idx] expert_idx += 1 if expert_idx == len(buffer): expert_idx = 0 file_idx += 1 if file_idx == len(expert_files): file_idx = 0 random.shuffle(expert_files) print("loading file {}".format(expert_files[file_idx])) if args.max_files != 1: del buffer buffer = torch.load(expert_files[file_idx]) if args.max_experts is not None: buffer = buffer[:args.max_experts] random.shuffle(buffer) start_epoch = np.random.randint(0, args.max_start_epoch) if not args.random_trajectory: starting_params = expert_trajectory[start_epoch] target_params = expert_trajectory[start_epoch+args.expert_epochs] else: starting_params = [p for p in student_net.parameters()] target_params = [p for p in student_net.parameters()] target_params = torch.cat([p.data.to(args.device).reshape(-1) for p in target_params], 0) student_params = [torch.cat([p.data.to(args.device).reshape(-1) for p in starting_params], 0).requires_grad_(True)] starting_params = torch.cat([p.data.to(args.device).reshape(-1) for p in starting_params], 0) param_dist = torch.tensor(0.0).to(args.device) param_dist += torch.nn.functional.mse_loss(starting_params, target_params, reduction="sum") # produce soft labels for soft label assignment. if args.teacher_label: label_net = get_network(args.model, channel, num_classes, im_size, dist=False).to(args.device) # get a random model label_net = ReparamModule(label_net) label_net.eval() # use the target param as the model param to get soft labels. 
label_params = copy.deepcopy(target_params.detach()).requires_grad_(False) batch_labels = [] SOFT_INIT_BATCH_SIZE = 50 if image_syn.shape[0] > SOFT_INIT_BATCH_SIZE and args.dataset == 'ImageNet': for indices in torch.split(torch.tensor([i for i in range(0, image_syn.shape[0])], dtype=torch.long), SOFT_INIT_BATCH_SIZE): batch_labels.append(label_net(image_syn[indices].detach().to(args.device), flat_param=label_params)) else: label_syn = label_net(image_syn.detach().to(args.device), flat_param=label_params) label_syn = torch.cat(batch_labels, dim=0) label_syn = torch.nn.functional.softmax(label_syn) del label_net, label_params for _ in batch_labels: del _ syn_images = image_syn y_hat = label_syn.to(args.device) syn_image_gradients = torch.zeros(syn_images.shape).to(args.device) x_list = [] original_x_list = [] y_list = [] indices_chunks = [] gradient_sum = torch.zeros(student_params[-1].shape).to(args.device) indices_chunks_copy = [] for _ in range(args.syn_steps): if not indices_chunks: indices = torch.randperm(len(syn_images)) indices_chunks = list(torch.split(indices, args.batch_syn)) these_indices = indices_chunks.pop() indices_chunks_copy.append(these_indices) x = syn_images[these_indices] this_y = y_hat[these_indices] original_x_list.append(x) if args.dsa and (not args.no_aug):
LARGE_ENOUGH_NUMBER = 100 PngImagePlugin.MAX_TEXT_CHUNK = LARGE_ENOUGH_NUMBER * (1024**2) warnings.filterwarnings("ignore", category=DeprecationWarning) def main(args): if args.zca and args.texture: raise AssertionError("Cannot use zca and texture together") if args.texture and args.pix_init == "real": print("WARNING: Using texture with real initialization will take a very long time to smooth out the boundaries between images.") if args.max_experts is not None and args.max_files is not None: args.total_experts = args.max_experts * args.max_files print("CUDNN STATUS: {}".format(torch.backends.cudnn.enabled)) args.dsa = True if args.dsa == 'True' else False args.device = 'cuda' if torch.cuda.is_available() else 'cpu' eval_it_pool = np.arange(0, args.Iteration + 1, args.eval_it).tolist() channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader, loader_train_dict, class_map, class_map_inv = get_dataset(args.dataset, args.data_path, args.batch_real, args=args) model_eval_pool = get_eval_pool(args.eval_mode, args.model, args.model) im_res = im_size[0] args.im_size = im_size accs_all_exps = dict() # record performances of all experiments for key in model_eval_pool: accs_all_exps[key] = [] data_save = [] if args.dsa: # args.epoch_eval_train = 1000 args.dc_aug_param = None args.dsa_param = ParamDiffAug() dsa_params = args.dsa_param if args.zca: zca_trans = args.zca_trans else: zca_trans = None wandb.init(sync_tensorboard=False, project="DatasetDistillation", job_type="CleanRepo", config=args, ) args = type('', (), {})() for key in wandb.config._items: setattr(args, key, wandb.config._items[key]) args.dsa_param = dsa_params args.zca_trans = zca_trans if args.batch_syn is None: args.batch_syn = num_classes * args.ipc args.distributed = torch.cuda.device_count() > 1 print('Hyper-parameters: \n', args.__dict__) print('Evaluation model pool: ', model_eval_pool) ''' organize the real dataset ''' indices_class = [[] for c in range(num_classes)] # Build label to index map print("---------------Build label to index map--------------") # For machines with limited RAM, it's impossible to load all ImageNet or even TinyImageNet into memory. # Even if it's possible, it will take too long to process. # Therefore we pregenerate an indices to image map and use this map to quickly random samples from ImageNet or TinyImageNet dataset. if args.dataset == 'ImageNet': indices_class = np.load('indices/imagenet_indices_class.npy', allow_pickle=True) elif args.dataset == 'Tiny': indices_class = np.load('indices/tiny_indices_class.npy', allow_pickle=True) else: for i, data in tqdm(enumerate(dst_train)): indices_class[data[1]].append(i) # for c in range(num_classes): # print('class c = %d: %d real images'%(c, len(indices_class[c]))) def get_images(c, n): # get random n images from class c idx_shuffle = np.random.permutation(indices_class[c])[:n] subset = Subset(dst_train, idx_shuffle) data_loader = DataLoader(subset, batch_size=n) # only read the first batch which has n(IPC) number of images. 
for data in data_loader: return data[0].to("cpu") ''' initialize the synthetic data ''' label_syn = torch.tensor([np.ones(args.ipc)*i for i in range(num_classes)], dtype=torch.long, requires_grad=False, device=args.device).view(-1) # [0,0,0, 1,1,1, ..., 9,9,9] if args.texture: image_syn = torch.randn(size=(num_classes * args.ipc, channel, im_size[0]*args.canvas_size, im_size[1]*args.canvas_size), dtype=torch.float) else: image_syn = torch.randn(size=(num_classes * args.ipc, channel, im_size[0], im_size[1]), dtype=torch.float) syn_lr = torch.tensor(args.lr_teacher).to(args.device) if args.pix_init == 'real': print('initialize synthetic data from random real images') for c in range(num_classes): image_syn.data[c * args.ipc:(c + 1) * args.ipc] = get_images(c, args.ipc).detach().data else: print('initialize synthetic data from random noise') ''' training ''' image_syn = image_syn.detach().to(args.device).requires_grad_(True) print(image_syn.shape) syn_lr = syn_lr.detach().to(args.device).requires_grad_(True) optimizer_img = torch.optim.SGD([image_syn], lr=args.lr_img, momentum=0.5) optimizer_lr = torch.optim.SGD([syn_lr], lr=args.lr_lr, momentum=0.5) optimizer_img.zero_grad() optimizer_lr.zero_grad() criterion = nn.CrossEntropyLoss().to(args.device) print('%s training begins'%get_time()) expert_dir = os.path.join(args.buffer_path, args.dataset) if args.dataset in ["CIFAR10", "CIFAR100"] and not args.zca: expert_dir += "_NO_ZCA" expert_dir = os.path.join(expert_dir, args.model) print("Expert Dir: {}".format(expert_dir)) if not args.random_trajectory: if args.load_all: buffer = [] n = 0 while os.path.exists(os.path.join(expert_dir, "replay_buffer_{}.pt".format(n))): buffer = buffer + torch.load(os.path.join(expert_dir, "replay_buffer_{}.pt".format(n))) n += 1 if n == 0: raise AssertionError("No buffers detected at {}".format(expert_dir)) else: expert_files = [] n = 0 while os.path.exists(os.path.join(expert_dir, "replay_buffer_{}.pt".format(n))): expert_files.append(os.path.join(expert_dir, "replay_buffer_{}.pt".format(n))) n += 1 if n == 0: raise AssertionError("No buffers detected at {}".format(expert_dir)) file_idx = 0 expert_idx = 0 random.shuffle(expert_files) if args.max_files is not None: expert_files = expert_files[:args.max_files] print("loading file {}".format(expert_files[file_idx])) buffer = torch.load(expert_files[file_idx]) if args.max_experts is not None: buffer = buffer[:args.max_experts] random.shuffle(buffer) best_acc = {m: 0 for m in model_eval_pool} best_std = {m: 0 for m in model_eval_pool} for it in range(0, args.Iteration+1): save_this_it = False # writer.add_scalar('Progress', it, it) wandb.log({"Progress": it}, step=it) ''' Evaluate synthetic data ''' if it in eval_it_pool and args.eval_it > 0: for model_eval in model_eval_pool: print('-------------------------\nEvaluation\nmodel_train = %s, model_eval = %s, iteration = %d'%(args.model, model_eval, it)) if args.dsa: print('DSA augmentation strategy: \n', args.dsa_strategy) print('DSA augmentation parameters: \n', args.dsa_param.__dict__) else: print('DC augmentation parameters: \n', args.dc_aug_param) accs_test = [] accs_train = [] for it_eval in range(args.num_eval): net_eval = get_network(model_eval, channel, num_classes, im_size).to(args.device) # get a random model eval_labs = label_syn with torch.no_grad(): image_save = image_syn image_syn_eval, label_syn_eval = copy.deepcopy(image_save.detach()), copy.deepcopy(eval_labs.detach()) # avoid any unaware modification args.lr_net = syn_lr.item() _, acc_train, acc_test = 
evaluate_synset(it_eval, net_eval, image_syn_eval, label_syn_eval, testloader, args, texture=args.texture) accs_test.append(acc_test) accs_train.append(acc_train) accs_test = np.array(accs_test) accs_train = np.array(accs_train) acc_test_mean = np.mean(accs_test) acc_test_std = np.std(accs_test) if acc_test_mean > best_acc[model_eval]: best_acc[model_eval] = acc_test_mean best_std[model_eval] = acc_test_std save_this_it = True print('Evaluate %d random %s, mean = %.4f std = %.4f\n-------------------------'%(len(accs_test), model_eval, acc_test_mean, acc_test_std)) wandb.log({'Accuracy/{}'.format(model_eval): acc_test_mean}, step=it) wandb.log({'Max_Accuracy/{}'.format(model_eval): best_acc[model_eval]}, step=it) wandb.log({'Std/{}'.format(model_eval): acc_test_std}, step=it) wandb.log({'Max_Std/{}'.format(model_eval): best_std[model_eval]}, step=it) if it in eval_it_pool and (save_this_it or it % 1000 == 0) and args.eval_it > 0: with torch.no_grad(): image_save = image_syn.cuda() save_dir = os.path.join(".", "logged_files", args.dataset, 'offline' if wandb.run.name is None else wandb.run.name) if not os.path.exists(save_dir): os.makedirs(save_dir) torch.save(image_save.cpu(), os.path.join(save_dir, "images_{}.pt".format(it))) torch.save(label_syn.cpu(), os.path.join(save_dir, "labels_{}.pt".format(it))) if save_this_it: torch.save(image_save.cpu(), os.path.join(save_dir, "images_best.pt".format(it))) torch.save(label_syn.cpu(), os.path.join(save_dir, "labels_best.pt".format(it))) wandb.log({"Pixels": wandb.Histogram(torch.nan_to_num(image_syn.detach().cpu()))}, step=it) if args.ipc < 50 or args.force_save: upsampled = image_save if args.dataset != "ImageNet": upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=2) upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=3) grid = torchvision.utils.make_grid(upsampled, nrow=10, normalize=True, scale_each=True) wandb.log({"Synthetic_Images": wandb.Image(torch.nan_to_num(grid.detach().cpu()))}, step=it) wandb.log({'Synthetic_Pixels': wandb.Histogram(torch.nan_to_num(image_save.detach().cpu()))}, step=it) for clip_val in [2.5]: std = torch.std(image_save) mean = torch.mean(image_save) upsampled = torch.clip(image_save, min=mean-clip_val*std, max=mean+clip_val*std) if args.dataset != "ImageNet": upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=2) upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=3) grid = torchvision.utils.make_grid(upsampled, nrow=10, normalize=True, scale_each=True) wandb.log({"Clipped_Synthetic_Images/std_{}".format(clip_val): wandb.Image(torch.nan_to_num(grid.detach().cpu()))}, step=it) if args.zca: image_save = image_save.to(args.device) image_save = args.zca_trans.inverse_transform(image_save) image_save.cpu() torch.save(image_save.cpu(), os.path.join(save_dir, "images_zca_{}.pt".format(it))) upsampled = image_save if args.dataset != "ImageNet": upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=2) upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=3) grid = torchvision.utils.make_grid(upsampled, nrow=10, normalize=True, scale_each=True) wandb.log({"Reconstructed_Images": wandb.Image(torch.nan_to_num(grid.detach().cpu()))}, step=it) wandb.log({'Reconstructed_Pixels': wandb.Histogram(torch.nan_to_num(image_save.detach().cpu()))}, step=it) for clip_val in [2.5]: std = torch.std(image_save) mean = torch.mean(image_save) upsampled = torch.clip(image_save, min=mean - clip_val * std, max=mean + clip_val * std) if args.dataset != "ImageNet": upsampled = 
torch.repeat_interleave(upsampled, repeats=4, dim=2) upsampled = torch.repeat_interleave(upsampled, repeats=4, dim=3) grid = torchvision.utils.make_grid(upsampled, nrow=10, normalize=True, scale_each=True) wandb.log({"Clipped_Reconstructed_Images/std_{}".format(clip_val): wandb.Image( torch.nan_to_num(grid.detach().cpu()))}, step=it) wandb.log({"Synthetic_LR": syn_lr.detach().cpu()}, step=it) student_net = get_network(args.model, channel, num_classes, im_size, dist=False).to(args.device) # get a random model student_net = ReparamModule(student_net) if args.distributed: student_net = torch.nn.DataParallel(student_net) student_net.train() if not args.random_trajectory: if args.load_all: expert_trajectory = buffer[np.random.randint(0, len(buffer))] else: expert_trajectory = buffer[expert_idx] expert_idx += 1 if expert_idx == len(buffer): expert_idx = 0 file_idx += 1 if file_idx == len(expert_files): file_idx = 0 random.shuffle(expert_files) print("loading file {}".format(expert_files[file_idx])) if args.max_files != 1: del buffer buffer = torch.load(expert_files[file_idx]) if args.max_experts is not None: buffer = buffer[:args.max_experts] random.shuffle(buffer) start_epoch = np.random.randint(0, args.max_start_epoch) if not args.random_trajectory: starting_params = expert_trajectory[start_epoch] target_params = expert_trajectory[start_epoch+args.expert_epochs] else: starting_params = [p for p in student_net.parameters()] target_params = [p for p in student_net.parameters()] target_params = torch.cat([p.data.to(args.device).reshape(-1) for p in target_params], 0) student_params = [torch.cat([p.data.to(args.device).reshape(-1) for p in starting_params], 0).requires_grad_(True)] starting_params = torch.cat([p.data.to(args.device).reshape(-1) for p in starting_params], 0) param_dist = torch.tensor(0.0).to(args.device) param_dist += torch.nn.functional.mse_loss(starting_params, target_params, reduction="sum") # produce soft labels for soft label assignment. if args.teacher_label: label_net = get_network(args.model, channel, num_classes, im_size, dist=False).to(args.device) # get a random model label_net = ReparamModule(label_net) label_net.eval() # use the target param as the model param to get soft labels. label_params = copy.deepcopy(target_params.detach()).requires_grad_(False) batch_labels = [] SOFT_INIT_BATCH_SIZE = 50 if image_syn.shape[0] > SOFT_INIT_BATCH_SIZE and args.dataset == 'ImageNet': for indices in torch.split(torch.tensor([i for i in range(0, image_syn.shape[0])], dtype=torch.long), SOFT_INIT_BATCH_SIZE): batch_labels.append(label_net(image_syn[indices].detach().to(args.device), flat_param=label_params)) else: label_syn = label_net(image_syn.detach().to(args.device), flat_param=label_params) label_syn = torch.cat(batch_labels, dim=0) label_syn = torch.nn.functional.softmax(label_syn) del label_net, label_params for _ in batch_labels: del _ syn_images = image_syn y_hat = label_syn.to(args.device) syn_image_gradients = torch.zeros(syn_images.shape).to(args.device) x_list = [] original_x_list = [] y_list = [] indices_chunks = [] gradient_sum = torch.zeros(student_params[-1].shape).to(args.device) indices_chunks_copy = [] for _ in range(args.syn_steps): if not indices_chunks: indices = torch.randperm(len(syn_images)) indices_chunks = list(torch.split(indices, args.batch_syn)) these_indices = indices_chunks.pop() indices_chunks_copy.append(these_indices) x = syn_images[these_indices] this_y = y_hat[these_indices] original_x_list.append(x) if args.dsa and (not args.no_aug):
x = DiffAugment(x, args.dsa_strategy, param=args.dsa_param)
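The record's next_line applies DiffAugment to a mini-batch of synthetic images; the essential property is that each augmentation is built from differentiable tensor ops, so gradients reach the synthetic pixels. A toy example of one such op, a random per-image brightness shift, offered only as representative of the idea rather than the repo's actual strategy set:

import torch

def rand_brightness(x: torch.Tensor, strength: float = 1.0) -> torch.Tensor:
    """Differentiable brightness jitter: one random offset per image, applied with plain tensor ops."""
    shift = (torch.rand(x.size(0), 1, 1, 1, device=x.device) - 0.5) * strength
    return x + shift

syn_images = torch.randn(8, 3, 32, 32, requires_grad=True)
aug = rand_brightness(syn_images)
aug.mean().backward()
print(syn_images.grad is not None)   # True: gradients flow back through the augmentation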
6
2023-10-17 23:11:36+00:00
12k
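Putting this record's pieces together: distill.py starts a student from an expert's checkpoint, trains it on the synthetic images for a few unrolled steps, and then penalizes the distance between the student's final parameters and a later expert checkpoint, normalized by the param_dist term computed above. A hedged sketch of that matching objective on made-up flat parameter vectors (in the real loop, student_final comes from differentiable inner-loop updates, so this gradient keeps flowing into the synthetic images and the learned learning rate):

import torch
import torch.nn.functional as F

def trajectory_matching_loss(student_final, expert_start, expert_target):
    """MTT-style objective: ||student_final - expert_target||^2 / ||expert_start - expert_target||^2."""
    num = F.mse_loss(student_final, expert_target, reduction="sum")
    den = F.mse_loss(expert_start, expert_target, reduction="sum")
    return num / den

expert_start = torch.randn(1000)
expert_target = torch.randn(1000)
student_final = torch.randn(1000, requires_grad=True)  # stands in for the flattened student parameters
loss = trajectory_matching_loss(student_final, expert_start, expert_target)
loss.backward()
print(student_final.grad.shape)   # gradient that would be backpropagated into the synthetic data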
upiterbarg/hihack
models/utils.py
[ { "identifier": "CDGPT5", "path": "models/cdgpt5.py", "snippet": "class CDGPT5(nn.Module):\n def __init__(self, shape, action_space, flags, device):\n super(CDGPT5, self).__init__()\n\n self.flags = flags\n self.num_actions = len(action_space)\n self.use_prev_action = flags.use_prev_action\n\n self.topline_encoder = TopLineEncoder()\n self.bottomline_encoder = torch.jit.script(BottomLinesEncoder())\n\n pixel_size = flags.pixel_size\n if flags.crop_dim == 0:\n screen_shape = (24 * pixel_size, 80 * pixel_size)\n else:\n screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size)\n\n self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape))\n\n self.prev_actions_dim = 128 if self.use_prev_action else 0\n\n if flags.cdgpt5_xxl_policy:\n self.hidden_dim = 1024\n else:\n self.hidden_dim = 512\n\n self.h_dim = sum(\n [\n self.topline_encoder.hidden_dim,\n self.bottomline_encoder.hidden_dim,\n self.screen_encoder.hidden_dim,\n self.prev_actions_dim,\n ]\n )\n \n self.core = nn.LSTM(self.h_dim, self.hidden_dim, num_layers=1)\n\n if flags.cdgpt5_xxl_decoder:\n self.policy_hidden_dim = 1024\n self.policy = nn.Sequential(nn.Linear(self.hidden_dim, self.policy_hidden_dim),\n nn.ELU(),\n nn.Linear(self.policy_hidden_dim, self.policy_hidden_dim),\n nn.ELU(),\n nn.Linear(self.policy_hidden_dim, self.num_actions)\n )\n else:\n self.policy = nn.Linear(self.hidden_dim, self.num_actions)\n\n self.baseline = nn.Linear(self.hidden_dim, 1)\n self.version = 0\n self.inference_unroll_length = flags.unroll_length if not 'inference_unroll_length' in flags else flags.inference_unroll_length\n\n def initial_state(self, batch_size=1):\n return tuple(\n torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size)\n for _ in range(2)\n )\n\n def forward(self, inputs, core_state=None):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n\n topline = inputs[\"tty_chars\"][..., 0, :]\n bottom_line = inputs[\"tty_chars\"][..., -2:, :]\n\n st = [\n self.topline_encoder(\n topline.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.bottomline_encoder(\n bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.screen_encoder(\n inputs[\"screen_image\"]\n .float(memory_format=torch.contiguous_format)\n .view(T * B, C, H, W)\n ),\n ]\n if self.use_prev_action:\n st.append(torch.nn.functional.one_hot(inputs[\"prev_action\"], self.prev_actions_dim).view(T * B, -1))\n\n st = torch.cat(st, dim=1)\n\n core_input = st.view(T, B, -1)\n core_output_list = []\n notdone = (~inputs[\"done\"]).float()\n\n for input, nd in zip(core_input.unbind(), notdone.unbind()):\n # Reset core state to zero whenever an episode ended.\n # Make `done` broadcastable with (num_layers, B, hidden_size)\n nd = nd.view(1, -1, 1)\n core_state = tuple(nd * t for t in core_state)\n output, core_state = self.core(input.unsqueeze(0), core_state)\n core_output_list.append(output)\n\n core_output = torch.flatten(torch.cat(core_output_list), 0, 1)\n\n # -- [B' x A]\n policy_logits = self.policy(core_output)\n\n # -- [B' x 1]\n baseline = self.baseline(core_output)\n\n action = torch.multinomial(F.softmax(policy_logits + 1e-5, dim=1), num_samples=1)\n\n policy_logits = policy_logits.view(T, B, -1)\n baseline = baseline.view(T, B)\n action = action.view(T, B)\n version = torch.ones_like(action) * self.version\n\n output = dict(\n policy_logits=policy_logits,\n baseline=baseline,\n action=action,\n version=version,\n )\n\n return (output, core_state)" }, { "identifier": 
"CleavedHierarchicalPolicy", "path": "models/cleaved_hierarchical_policy.py", "snippet": "class CleavedHierarchicalPolicy(nn.Module):\n def __init__(self, \n flags,\n high_level_model, \n low_level_model):\n super(CleavedHierarchicalPolicy, self).__init__()\n self.high_level_model = high_level_model\n self.low_level_model = low_level_model\n self.num_strategies = self.high_level_model.num_strategies\n\n self.gumbel_softmax_tau = 1\n if 'gumbel_softmax_tau' in flags:\n self.gumbel_softmax_tau = flags.gumbel_softmax_tau\n\n self.disable_high_level_policy_gradients = flags.disable_high_level_policy_gradients\n self.disable_low_level_policy_gradients = flags.disable_low_level_policy_gradients\n self.version = 0\n self.eps_greedy = flags.eps_greedy if 'eps_greedy' in flags else 1\n\n\n def initial_state(self, batch_size=1):\n high_level_core_state = self.high_level_model.initial_state(batch_size)\n low_level_core_state = self.low_level_model.initial_state(batch_size)\n return high_level_core_state + low_level_core_state\n\n def parameters(self):\n if self.disable_high_level_policy_gradients:\n return self.low_level_model.parameters()\n elif self.disable_low_level_policy_gradients:\n return self.high_level_model.parameters()\n return list(self.low_level_model.parameters()) + list(self.high_level_model.parameters())\n\n def buffers(self):\n if self.disable_high_level_policy_gradients:\n return self.low_level_model.buffers()\n elif self.disable_low_level_policy_gradients:\n return self.high_level_model.buffers()\n return list(self.low_level_model.buffers()) + list(self.high_level_model.buffers())\n\n def forward(self, inputs, core_state, last_ttyrec_data=None):\n high_level_core_state, low_level_core_state = core_state[:2], core_state[2:]\n\n if not last_ttyrec_data is None:\n low_level_out, low_level_core_state = self.low_level_model(inputs, low_level_core_state, return_strategywise_logits=True, last_ttyrec_data=last_ttyrec_data)\n else:\n low_level_out, low_level_core_state = self.low_level_model(inputs, low_level_core_state, return_strategywise_logits=True)\n high_level_out, high_level_core_state = self.high_level_model(inputs, high_level_core_state)\n\n policy_logits = low_level_out['strategywise_policy_logits']\n strategy_logits = high_level_out['strategy_logits']\n\n if isinstance(self.low_level_model, HierarchicalTransformerLSTM):\n strategy_logits = torch.cat([strategy_logits[..., -1].unsqueeze(-1), strategy_logits[..., :-1]], axis=-1)\n\n T, B, _ = strategy_logits.shape\n\n sample = True\n\n if self.eps_greedy < 1:\n sample = bool(np.random.binomial(1, self.eps_greedy))\n\n if sample:\n strategies = F.gumbel_softmax(strategy_logits.reshape(T * B, -1), tau=self.gumbel_softmax_tau, hard=True).bool().unsqueeze(-1).expand((-1, -1, policy_logits.shape[-1]))\n sdim = strategy_logits.size(-1)\n out_policy_logits = torch.sum(torch.mul(policy_logits[:sdim], torch.swapaxes(strategies, 0, 1)), axis=0).view(T, B, -1)\n else:\n strategies = torch.argmax(strategy_logits.reshape(T * B, -1), axis=-1)\n out_policy_logits = policy_logits[strategies, torch.arange(strategies.size(0))].view(T, B, -1)\n\n\n out_action = torch.multinomial(F.softmax(out_policy_logits.reshape(T * B, -1), dim=1), num_samples=1).long().view(T, B)\n\n version = torch.ones_like(out_action) * self.version\n\n if self.disable_high_level_policy_gradients:\n baseline = low_level_out['baseline']\n else:\n baseline = high_level_out['baseline']\n\n output = dict(\n policy_logits=out_policy_logits,\n baseline=baseline,\n action=out_action,\n 
version=version,\n strategy_logits=strategy_logits.view(T, B, -1),\n all_policy_logits=torch.swapaxes(torch.swapaxes(policy_logits, 0, 1), 1, 2),\n )\n\n core_state = high_level_core_state + low_level_core_state\n return (output, core_state)" }, { "identifier": "FlatTransformer", "path": "models/flat_transformer.py", "snippet": "class FlatTransformer(nn.Module):\n def __init__(self, shape, action_space, flags, device):\n super(FlatTransformer, self).__init__()\n \n self.flags = flags\n self.num_actions = len(action_space)\n self.use_prev_action = flags.use_prev_action\n\n self.topline_encoder = TopLineEncoder()\n self.bottomline_encoder = torch.jit.script(BottomLinesEncoder())\n\n pixel_size = flags.pixel_size\n if flags.crop_dim == 0:\n screen_shape = (24 * pixel_size, 80 * pixel_size)\n else:\n screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size)\n\n self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape))\n\n self.prev_actions_dim = 128 if self.use_prev_action else 0\n\n self.h_dim = sum(\n [\n self.topline_encoder.hidden_dim,\n self.bottomline_encoder.hidden_dim,\n self.screen_encoder.hidden_dim,\n self.prev_actions_dim,\n ]\n )\n\n self.num_attention_heads = flags.num_attention_heads \n self.num_transformer_encoder_layers = flags.num_transformer_layers\n core_layer = nn.TransformerEncoderLayer(d_model=self.h_dim, nhead=self.num_attention_heads)\n self.core = nn.TransformerEncoder(core_layer, num_layers=self.num_transformer_encoder_layers)\n self.positional_encoder = PositionalEncoding(self.h_dim)\n\n self.policy_hidden_dim = 1024\n self.policy = nn.Sequential(nn.Linear(self.h_dim, self.policy_hidden_dim),\n nn.ELU(),\n nn.Linear(self.policy_hidden_dim, self.policy_hidden_dim),\n nn.ELU(),\n nn.Linear(self.policy_hidden_dim, self.num_actions)\n )\n self.baseline = nn.Linear(self.h_dim, 1)\n\n self.version = 0\n self.inference_unroll_length = 1\n\n def initial_state(self, batch_size=1):\n return (\n torch.zeros(1, batch_size, self.inference_unroll_length, self.inference_unroll_length),\n torch.rand(self.inference_unroll_length, batch_size, self.h_dim)\n )\n\n def forward(self, inputs, core_state=None):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n\n topline = inputs[\"tty_chars\"][..., 0, :]\n bottom_line = inputs[\"tty_chars\"][..., -2:, :]\n\n st = [\n self.topline_encoder(\n topline.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.bottomline_encoder(\n bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.screen_encoder(\n inputs[\"screen_image\"]\n .float(memory_format=torch.contiguous_format)\n .view(T * B, C, H, W)\n ),\n ]\n if self.use_prev_action:\n st.append(torch.nn.functional.one_hot(inputs[\"prev_action\"], self.prev_actions_dim).view(T * B, -1))\n\n st = torch.cat(st, dim=1)\n\n core_input = st.reshape(T, B, -1)\n notdone = (~inputs[\"done\"]).float()\n if not self.training:\n prev_mask, prev_encodings = core_state\n prev_mask = prev_mask.squeeze(0)\n core_input = torch.cat([prev_encodings[1:], core_input], axis=0)\n core_mask = torch.stack(\n [torch.cat([torch.cat([prev_mask[i, 1:, 1:], prev_mask[i, -1, 1:].unsqueeze(0)], axis=0) * notdone[-1, i], torch.zeros((self.inference_unroll_length, 1)).to(core_input.device)], axis=1) for i in range(B)]\n )\n core_mask[:, -1, -1] = 1\n core_state = (core_mask.detach().clone().unsqueeze(0), \n core_input.detach().clone()\n )\n for i in range(B):\n core_mask[i].fill_diagonal_(1)\n core_mask = (core_mask.float().masked_fill(core_mask == 0, 
float(\"-inf\")).masked_fill(core_mask == 1, float(0.0))).to(device=core_input.device)\n\n core_mask = torch.repeat_interleave(core_mask, self.num_attention_heads, dim=1).reshape(B * self.num_attention_heads, self.inference_unroll_length, self.inference_unroll_length)\n T = core_input.shape[0]\n else:\n core_mask = generate_square_subsequent_mask(T, core_input.device)\n\n core_input = self.positional_encoder(core_input)\n core_output = self.core(core_input, core_mask)\n core_output = torch.flatten(core_output, 0, 1)\n\n # -- [B' x A]\n policy_logits = self.policy(core_output)\n\n # -- [B' x 1]\n baseline = self.baseline(core_output)\n\n action = torch.multinomial(F.softmax(policy_logits + 1e-5, dim=1), num_samples=1)\n\n policy_logits = policy_logits.view(T, B, -1)\n baseline = baseline.view(T, B)\n action = action.view(T, B)\n version = torch.ones_like(action) * self.version\n\n\n if not self.training:\n action = action[-1].unsqueeze(0)\n baseline = baseline[-1].unsqueeze(0)\n policy_logits = policy_logits[-1].unsqueeze(0)\n version = version[-1].unsqueeze(0)\n\n output = dict(\n policy_logits=policy_logits,\n baseline=baseline,\n action=action,\n version=version,\n )\n \n return (output, core_state)" }, { "identifier": "HierarchicalLSTM", "path": "models/hierarchical_lstm.py", "snippet": "class HierarchicalLSTM(nn.Module):\n def __init__(self, shape, action_space, flags, device, num_strategies=13):\n super(HierarchicalLSTM, self).__init__()\n\n self.flags = flags\n self.num_actions = len(action_space)\n self.num_strategies = num_strategies\n\n self.use_prev_action = flags.use_prev_action\n\n self.topline_encoder = TopLineEncoder()\n self.bottomline_encoder = torch.jit.script(BottomLinesEncoder())\n\n pixel_size = flags.pixel_size\n if flags.crop_dim == 0:\n screen_shape = (24 * pixel_size, 80 * pixel_size)\n else:\n screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size)\n\n self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape))\n\n self.prev_actions_dim = self.num_actions if self.use_prev_action else 0\n\n self.strategy_dim = self.num_strategies\n\n self.h_dim = sum(\n [\n self.topline_encoder.hidden_dim,\n self.bottomline_encoder.hidden_dim,\n self.screen_encoder.hidden_dim,\n self.prev_actions_dim,\n ]\n )\n\n self.policy_hidden_dim = 256\n self.strategy_hidden_dim = 128\n self.hidden_dim = 512\n\n self.strategy_encoder = nn.Linear(self.hidden_dim, self.num_strategies)\n\n self.core = nn.LSTM(self.h_dim, self.hidden_dim, num_layers=1)\n\n self.policies = nn.ModuleDict(\n [[f'{i}', nn.Sequential(nn.Linear(self.hidden_dim, self.policy_hidden_dim),\n nn.ELU(),\n nn.Linear(self.policy_hidden_dim, self.num_actions))] for i in range(self.num_strategies)]\n )\n\n self.baseline = nn.Linear(self.hidden_dim, 1)\n self.version = 0\n self.action_masks = {}\n\n self.gumbel_softmax_tau = 1\n if 'gumbel_softmax_tau' in flags:\n self.gumbel_softmax_tau = flags.gumbel_softmax_tau\n\n def initial_state(self, batch_size=1):\n return tuple(\n torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size)\n for _ in range(2)\n )\n\n def forward(self, inputs, core_state, last_ttyrec_data=None, return_strategywise_logits=False):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n\n topline = inputs[\"tty_chars\"][..., 0, :]\n bottom_line = inputs[\"tty_chars\"][..., -2:, :]\n\n st = [\n self.topline_encoder( topline.float(memory_format=torch.contiguous_format).view(T * B, -1)),\n self.bottomline_encoder(bottom_line.float(memory_format=torch.contiguous_format).view(T * B, 
-1)),\n self.screen_encoder(inputs[\"screen_image\"].float(memory_format=torch.contiguous_format).view(T * B, C, H, W)),\n ]\n if self.use_prev_action:\n st.append(torch.nn.functional.one_hot(inputs[\"prev_action\"], self.num_actions).view(T * B, -1))\n\n st = torch.cat(st, dim=1)\n\n core_input = st.view(T, B, -1)\n core_output_list = []\n notdone = (~inputs[\"done\"]).float()\n\n for input, nd in zip(core_input.unbind(), notdone.unbind()):\n # Reset core state to zero whenever an episode ended.\n # Make `done` broadcastable with (num_layers, B, hidden_size)\n nd = nd.view(1, -1, 1)\n core_state = tuple(nd * t for t in core_state)\n output, core_state = self.core(input.unsqueeze(0), core_state)\n core_output_list.append(output)\n\n core_output = torch.flatten(torch.cat(core_output_list), 0, 1)\n strategy_logits = self.strategy_encoder(core_output).view(T * B, -1)\n\n all_policy_logits = torch.stack([self.policies[str(i)](core_output) for i in range(self.num_strategies)], axis=0)\n strategies = F.gumbel_softmax(strategy_logits, tau=self.gumbel_softmax_tau, hard=True).bool().unsqueeze(-1).expand((-1, -1, all_policy_logits.shape[-1]))\n out_policy_logits = torch.sum(torch.mul(all_policy_logits, torch.swapaxes(strategies, 0, 1)), axis=0).view(T, B, -1)\n out_action = torch.multinomial(F.softmax(out_policy_logits.reshape(T * B, -1), dim=1), num_samples=1).long().view(T, B)\n\n\n # -- [B' x 1]\n baseline = self.baseline(core_output)\n baseline = baseline.view(T, B)\n strategy_logits = strategy_logits.view(T, B, -1)\n\n version = torch.ones_like(out_action) * self.version\n\n output = dict(\n policy_logits=out_policy_logits,\n all_policy_logits=torch.swapaxes(torch.swapaxes(all_policy_logits, 0, 1), 1, 2),\n baseline=baseline,\n action=out_action,\n version=version,\n strategy_logits=strategy_logits,\n )\n\n if return_strategywise_logits:\n output['strategywise_policy_logits'] = all_policy_logits\n\n return (output, core_state)" }, { "identifier": "HierarchicalTransformerLSTM", "path": "models/hierarchical_transformer_lstm.py", "snippet": "class HierarchicalTransformerLSTM(nn.Module):\n def __init__(self, shape, action_space, flags, device, num_strategies=20):\n super(HierarchicalTransformerLSTM, self).__init__()\n\n self.flags = flags\n self.num_actions = len(action_space)\n self.use_prev_action = flags.use_prev_action\n\n self.topline_encoder = TopLineEncoder()\n self.bottomline_encoder = torch.jit.script(BottomLinesEncoder())\n\n pixel_size = flags.pixel_size\n if flags.crop_dim == 0:\n screen_shape = (24 * pixel_size, 80 * pixel_size)\n else:\n screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size)\n\n self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape))\n\n ## second copy of encoders\n self.topline_encoder2 = TopLineEncoder()\n self.bottomline_encoder2 = torch.jit.script(BottomLinesEncoder())\n self.screen_encoder2 = torch.jit.script(ScreenEncoder(screen_shape))\n ###\n\n self.prev_actions_dim = 128 if self.use_prev_action else 0\n\n self.h_dim = sum(\n [\n self.topline_encoder.hidden_dim,\n self.bottomline_encoder.hidden_dim,\n self.screen_encoder.hidden_dim,\n self.prev_actions_dim\n ]\n )\n\n self.hidden_dim = 512\n self.policy_hidden_dim = 256\n self.strategy_dim = num_strategies\n \n self.core = nn.LSTM(self.h_dim, self.hidden_dim, num_layers=1)\n self.num_attention_heads = flags.num_attention_heads\n self.num_transformer_encoder_layers = flags.num_transformer_layers\n \n self.hidden_dim = self.h_dim + self.hidden_dim\n core_trnsfrmr_layer = 
nn.TransformerEncoderLayer(d_model=self.hidden_dim, nhead=self.num_attention_heads, norm_first=True, activation='gelu')\n self.core_trnsfrmr = nn.TransformerEncoder(core_trnsfrmr_layer, num_layers=self.num_transformer_encoder_layers)\n self.positional_encoder = PositionalEncoding(self.hidden_dim)\n\n self.strategy_encoder = nn.Linear(self.hidden_dim, self.strategy_dim)\n\n self.policies = nn.ModuleDict(\n [[f'{i}', nn.Sequential(nn.Linear(self.hidden_dim, self.policy_hidden_dim),\n nn.ELU(),\n nn.Linear(self.policy_hidden_dim, self.num_actions))] for i in range(self.strategy_dim)]\n )\n\n self.baseline = nn.Linear(self.hidden_dim, 1)\n self.version = 0\n self.inference_unroll_length = flags.unroll_length if not 'inference_unroll_length' in flags else flags.inference_unroll_length\n\n self.wrapped = False\n\n def initial_state(self, batch_size=1):\n return (\n torch.zeros(1, batch_size, self.inference_unroll_length, self.inference_unroll_length), # transformer portion 0\n torch.rand(self.inference_unroll_length, batch_size, self.hidden_dim), # transformer portion 1\n torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size), # lstm portion 0\n torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size) # lstm portion 1\n \n \n )\n\n def get_encodings(self, inputs, for_lstm=False):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n\n topline = inputs[\"tty_chars\"][..., 0, :]\n bottom_line = inputs[\"tty_chars\"][..., -2:, :]\n\n if for_lstm or not hasattr(self, 'topline_encoder2'):\n st = [\n self.topline_encoder(\n topline.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.bottomline_encoder(\n bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.screen_encoder(\n inputs[\"screen_image\"]\n .float(memory_format=torch.contiguous_format)\n .view(T * B, C, H, W)\n ),\n ]\n else:\n st = [\n self.topline_encoder2(\n topline.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.bottomline_encoder2(\n bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.screen_encoder2(\n inputs[\"screen_image\"]\n .float(memory_format=torch.contiguous_format)\n .view(T * B, C, H, W)\n ),\n ]\n\n if self.use_prev_action:\n st.append(torch.nn.functional.one_hot(inputs[\"prev_action\"], self.prev_actions_dim).view(T * B, -1))\n\n st = torch.cat(st, dim=1)\n return st\n\n\n\n def forward(self, inputs, core_state=None, last_ttyrec_data=None, return_strategywise_logits=False):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n st_lstm = self.get_encodings(inputs, for_lstm=True)\n st_trnsfrmr = self.get_encodings(inputs, for_lstm=False)\n\n T_eff = T\n\n if not last_ttyrec_data is None and self.training:\n last_st_lstm = self.get_encodings(last_ttyrec_data, for_lstm=True)\n last_st_trnsfrmr = self.get_encodings(last_ttyrec_data, for_lstm=False)\n T_eff = T * 2 \n st_lstm = torch.cat([last_st_lstm.reshape(T, B, -1), st_lstm.reshape(T, B, -1)], axis=0).reshape(T_eff * B, -1)\n st_trnsfrmr = torch.cat([last_st_trnsfrmr.reshape(T, B, -1), st_trnsfrmr.reshape(T, B, -1)], axis=0).reshape(T_eff * B, -1)\n self.wrapped = True\n\n c0, c1, c2, c3 = core_state\n trnsfrmr_core_state = c0, c1\n lstm_core_state = c2, c3\n\n lstm_core_input = st_lstm.view(T_eff, B, -1)\n lstm_core_output_list = []\n \n if self.wrapped:\n notdone = torch.cat([(~last_ttyrec_data[\"done\"]).float(), (~inputs[\"done\"]).float()], axis=0)\n else:\n notdone = (~inputs[\"done\"]).float()\n\n for input, nd in 
zip(lstm_core_input.unbind(), notdone.unbind()):\n # Reset core state to zero whenever an episode ended.\n # Make `done` broadcastable with (num_layers, B, hidden_size)\n nd = nd.view(1, -1, 1)\n lstm_core_state = tuple(nd * t for t in lstm_core_state)\n output, lstm_core_state = self.core(input.unsqueeze(0), lstm_core_state)\n lstm_core_output_list.append(output)\n\n lstm_core_output = torch.flatten(torch.cat(lstm_core_output_list), 0, 1)\n\n st = torch.cat([st_trnsfrmr, lstm_core_output], dim=1)\n\n trnsfrmr_core_input = st.reshape(T_eff, B, -1)\n if not self.training:\n prev_mask, prev_encodings = trnsfrmr_core_state\n prev_mask = prev_mask.squeeze(0)\n trnsfrmr_core_input = torch.cat([prev_encodings[1:], trnsfrmr_core_input], axis=0)\n trnsfrmr_core_mask = torch.stack(\n [torch.cat([torch.cat([prev_mask[i, 1:, 1:], prev_mask[i, -1, 1:].unsqueeze(0)], axis=0) * notdone[-1, i], torch.zeros((self.inference_unroll_length, 1)).to(trnsfrmr_core_input.device)], axis=1) for i in range(B)]\n )\n trnsfrmr_core_mask[:, -1, -1] = 1\n trnsfrmr_core_state = (trnsfrmr_core_mask.detach().clone().unsqueeze(0), \n trnsfrmr_core_input.detach().clone()\n )\n for i in range(B):\n trnsfrmr_core_mask[i].fill_diagonal_(1)\n trnsfrmr_core_mask = (trnsfrmr_core_mask.float().masked_fill(trnsfrmr_core_mask == 0, float(\"-inf\")).masked_fill(trnsfrmr_core_mask == 1, float(0.0))).to(device=trnsfrmr_core_input.device)\n trnsfrmr_core_mask = torch.repeat_interleave(trnsfrmr_core_mask, self.num_attention_heads, dim=1).reshape(B * self.num_attention_heads, self.inference_unroll_length, self.inference_unroll_length)\n T = trnsfrmr_core_input.shape[0]\n elif self.wrapped:\n mask1 = (torch.triu(torch.ones(T_eff, T_eff)) == 1).transpose(0, 1)\n mask2 = F.pad((torch.triu(torch.ones(T, T)) == 1).transpose(0, 1), (0, T, T, 0))\n trnsfrmr_core_mask = mask1.long() + mask2.long()\n trnsfrmr_core_mask[trnsfrmr_core_mask != 1] = 0\n trnsfrmr_core_mask = (trnsfrmr_core_mask.float().masked_fill(trnsfrmr_core_mask == 0, float(\"-inf\")).masked_fill(trnsfrmr_core_mask == 1, float(0.0))).to(device=trnsfrmr_core_input.device)\n else:\n trnsfrmr_core_mask = generate_square_subsequent_mask(T, trnsfrmr_core_input.device)\n\n trnsfrmr_core_input = self.positional_encoder(trnsfrmr_core_input)\n trnsfrmr_core_output = self.core_trnsfrmr(trnsfrmr_core_input, trnsfrmr_core_mask)\n\n trnsfrmr_core_output = torch.flatten(trnsfrmr_core_output, 0, 1)\n\n if self.wrapped:\n strategy_logits = self.strategy_encoder(trnsfrmr_core_output).view(2 * T * B, -1)\n else:\n strategy_logits = self.strategy_encoder(trnsfrmr_core_output).view(T * B, -1)\n\n\n all_policy_logits = torch.stack([self.policies[str(i)](trnsfrmr_core_output) for i in range(self.strategy_dim)], axis=0)\n\n\n # -- [B' x 1]\n baseline = self.baseline(trnsfrmr_core_output)\n\n strategy_sample = F.gumbel_softmax(strategy_logits, tau=1.0, hard=True)\n strategies = strategy_sample.bool().unsqueeze(-1).expand((-1, -1, all_policy_logits.shape[-1]))\n\n out_policy_logits = torch.sum(torch.mul(all_policy_logits, torch.swapaxes(strategies, 0, 1)), axis=0)\n action = torch.multinomial(F.softmax(out_policy_logits.reshape(T * B, -1), dim=1), num_samples=1).long()\n\n if self.wrapped:\n out_policy_logits = out_policy_logits.view(2*T, B, -1)[-T:].view(T * B, -1)\n baseline = baseline.view(2*T, B, -1)[-T:].view(T * B, -1)\n strategy_logits = strategy_logits.view(2 * T, B, -1)[-T:].view(T * B, -1)\n all_policy_logits = all_policy_logits.view(self.strategy_dim, 2 * T, B, -1)[:, 
-T:].view(self.strategy_dim, T * B, -1)\n\n out_policy_logits = out_policy_logits.view(T, B, -1)\n baseline = baseline.view(T, B)\n action = action.view(T, B)\n strategy_logits = strategy_logits.view(T, B, -1)\n strategy = torch.argmax(strategy_logits, axis=-1).long()\n version = torch.ones_like(action) * self.version\n\n\n if not self.training:\n action = action[-1].unsqueeze(0)\n baseline = baseline[-1].unsqueeze(0)\n out_policy_logits = out_policy_logits[-1].unsqueeze(0)\n version = version[-1].unsqueeze(0)\n strategy_logits = strategy_logits[-1].unsqueeze(0)\n strategy = strategy[-1].unsqueeze(0)\n\n output = dict(\n policy_logits=out_policy_logits,\n baseline=baseline,\n action=action,\n strategy=strategy,\n version=version,\n strategy_logits=strategy_logits\n )\n\n if return_strategywise_logits:\n output['strategywise_policy_logits'] = all_policy_logits\n\n c0, c1 = trnsfrmr_core_state\n c2, c3 = lstm_core_state\n\n core_state = (c0, c1, c2, c3)\n\n self.wrapped = False\n return (output, core_state)" }, { "identifier": "TransformerLSTM", "path": "models/transformer_lstm.py", "snippet": "class TransformerLSTM(nn.Module):\n def __init__(self, shape, action_space, flags, device):\n super(TransformerLSTM, self).__init__()\n\n self.flags = flags\n self.num_actions = len(action_space)\n self.use_prev_action = flags.use_prev_action\n\n self.topline_encoder = TopLineEncoder()\n self.bottomline_encoder = torch.jit.script(BottomLinesEncoder())\n\n pixel_size = flags.pixel_size\n if flags.crop_dim == 0:\n screen_shape = (24 * pixel_size, 80 * pixel_size)\n else:\n screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size)\n\n self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape))\n\n ## second copy of encoders\n self.topline_encoder2 = TopLineEncoder()\n self.bottomline_encoder2 = torch.jit.script(BottomLinesEncoder())\n self.screen_encoder2 = torch.jit.script(ScreenEncoder(screen_shape))\n ###\n\n self.prev_actions_dim = 128 if self.use_prev_action else 0\n\n self.h_dim = sum(\n [\n self.topline_encoder.hidden_dim,\n self.bottomline_encoder.hidden_dim,\n self.screen_encoder.hidden_dim,\n self.prev_actions_dim,\n ]\n )\n\n self.hidden_dim = 512\n \n self.core = nn.LSTM(self.h_dim, self.hidden_dim, num_layers=1)\n\n self.num_attention_heads = flags.num_attention_heads\n self.num_transformer_encoder_layers = flags.num_transformer_layers\n self.hidden_dim = self.h_dim + self.hidden_dim\n core_trnsfrmr_layer = nn.TransformerEncoderLayer(d_model=self.hidden_dim, nhead=self.num_attention_heads, norm_first=True, activation='gelu')\n self.core_trnsfrmr = nn.TransformerEncoder(core_trnsfrmr_layer, num_layers=self.num_transformer_encoder_layers) # test round 1 uses 4 layers\n self.positional_encoder = PositionalEncoding(self.hidden_dim)\n \n self.policy = nn.Linear(self.hidden_dim, self.num_actions)\n\n self.baseline = nn.Linear(self.hidden_dim, 1)\n self.version = 0\n self.inference_unroll_length = flags.unroll_length if not 'inference_unroll_length' in flags else flags.inference_unroll_length\n\n self.wrapped = False\n\n def initial_state(self, batch_size=1):\n return (\n torch.zeros(1, batch_size, self.inference_unroll_length, self.inference_unroll_length), # transformer portion 0\n torch.rand(self.inference_unroll_length, batch_size, self.hidden_dim), # transformer portion 1\n torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size), # lstm portion 0\n torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size) # lstm portion 1\n \n \n )\n\n def 
get_encodings(self, inputs, for_lstm=False):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n\n topline = inputs[\"tty_chars\"][..., 0, :]\n bottom_line = inputs[\"tty_chars\"][..., -2:, :]\n\n if for_lstm or not hasattr(self, 'topline_encoder2'):\n st = [\n self.topline_encoder(\n topline.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.bottomline_encoder(\n bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.screen_encoder(\n inputs[\"screen_image\"]\n .float(memory_format=torch.contiguous_format)\n .view(T * B, C, H, W)\n ),\n ]\n else:\n st = [\n self.topline_encoder2(\n topline.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.bottomline_encoder2(\n bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.screen_encoder2(\n inputs[\"screen_image\"]\n .float(memory_format=torch.contiguous_format)\n .view(T * B, C, H, W)\n ),\n ]\n\n if self.use_prev_action:\n st.append(torch.nn.functional.one_hot(inputs[\"prev_action\"], self.prev_actions_dim).view(T * B, -1))\n\n st = torch.cat(st, dim=1)\n return st\n\n\n\n def forward(self, inputs, core_state=None, last_ttyrec_data=None):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n st_lstm = self.get_encodings(inputs, for_lstm=True)\n st_trnsfrmr = self.get_encodings(inputs, for_lstm=False)\n\n T_eff = T\n\n if not last_ttyrec_data is None and self.training:\n last_st_lstm = self.get_encodings(last_ttyrec_data, for_lstm=True)\n last_st_trnsfrmr = self.get_encodings(last_ttyrec_data, for_lstm=False)\n T_eff = T * 2 \n st_lstm = torch.cat([last_st_lstm.reshape(T, B, -1), st_lstm.reshape(T, B, -1)], axis=0).reshape(T_eff * B, -1)\n st_trnsfrmr = torch.cat([last_st_trnsfrmr.reshape(T, B, -1), st_trnsfrmr.reshape(T, B, -1)], axis=0).reshape(T_eff * B, -1)\n self.wrapped = True\n\n c0, c1, c2, c3 = core_state\n trnsfrmr_core_state = c0, c1\n lstm_core_state = c2, c3\n\n lstm_core_input = st_lstm.view(T_eff, B, -1)\n lstm_core_output_list = []\n \n if self.wrapped:\n notdone = torch.cat([(~last_ttyrec_data[\"done\"]).float(), (~inputs[\"done\"]).float()], axis=0)\n else:\n notdone = (~inputs[\"done\"]).float()\n\n notdone_mask = torch.ones((T_eff, T_eff)).repeat(B, 1, 1).to(lstm_core_input.device)\n\n i = 0\n for input, nd in zip(lstm_core_input.unbind(), notdone.unbind()):\n # Reset core state to zero whenever an episode ended.\n # Make `done` broadcastable with (num_layers, B, hidden_size)\n nd = nd.view(1, -1, 1)\n lstm_core_state = tuple(nd * t for t in lstm_core_state)\n output, lstm_core_state = self.core(input.unsqueeze(0), lstm_core_state)\n lstm_core_output_list.append(output)\n\n if i < T_eff-1:\n nd = notdone[i].view(-1, 1, 1)\n notdone_mask[:, i+1:, :i+1] *= nd\n\n i += 1\n\n lstm_core_output = torch.flatten(torch.cat(lstm_core_output_list), 0, 1)\n\n st = torch.cat([st_trnsfrmr, lstm_core_output], dim=1)\n\n trnsfrmr_core_input = st.reshape(T_eff, B, -1)\n if not self.training:\n prev_mask, prev_encodings = trnsfrmr_core_state\n prev_mask = prev_mask.squeeze(0)\n trnsfrmr_core_input = torch.cat([prev_encodings[1:], trnsfrmr_core_input], axis=0)\n trnsfrmr_core_mask = torch.stack(\n [torch.cat([torch.cat([prev_mask[i, 1:, 1:], prev_mask[i, -1, 1:].unsqueeze(0)], axis=0) * notdone[-1, i], torch.zeros((self.inference_unroll_length, 1)).to(trnsfrmr_core_input.device)], axis=1) for i in range(B)]\n )\n trnsfrmr_core_mask[:, -1, -1] = 1\n trnsfrmr_core_state = (trnsfrmr_core_mask.detach().clone().unsqueeze(0), \n 
trnsfrmr_core_input.detach().clone()\n )\n for i in range(B):\n trnsfrmr_core_mask[i].fill_diagonal_(1)\n trnsfrmr_core_mask = (trnsfrmr_core_mask.float().masked_fill(trnsfrmr_core_mask == 0, float(\"-inf\")).masked_fill(trnsfrmr_core_mask == 1, float(0.0))).to(device=trnsfrmr_core_input.device)\n trnsfrmr_core_mask = torch.repeat_interleave(trnsfrmr_core_mask, self.num_attention_heads, dim=1).reshape(B * self.num_attention_heads, self.inference_unroll_length, self.inference_unroll_length)\n T = trnsfrmr_core_input.shape[0]\n elif self.wrapped: \n mask1 = (torch.triu(torch.ones(T_eff, T_eff)) == 1).transpose(0, 1)\n mask2 = F.pad((torch.triu(torch.ones(T, T)) == 1).transpose(0, 1), (0, T, T, 0))\n trnsfrmr_core_mask = mask1.long() + mask2.long()\n trnsfrmr_core_mask[trnsfrmr_core_mask != 1] = 0\n trnsfrmr_core_mask = (trnsfrmr_core_mask.float().masked_fill(trnsfrmr_core_mask == 0, float(\"-inf\")).masked_fill(trnsfrmr_core_mask == 1, float(0.0))).to(device=trnsfrmr_core_input.device)\n else:\n trnsfrmr_core_mask = generate_square_subsequent_mask(T, trnsfrmr_core_input.device)\n\n\n trnsfrmr_core_input = self.positional_encoder(trnsfrmr_core_input)\n trnsfrmr_core_output = self.core_trnsfrmr(trnsfrmr_core_input, trnsfrmr_core_mask)\n trnsfrmr_core_output = torch.flatten(trnsfrmr_core_output, 0, 1)\n\n # -- [B' x A]\n policy_logits = self.policy(trnsfrmr_core_output)\n\n # -- [B' x 1]\n baseline = self.baseline(trnsfrmr_core_output)\n\n if self.wrapped:\n policy_logits = policy_logits.view(2*T, B, -1)[-T:].view(T * B, -1)\n baseline = baseline.view(2*T, B, -1)[-T:].view(T * B, -1)\n\n action = torch.multinomial(F.softmax(policy_logits + 1e-5, dim=1), num_samples=1)\n\n policy_logits = policy_logits.view(T, B, -1)\n baseline = baseline.view(T, B)\n action = action.view(T, B)\n version = torch.ones_like(action) * self.version\n\n\n if not self.training:\n action = action[-1].unsqueeze(0)\n baseline = baseline[-1].unsqueeze(0)\n policy_logits = policy_logits[-1].unsqueeze(0)\n version = version[-1].unsqueeze(0)\n\n output = dict(\n policy_logits=policy_logits,\n baseline=baseline,\n action=action,\n version=version,\n )\n\n c0, c1 = trnsfrmr_core_state\n c2, c3 = lstm_core_state\n\n core_state = (c0, c1, c2, c3)\n\n self.wrapped = False\n return (output, core_state)" } ]
import_statement:
import omegaconf
import os
import pathlib
import pdb
import sys
import torch
from .cdgpt5 import CDGPT5
from .cleaved_hierarchical_policy import CleavedHierarchicalPolicy
from .flat_transformer import FlatTransformer
from .hierarchical_lstm import HierarchicalLSTM
from .hierarchical_transformer_lstm import HierarchicalTransformerLSTM
from .transformer_lstm import TransformerLSTM
from nle.env.base import DUNGEON_SHAPE
from omegaconf import OmegaConf
from tasks import ENVS

token_num: 10,749

cropped_code:
base_path = str(pathlib.Path().resolve())
hihack_path = os.path.join(base_path[:base_path.find('hihack')], 'hihack')
sys.path.insert(0, os.path.join(hihack_path, 'dungeonsdata-neurips2022/experiment_code/hackrl'))
MODELS = [
    CDGPT5,
    HierarchicalLSTM,
    HierarchicalTransformerLSTM,
    TransformerLSTM,

all_code:
base_path = str(pathlib.Path().resolve())
hihack_path = os.path.join(base_path[:base_path.find('hihack')], 'hihack')
sys.path.insert(0, os.path.join(hihack_path, 'dungeonsdata-neurips2022/experiment_code/hackrl'))
MODELS = [
    CDGPT5,
    HierarchicalLSTM,
    HierarchicalTransformerLSTM,
    TransformerLSTM,
next_line: FlatTransformer
gold_snippet_index: 2
created_at: 2023-10-23 15:44:32+00:00
level: 12k

repo_name: avilliai/Bert_Vits2_Sever
file_path: train_ms.py
[ { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(hparams, \"use_mel_posterior_encoder\", False)\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 300)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n for _id, spk, language, text, phones, tone, word2ph in self.audiopaths_sid_text:\n audiopath = f'{_id}'\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append([audiopath, spk, language, text, phones, tone, word2ph])\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n print(\"skipped: \", skipped, \", total: \", len(self.audiopaths_sid_text))\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, phones, tone, language = self.get_text(text, word2ph, phones, tone, language, audiopath)\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n return (phones, spec, wav, sid, tone, language, bert)\n\n def get_audio(self, filename):\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\"{} {} SR doesn't match target {} SR\".format(\n sampling_rate, self.sampling_rate))\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n try:\n spec = torch.load(spec_filename)\n except:\n if self.use_mel_spec_posterior:\n spec = mel_spectrogram_torch(audio_norm, self.filter_length,\n self.n_mel_channels, self.sampling_rate, self.hop_length,\n self.win_length, self.hparams.mel_fmin, self.hparams.mel_fmax, center=False)\n else:\n spec = spectrogram_torch(audio_norm, self.filter_length,\n self.sampling_rate, self.hop_length, self.win_length,\n center=False)\n spec = 
torch.squeeze(spec, 0)\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n pold = phone\n w2pho = [i for i in word2ph]\n word2ph = [i for i in word2ph]\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n pold2 = phone\n\n if self.add_blank:\n p1 = len(phone)\n phone = commons.intersperse(phone, 0)\n p2 = len(phone)\n t1 = len(tone)\n tone = commons.intersperse(tone, 0)\n t2 = len(tone)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert = torch.load(bert_path)\n assert bert.shape[-1] == len(phone)\n except:\n bert = get_bert(text, word2ph, language_str)\n torch.save(bert, bert_path)\n #print(bert.shape[-1], bert_path, text, pold)\n assert bert.shape[-1] == len(phone)\n\n assert bert.shape[-1] == len(phone), (\n bert.shape, len(phone), sum(word2ph), p1, p2, t1, t2, pold, pold2, word2ph, text, w2pho)\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, phone, tone, language\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class TextAudioSpeakerCollate():\n \"\"\" Zero-pads model inputs and targets\n \"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]),\n dim=0, descending=True)\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, :text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, :spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, :wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, :tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, :language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, :bert.size(1)] = bert\n\n return text_padded, text_lengths, 
spec_padded, spec_lengths, wav_padded, wav_lengths, sid, tone_padded, language_padded, bert_padded" }, { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if (len_bucket == 0):\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]\n\n # subsample\n ids_bucket = ids_bucket[self.rank::self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class 
SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer = 4,\n n_layers_trans_flow = 3,\n flow_share_parameter = False,\n use_transformer_flow = True,\n **kwargs):\n\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\"use_spk_conditioned_encoder\", True)\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n gin_channels=self.enc_gin_channels)\n self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,\n upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)\n self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16,\n gin_channels=gin_channels)\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(inter_channels, hidden_channels, filter_channels, n_heads, n_layers_trans_flow, 5, p_dropout, n_flow_layer, gin_channels=gin_channels,share_parameter= flow_share_parameter)\n else:\n self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, n_flow_layer, gin_channels=gin_channels)\n self.sdp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)\n self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)\n \n if n_speakers >= 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n def forward(self, x, x_lengths, y, y_lengths, sid, tone, language, bert):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], 
keepdim=True) # [b, 1, t_s]\n neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2),\n s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = torch.std(neg_cent) * torch.randn_like(neg_cent) * self.current_mas_noise_scale\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n \n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)\n o = self.dec(z_slice, g=g)\n return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (x, logw, logw_)\n \n def infer(self, x, x_lengths, sid, tone, language, bert, noise_scale=.667, length_scale=1, noise_scale_w=0.8, max_len=None, sdp_ratio=0,y=None):\n #x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)\n # g = self.gst(y)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g)\n logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (sdp_ratio) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1,\n 2) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n 
fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "DurationDiscriminator", "path": "models.py", "snippet": "class DurationDiscriminator(nn.Module): #vits2\n def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(2*filter_channels, filter_channels, kernel_size, padding=kernel_size//2)\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(\n nn.Linear(filter_channels, 1), \n nn.Sigmoid() \n )\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_1(x)\n x = self.drop(x)\n x = self.pre_out_conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_2(x)\n x = self.drop(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append(output_prob)\n\n return output_probs" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1-dg)**2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1-dr)**2)\n g_loss = torch.mean(dg**2)\n loss += (r_loss + g_loss)\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2 " }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n 
m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):\n if torch.min(y) < -1.:\n print('min value is ', torch.min(y))\n if torch.max(y) > 1.:\n print('max value is ', torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + '_' + str(y.device)\n fmax_dtype_device = str(fmax) + '_' + dtype_device\n wnsize_dtype_device = str(win_size) + '_' + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)\n\n y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')\n y = y.squeeze(1)\n\n spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],\n center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + '_' + str(spec.device)\n fmax_dtype_device = str(fmax) + '_' + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "symbols", "path": "text/symbols.py", "snippet": "" } ]
import_statement:
import os
import json
import argparse
import itertools
import math
import torch
import shutil
import torch.multiprocessing as mp
import torch.distributed as dist
import logging
import commons
import utils
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from data_utils import (
    TextAudioSpeakerLoader,
    TextAudioSpeakerCollate,
    DistributedBucketSampler
)
from models import (
    SynthesizerTrn,
    MultiPeriodDiscriminator,
    DurationDiscriminator,
)
from losses import (
    generator_loss,
    discriminator_loss,
    feature_loss,
    kl_loss
)
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols

token_num: 8,152

cropped_code:
logging.getLogger('numba').setLevel(logging.WARNING)
torch.backends.cudnn.benchmark = True
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
torch.set_float32_matmul_precision('medium')
global_step = 0


def main():
    """Assume Single Node Multi GPUs Training Only"""
    assert torch.cuda.is_available(), "CPU training is not allowed."

    n_gpus = torch.cuda.device_count()
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '65280'
    hps = utils.get_hparams()
    if not hps.cont:
        shutil.copy('./pretrained_models/D_0.pth','./logs/OUTPUT_MODEL/D_0.pth')
        shutil.copy('./pretrained_models/G_0.pth','./logs/OUTPUT_MODEL/G_0.pth')
        shutil.copy('./pretrained_models/DUR_0.pth','./logs/OUTPUT_MODEL/DUR_0.pth')
    mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))


def run(rank, n_gpus, hps):
    global global_step
    if rank == 0:
        logger = utils.get_logger(hps.model_dir)
        logger.info(hps)
        utils.check_git_hash(hps.model_dir)
        writer = SummaryWriter(log_dir=hps.model_dir)
        writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
    dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank)
    torch.manual_seed(hps.train.seed)
    torch.cuda.set_device(rank)

all_code:
logging.getLogger('numba').setLevel(logging.WARNING)
torch.backends.cudnn.benchmark = True
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
torch.set_float32_matmul_precision('medium')
global_step = 0


def main():
    """Assume Single Node Multi GPUs Training Only"""
    assert torch.cuda.is_available(), "CPU training is not allowed."

    n_gpus = torch.cuda.device_count()
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '65280'
    hps = utils.get_hparams()
    if not hps.cont:
        shutil.copy('./pretrained_models/D_0.pth','./logs/OUTPUT_MODEL/D_0.pth')
        shutil.copy('./pretrained_models/G_0.pth','./logs/OUTPUT_MODEL/G_0.pth')
        shutil.copy('./pretrained_models/DUR_0.pth','./logs/OUTPUT_MODEL/DUR_0.pth')
    mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))


def run(rank, n_gpus, hps):
    global global_step
    if rank == 0:
        logger = utils.get_logger(hps.model_dir)
        logger.info(hps)
        utils.check_git_hash(hps.model_dir)
        writer = SummaryWriter(log_dir=hps.model_dir)
        writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
    dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank)
    torch.manual_seed(hps.train.seed)
    torch.cuda.set_device(rank)
next_line: train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
gold_snippet_index: 0
created_at: 2023-10-23 08:24:12+00:00
level: 12k

repo_name: t-ega/whatsapp-cloud-sdk
file_path: whatsapp_cloud_sdk/bot.py
[ { "identifier": "CustomHTTPError", "path": "whatsapp_cloud_sdk/_exceptions/http_error.py", "snippet": "class CustomHTTPError(Exception):\n \"\"\"\n Represents a custom HTTP error.\n\n This exception class is used to raise custom HTTP errors with\n specific status codes and response text.\n It inherits from the base Exception class.\n\n Attributes:\n status_code (int): The HTTP status code associated with the error.\n response_text (str): The text or message associated with the error response.\n\n Methods:\n __init__(self, status_code, response_text):\n Initializes a new instance of the CustomHTTPError class.\n Args:\n status_code (int): The HTTP status code associated with the error.\n response_text (str): The text or message associated with the error response.\n Returns:\n None.\n \"\"\"\n\n def __init__(self, status_code, response_text):\n self.status_code = status_code\n self.response_text = response_text\n super().__init__(f\"HTTP Error {status_code}: {response_text}\")" }, { "identifier": "_BaseApi", "path": "whatsapp_cloud_sdk/_base_api.py", "snippet": "class _BaseApi:\n # pylint: disable=line-too-long\n\n \"\"\"\n Base class for interacting with the WhatsApp API.\n\n This class provides essential configuration and authentication parameters for making requests\n to the WhatsApp API. It is meant to be inherited by other classes that will implement\n specific bot functionality.\n\n Attributes:\n WA_URL (str): The base URL for WhatsApp API requests, including the API version\n and phone number ID.\n HEADERS (dict): HTTP headers for API requests, including \"Content-Type\" and \"Authorization\" with the\n Cloud API access token.\n \"\"\"\n\n __cloud_api_access_token = os.getenv(\"CLOUD_API_ACCESS_TOKEN\")\n __wa_phone_number_id = os.getenv(\"WA_PHONE_NUMBER_ID\")\n __version = os.getenv(\"WA_VERSION\")\n WA_URL = f\"https://graph.facebook.com/{__version}/{__wa_phone_number_id}/messages\"\n\n HEADERS = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": f\"Bearer {__cloud_api_access_token}\",\n }\n\n def __init__(\n self,\n cloud_api_access_token: str = None,\n wa_phone_number_id: str = None,\n version: str = \"v17.0\",\n ):\n \"\"\"\n Initialize the BaseApi instance.\n\n Args:\n cloud_api_access_token (str, optional): The Cloud API access token used for authentication,\n if not provided it is replaced with the one defined in the environment variables .\n wa_phone_number_id (str, optional): The WhatsApp phone number ID,\n if not provided it is replaced with the one defined in the environment variable.\n version (str, optional): The WhatsApp API version to use. 
Default is \"v17.0\",\n if not provided it is replaced with the one defined in the environment variable.\n\n Raises:\n RuntimeError: If neither `cloud_api_access_token` nor `wa_phone_number_id` is provided, and\n there are no corresponding environment variables set, a `RuntimeError` is raised.\n \"\"\"\n\n if not cloud_api_access_token:\n cloud_api_access_token = (self.__cloud_api_access_token,)\n\n if not wa_phone_number_id:\n wa_phone_number_id = (self.__wa_phone_number_id,)\n\n if not version:\n version = self.__version\n\n if not cloud_api_access_token or not wa_phone_number_id:\n raise RuntimeError(\n \"Either pass in your CLOUD_API_ACCESS_TOKEN or WA_PHONE_NUMBER_ID, \"\n \"Or place it in your env file\"\n )\n\n self.__cloud_api_access_token = cloud_api_access_token\n self.__wa_phone_number_id = wa_phone_number_id\n self.__version = version" }, { "identifier": "Contact", "path": "whatsapp_cloud_sdk/_files/contact.py", "snippet": "class Contact(File):\n \"\"\"\n Represents a contact.\n\n Args:\n name [Name]: The contact's name. This is a required field.\n addresses Optional[List[Address]]: A list of addresses.\n birthday Optional[str]: The contact's birthday.\n emails Optional[List[Email]]: A list of email addresses.\n org Optional[Org]: Organizational information.\n phones Optional[List[Phone]]: A list of phone numbers.\n urls Optional[List[URL]]: A list of URLs.\n\n Attributes:\n name Optional[Name]: The contact's name This field is required.\n addresses Optional[List[Address]]: A list of addresses.\n birthday (Optional[str]): The contact's birthday.\n emails (Optional[List[Email]]): A list of email addresses.\n org (Optional[Org]): Organizational information.\n phones (Optional[List[Phone]]): A list of phone numbers.\n urls (Optional[List[URL]]): A list of URLs.\n\n Methods:\n - de_json(data: Optional[JSONDict]) -> Optional[Contact]: Create a Contact\n object from JSON data.\n \"\"\"\n\n _id_attrs = (\"name\", \"phones\", \"birthday\")\n\n __slots__ = (\n \"name\",\n \"addresses\",\n \"birthday\",\n \"emails\",\n \"org\",\n \"phones\",\n \"urls\",\n )\n\n # pylint: disable=too-many-arguments\n def __init__(\n self,\n name: Union[Name, str],\n addresses: Optional[List[Address]] = None,\n birthday: Optional[str] = None,\n emails: Optional[List[Email]] = None,\n org: Optional[Org] = None,\n phones: Optional[Union[List[Phone], List[str]]] = None,\n urls: Optional[List[URL]] = None,\n ):\n # pylint: disable=fixme\n # TODO: Allow validation using pydantic\n\n # required\n if isinstance(name, str):\n self.name = Name(formatted_name=name, first_name=name)\n elif isinstance(name, Name):\n self.name = name\n else:\n raise TypeError(\n \"Name must either be a string or an instance of the Name class!\"\n )\n\n if isinstance(phones, list):\n for i, phone in enumerate(phones):\n if isinstance(phone, str):\n phones[i] = Phone(phone=phone)\n elif not isinstance(phone, Phone):\n raise TypeError(\n f\"Phone {i} must either be a string or an instance of the Phone class!\"\n )\n else:\n # pylint: disable=line-too-long\n raise ValueError(\n f\"Phones must be of type <class list> of phones class or strings!\\nGot {type(phones)} instead \"\n )\n\n # optional\n self.addresses = addresses\n self.birthday = birthday\n self.emails = emails\n self.org = org\n self.phones = phones\n self.urls = urls\n\n # pylint: disable=too-many-locals\n @classmethod\n def de_json(cls, data: Optional[JSONDict]) -> Optional[\"Contact\"]:\n \"\"\"This class acts as a method for extracting and converting JSON data gotten from\n 
Whatsapp Cloud API and converting them into internal objects that can be interacted with\n \"\"\"\n\n data = cls.parse_data(data)\n\n if not data:\n return None\n\n addresses = []\n if \"addresses\" in data:\n for address_data in data[\"addresses\"]:\n address = Address(**address_data)\n addresses.append(address)\n\n emails = []\n if \"emails\" in data:\n for email_data in data[\"emails\"]:\n email = Email(**email_data)\n emails.append(email)\n\n name = None\n if \"name\" in data:\n name_data = data[\"name\"]\n name = Name(**name_data)\n\n org = None\n if \"org\" in data:\n org_data = data[\"org\"]\n org = Org(**org_data)\n\n phones = []\n if \"phones\" in data:\n for phone_data in data[\"phones\"]:\n phone = Phone(**phone_data)\n phones.append(phone)\n\n urls = []\n if \"urls\" in data:\n for url_data in data[\"urls\"]:\n url = URL(**url_data)\n urls.append(url)\n\n return cls(\n name=name,\n addresses=addresses,\n birthday=data.get(\"birthday\"),\n emails=emails,\n org=org,\n phones=phones,\n urls=urls,\n )" }, { "identifier": "MyEncoder", "path": "whatsapp_cloud_sdk/_utils/json_serializer.py", "snippet": "class MyEncoder(JSONEncoder):\n \"\"\"Custom JSON encoder for serializing File objects e.g. Message, Audio, Video e.t.c.\n\n This encoder is used to customize the serialization behavior when converting objects\n to JSON format.\n\n Attributes:\n None\n\n Methods:\n default(o): Serialize an object to a JSON-serializable format.\n\n Args:\n o: The object to be serialized.\n\n Returns:\n JSON-serializable representation of the object.\n \"\"\"\n\n def default(self, o):\n \"\"\"Serialize an object to a JSON-serializable format.\n\n This method is called for objects that are not natively serializable by the JSON encoder.\n It checks if the object is an instance of the File class and calls it's to_dict()\n method for serialization.\n\n Args:\n o: The object to be serialized.\n\n Returns:\n JSON-serializable representation of the object.\n \"\"\"\n if isinstance(o, File):\n return o.to_dict()\n\n return super().default(o)" }, { "identifier": "TextMessage", "path": "whatsapp_cloud_sdk/_validators/messages.py", "snippet": "class TextMessage(BaseModel):\n \"\"\"\n Represents a text message.\n\n Args:\n text (str): The text content of the message.\n message_id (str, optional): An optional message ID.\n recipient_number (str): The recipient's phone number.\n\n Attributes:\n model_config (ConfigDict): Pydantic configuration for this model.\n \"\"\"\n\n model_config = ConfigDict(extra=\"forbid\")\n text: str\n message_id: Optional[str]\n recipient_number: constr(max_length=20, min_length=8)" }, { "identifier": "ButtonMessage", "path": "whatsapp_cloud_sdk/_validators/messages.py", "snippet": "class ButtonMessage(BaseModel):\n \"\"\"\n Represents a message with buttons.\n\n Args:\n text (str): The text content of the message.\n recipient_number (str): The recipient's phone number.\n buttons (List[ButtonContents]): A list of button contents.\n\n Attributes:\n None\n \"\"\"\n\n text: str\n recipient_number: constr(max_length=12, min_length=8)\n buttons: List[ButtonContents]" }, { "identifier": "ButtonContents", "path": "whatsapp_cloud_sdk/_validators/messages.py", "snippet": "class ButtonContents(BaseModel):\n \"\"\"\n Represents the contents of a button.\n\n Args:\n id (str, optional): An optional button ID. 
Defaults to a UUID.\n title (str): The title or label of the button.\n\n Attributes:\n None\n \"\"\"\n\n id: Optional[str] = str(uuid.uuid4())\n title: constr(max_length=20, min_length=1)" }, { "identifier": "LinkMessage", "path": "whatsapp_cloud_sdk/_validators/messages.py", "snippet": "class LinkMessage(BaseModel):\n \"\"\"\n Represents a message with a link.\n\n Args:\n link (str): The URL link.\n caption (str, optional): An optional caption for the link.\n message_id (str, optional): An optional message ID.\n\n Attributes:\n None\n \"\"\"\n\n link: str\n caption: Optional[str] = None\n message_id: Optional[str] = None" }, { "identifier": "LocationMessage", "path": "whatsapp_cloud_sdk/_validators/messages.py", "snippet": "class LocationMessage(BaseModel):\n \"\"\"\n Represents a location message.\n\n Args:\n longitude (int): The longitude of the location.\n name (str): The name of the location.\n address (str): The address of the location.\n\n Attributes:\n None\n \"\"\"\n\n longitude: int\n name: str\n address: str" }, { "identifier": "MessageFormatter", "path": "whatsapp_cloud_sdk/_formaters/message_formatter.py", "snippet": "class MessageFormatter:\n \"\"\"\n Provides methods for formatting messages and data for interaction with the WhatsApp API.\n\n Methods:\n - format_text_message(body: str, to: str, preview_url: bool = False,\n message_id: str = None) -> JSONDict:\n - format_button_message(to: str, text: str, buttons: List[ButtonContents],\n message_id: Optional[str])\n -> JSONDict:\n - format_reply_with_reaction(to: str, emoji, message_id: Optional[str]) -> JSONDict:\n - format_link_message(to: str, link: str, m_type: LinkTypes, caption: str = \"\",\n message_id: str =None\n -> JSONDict:\n - format_send_document_by_url(to: str, document_link: str, caption: str,\n is_reply: bool = False,\n message_id: str = None) -> JSONDict:\n - format_location_message(to: str, latitude: decimal, longitude: int, name: str,\n address: str,\n message_id: Optional[str])\n -> JSONDict:\n - format_contact_message(contact: list, to: str, message_id: Optional[str]) -> JSONDict:\n - format_sticker_message_by_url(link: str, to: str, message_id: Optional[str]) -> JSONDict:\n - mark_message_as_read(message_id: str) -> JSONDict:\n \"\"\"\n\n @staticmethod\n def format_text_message(\n body: str, to: str, preview_url: bool = False, message_id: str = None\n ) -> JSONDict:\n \"\"\"\n Formats a text message for WhatsApp.\n\n Args:\n - body (str): The text message body.\n - to (str): The recipient's WhatsApp number.\n - preview_url (bool, optional): Whether to preview URLs in the message.\n - message_id (str, optional): The ID of the message being replied to.\n\n Returns:\n - JSONDict: The formatted text message.\n \"\"\"\n\n body = {\n \"messaging_product\": \"whatsapp\",\n \"recipient_type\": \"individual\",\n \"to\": to,\n \"type\": \"text\",\n \"text\": {\"preview_url\": preview_url, \"body\": body},\n }\n\n if message_id:\n body[\"context\"] = {\"message_id\": message_id}\n\n return body\n\n @staticmethod\n def format_button_message(\n to: str,\n text: str,\n buttons: List[ButtonContents],\n message_id: Optional[str],\n ) -> JSONDict:\n \"\"\"\n Formats a message with interactive buttons for WhatsApp.\n\n Args:\n - to (str): The recipient's WhatsApp number.\n - text (str): The text message accompanying the buttons.\n - buttons (List[ButtonContents]): List of button contents.\n - message_id (str, optional): The ID of the message being replied to.\n\n Returns:\n - JSONDict: The formatted button message.\n\n 
\"\"\"\n\n if not isinstance(buttons, ButtonContents):\n raise TypeError(\"Buttons must be an instance of button contents\")\n\n message = {\n \"messaging_product\": \"whatsapp\",\n \"recipient_type\": \"individual\",\n \"to\": to,\n \"type\": \"interactive\",\n \"interactive\": {\n \"type\": \"button\",\n \"body\": {\"text\": text},\n \"action\": {\"buttons\": buttons},\n },\n }\n\n if message_id:\n message[\"context\"] = {\"message_id\": message_id}\n\n return message\n\n @staticmethod\n def format_reply_with_reaction(\n to: str,\n emoji,\n message_id: Optional[str],\n ) -> JSONDict:\n \"\"\"\n Formats a message with interactive buttons for WhatsApp.\n\n Args:\n - to (str): The recipient's WhatsApp number.\n - text (str): The text message accompanying the buttons.\n - buttons (List[ButtonContents]): List of button contents.\n - message_id (str, optional): The ID of the message being replied to.\n\n Returns:\n - JSONDict: The formatted button message.\n \"\"\"\n\n message = {\n \"messaging_product\": \"whatsapp\",\n \"recipient_type\": \"individual\",\n \"to\": to,\n \"type\": \"reaction\",\n \"reaction\": {\"message_id\": message_id, \"emoji\": emoji},\n }\n\n if message_id:\n message[\"context\"] = {\"message_id\": message_id}\n\n return message\n\n @staticmethod\n def format_link_message(\n to: str, link: str, m_type: LinkTypes, caption: str = \"\", message_id: str = None\n ) -> JSONDict:\n \"\"\"\n Formats a reaction message with an emoji for WhatsApp.\n\n Args:\n - to (str): The recipient's WhatsApp number.\n - emoji: The emoji representing the reaction.\n - message_id (str, optional): The ID of the message being reacted to.\n\n Returns:\n - JSONDict: The formatted reaction message.\n \"\"\"\n\n message = {\n \"messaging_product\": \"whatsapp\",\n \"recipient_type\": \"individual\",\n \"to\": to,\n \"type\": m_type,\n m_type: {\"link\": link},\n }\n\n if len(caption) > 0:\n message[m_type][\"caption\"] = caption\n\n if message_id:\n message[\"context\"] = {\"message_id\": message_id}\n\n return message\n\n @staticmethod\n def format_send_document_by_url(\n to: str,\n document_link: str,\n caption: str,\n is_reply: bool = False,\n message_id: str = None,\n ) -> JSONDict:\n \"\"\"\n Formats a document message with a link for WhatsApp.\n\n Args:\n - to (str): The recipient's WhatsApp number.\n - document_link (str): The URL of the document to send.\n - caption (str): The caption for the document.\n - is_reply (bool, optional): Indicates if it's a reply message.\n - message_id (str, optional): The ID of the message being replied to.\n\n Returns:\n - JSONDict: The formatted document message.\n \"\"\"\n\n message = {\n \"messaging_product\": \"whatsapp\",\n \"recipient_type\": \"individual\",\n \"to\": to,\n \"type\": \"document\",\n \"document\": {\"link\": document_link, \"caption\": caption},\n }\n\n if is_reply:\n if message_id is None:\n raise ValueError(\"message_id is required for a reply message.\")\n message[\"context\"] = {\"message_id\": message_id}\n\n return message\n\n # pylint: disable=too-many-arguments\n @staticmethod\n def format_location_message(\n to: str,\n latitude: decimal,\n longitude: int,\n name: str,\n address: str,\n message_id: Optional[str],\n ) -> JSONDict:\n \"\"\"\n Formats a location message for WhatsApp.\n\n Args:\n - to (str): The recipient's WhatsApp number.\n - latitude (decimal): The latitude coordinate of the location.\n - longitude (int): The longitude coordinate of the location.\n - name (str): The name of the location.\n - address (str): The 
address of the location.\n - message_id (str, optional): The ID of the message being replied to.\n\n Returns:\n - JSONDict: The formatted location message.\n \"\"\"\n message = {\n \"messaging_product\": \"whatsapp\",\n \"recipient_type\": \"individual\",\n \"to\": to,\n \"type\": \"location\",\n \"location\": {\n \"latitude\": latitude,\n \"longitude\": longitude,\n \"name\": name,\n \"address\": address,\n },\n }\n\n if message_id:\n message[\"context\"] = {\"message_id\": message_id}\n return message\n\n @staticmethod\n def format_contact_message(\n contacts: list,\n to: str,\n message_id: Optional[str],\n ) -> JSONDict:\n \"\"\"\n Formats a contact message for WhatsApp.\n\n Args:\n - contacts (list): List of contact details (e.g., Name, Phone, Email).\n - to (str): The recipient's WhatsApp number.\n - message_id (str, optional): The ID of the message being replied to.\n\n Returns:\n - JSONDict: The formatted contact message.\n \"\"\"\n message = {\n \"messaging_product\": \"whatsapp\",\n \"to\": to,\n \"type\": \"contacts\",\n \"contacts\": contacts,\n }\n\n if message_id:\n message[\"context\"] = {\"message_id\": message_id}\n\n return message\n\n @staticmethod\n def format_sticker_message_by_url(\n link: str,\n to: str,\n message_id: Optional[str],\n ) -> JSONDict:\n \"\"\"\n Formats a sticker message with a link for WhatsApp.\n\n Args:\n - link (str): The URL of the sticker image.\n - to (str): The recipient's WhatsApp number.\n - message_id (str, optional): The ID of the message being replied to.\n\n Returns:\n - JSONDict: The formatted sticker message.\n \"\"\"\n message = {\n \"messaging_product\": \"whatsapp\",\n \"recipient_type\": \"individual\",\n \"to\": to,\n \"type\": \"sticker\",\n \"sticker\": {\"link\": link},\n }\n\n if message_id:\n message[\"context\"] = {\"message_id\": message_id}\n\n return message\n\n @staticmethod\n def mark_message_as_read(message_id: str):\n \"\"\"\n Marks a message as read on WhatsApp.\n\n Args:\n - message_id (str): The ID of the message to mark as read.\n\n Returns:\n - JSONDict: The command to mark the message as read.\n \"\"\"\n return {\n \"messaging_product\": \"whatsapp\",\n \"status\": \"read\",\n \"message_id\": message_id,\n }" }, { "identifier": "LinkTypes", "path": "whatsapp_cloud_sdk/_formaters/message_formatter.py", "snippet": "class LinkTypes(Enum):\n \"\"\"\n Constants representing different types of links.\n\n Attributes:\n AUDIO (str): A link type for audio content.\n IMAGE (str): A link type for image content.\n VIDEO (str): A link type for video content.\n \"\"\"\n\n AUDIO = \"audio\"\n IMAGE = \"image\"\n VIDEO = \"video\"" } ]
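As an aside on the `Contact` helper shown in the context above: its constructor accepts either plain strings or the typed helper objects, promoting strings to `Name`/`Phone` internally. A minimal sketch under that assumption (the name and phone number below are placeholder values, not data from the source):

```python
from whatsapp_cloud_sdk._files.contact import Contact

# Plain strings are promoted to Name/Phone objects by Contact.__init__;
# `phones` must be a list, otherwise the constructor raises ValueError.
contact = Contact(name="Ada Lovelace", phones=["+15551234567"])
```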
import json
from typing import Optional, List, Dict
from unicodedata import decimal

import requests

from whatsapp_cloud_sdk._exceptions.http_error import CustomHTTPError
from whatsapp_cloud_sdk._base_api import _BaseApi
from whatsapp_cloud_sdk._files.contact import Contact
from whatsapp_cloud_sdk._utils.json_serializer import MyEncoder
from whatsapp_cloud_sdk._validators.messages import (
    TextMessage,
    ButtonMessage,
    ButtonContents,
    LinkMessage,
    LocationMessage,
)
from whatsapp_cloud_sdk._formaters.message_formatter import MessageFormatter, LinkTypes
7,476
longitude=message.longitude, latitude=latitude, message_id=message_id, ) return await self.__send(data=payload) async def send_contact( self, contacts: List[Contact], recipient_number: str, message_id: Optional[str] = None, ): """ Send a contact to a recipient. Args: contacts (list): A list of contact details.Each contact detail a list of contact objects. recipient_number (str): The recipient's WhatsApp phone number. message_id (str, optional): An optional message ID if it is a reply to a message. Returns: Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain The response from the WhatsApp API. """ if not isinstance(contacts, list): raise TypeError("Contacts must be a list") for i, contact in contacts: if not isinstance(contact, Contact): raise AttributeError( f"Contact {i} must be of type {type(Contact)}. Got {type(type(contact))} instead." ) payload = formatter.format_contact_message( contacts=contacts, to=recipient_number, message_id=message_id ) return await self.__send(data=payload) async def send_sticker_with_url( self, link: str, recipient_number: str, message_id: Optional[str], ): """ Send a sticker by URL to a recipient. Args: link (str): The URL of the sticker. recipient_number (str): The recipient's WhatsApp phone number. message_id (str, optional): An optional message ID if it is a reply to a message. Returns: Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain The response from the WhatsApp API. """ payload = formatter.format_sticker_message_by_url( link=link, to=recipient_number, message_id=message_id ) return await self.__send(data=payload) async def mark_message_as_read(self, message_id: str): """ Mark a message as read. Args: message_id (str): The ID of the message to mark as read. Raises: ValueError: If message_id is not provided. Returns: Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain The response from the WhatsApp API. """ if not message_id: raise ValueError("A message Id is required") payload = formatter.mark_message_as_read(message_id=message_id) return await self.__send(data=payload) async def __send( self, data: dict, ) -> dict: """ Send data to the WhatsApp API. Args: data (dict): The data to send to the WhatsApp API. Raises: AttributeError: If there is no data to send. Returns: Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain The response from the WhatsApp API. """ if not data: raise AttributeError("No data to send") # Convert message_body to JSON json_data = json.dumps(data, cls=MyEncoder) timeout_secs = 10 response = requests.post( self.WA_URL, headers=self.HEADERS, data=json_data, timeout=timeout_secs ) try: response.raise_for_status() except requests.HTTPError as exc: # Re raise the error with the text gotten
"""This module Represents a WhatsApp bot for communication with the WhatsApp API.""" formatter = MessageFormatter() class Bot(_BaseApi): # pylint: disable=line-too-long """ Represents a WhatsApp bot for communication with the WhatsApp API. This class inherits from the `BaseApi` class and provides methods for sending various types of messages, marking messages as read, and handling communication with the WhatsApp API. Args: cloud_api_access_token (str, optional): The Cloud API access token used for authentication. wa_phone_number_id (str, optional): The WhatsApp phone number ID. version (str, optional): The WhatsApp API version to use. Inherits attributes from the `BaseApi` class, such as `WA_URL` and `HEADERS`. Attributes: Inherits attributes from the `BaseApi` class. Methods: - `send_text(text: str, recipient_number: str, message_id: str = None, preview_url: bool = False)`: Send a text message to a recipient. - `send_text_with_buttons(text: str, buttons: list, recipient_number: str)`: Send a text message with buttons to a recipient. - `send_reply_with_reaction(message_id: str, emoji: str, recipient_number: str)`: Send a reaction to a message. - `send_image_by_url(link: str, caption: Optional[str], recipient_number: str, message_id: Optional[str])`: Send an image by URL. - `send_audio_by_url(link: str, caption: Optional[str], recipient_number: str)`: Send audio by URL. - `send_document_by_url(link: str, caption: Optional[str], recipient_number: str)`: Send a document by URL. - `send_video_by_url(link: str, caption: Optional[str], recipient_number: str, message_id: Optional[str] = None) `: Send a video by URL. - `send_location(latitude: decimal, longitude: int, name: str, address: str, recipient_number: str)`: Send a location. - `send_contact(contact: list, recipient_number: str)`: Send a contact. - `send_sticker_with_url(link: str, recipient_number: str)`: Send a sticker by URL. - `mark_message_as_read(message_id: str)`: Mark a message as read. - `__send(data: dict, method: Optional[str] = "POST") -> dict`: Send data to the WhatsApp API. Usage Example: ``` python from your_library import Bot # Initialize the bot. bot = Bot(cloud_api_access_token="your_access_token", wa_phone_number_id="your_phone_number_id", version="v17.0") # Use bot methods to interact with the WhatsApp API bot.send_text("Hello, world!", "recipient_number") ``` """ def __init__( self, cloud_api_access_token: str = None, wa_phone_number_id: str = None, version: str = None, ): """ Initialize a Bot instance for WhatsApp API communication. Args: cloud_api_access_token (str, optional): The Cloud API access token used for authentication. wa_phone_number_id (str, optional): The WhatsApp phone number ID. version (str, optional): The WhatsApp API version to use. Inherits attributes from the `BaseApi` class. """ super().__init__( cloud_api_access_token=cloud_api_access_token, wa_phone_number_id=wa_phone_number_id, version=version, ) async def send_text( self, text: str, recipient_number: str, message_id: str = None, preview_url: bool = False, ): """ Send a text message to a recipient. Args: text (str): The text of the message. recipient_number (str): The recipient's WhatsApp phone number. message_id (str, optional): The ID of the message if it is a reply to a message (optional). preview_url (bool): Enable or disable URL preview (default is False). Returns: Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain The response from the WhatsApp API. 
""" message = TextMessage( text=text, recipient_number=recipient_number, message_id=message_id ) payload = formatter.format_text_message( to=message.recipient_number, body=message.text, message_id=message_id, preview_url=preview_url, ) return await self.__send(data=payload) async def send_text_with_buttons( self, text: str, buttons: List[Dict[str, str]], recipient_number: str, message_id: Optional[str], ): """ Send a text message with buttons to a recipient. Args: text (str): The text of the message. buttons (list): List of buttons, where each button is a dictionary with the following keys: - 'title' (str): The title or label of the button. - 'id' (optional, str): An optional id for the button. recipient_number (str): The recipient's WhatsApp phone number. message_id (str, optional): An optional message ID if it is a reply to a message. Returns: Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain The response from the WhatsApp API. """ if not isinstance(buttons, list): raise TypeError("Buttons must be a list of dict object") buttons_content = [ButtonContents(**b) for b in buttons] message = ButtonMessage( text=text, recipient_number=recipient_number, buttons=buttons_content ) payload = formatter.format_button_message( to=recipient_number, text=message.text, buttons=message.buttons, message_id=message_id, ) return await self.__send(data=payload) # pylint: disable=fixme # TODO: Add input validation for all bot methods async def send_reaction_message( self, message_id: Optional[str], emoji, recipient_number: str ): """ Send a reaction message. Args: message_id (str, optional): An optional message ID if it is a reply to a message. emoji (str): The reaction emoji to send. recipient_number (str): The recipient's WhatsApp phone number. Returns: Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain The response from the WhatsApp API. """ payload = formatter.format_reply_with_reaction( to=recipient_number, message_id=message_id, emoji=emoji ) return await self.__send(data=payload) async def send_image_by_url( self, link: str, caption: Optional[str], recipient_number: str, message_id: Optional[str], ): """ Send an image by URL to a recipient. Args: link (str): The URL of the image. caption (str, optional): An optional caption for the image. recipient_number (str): The recipient's WhatsApp phone number. message_id (str, optional): An optional message ID if it is a reply to a message. Returns: Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain The response from the WhatsApp API. """ message = LinkMessage(link=link, caption=caption) payload = formatter.format_link_message( to=recipient_number, link=message.link, m_type=LinkTypes.IMAGE, message_id=message_id, ) return await self.__send(data=payload) async def send_audio_by_url( self, link: str, recipient_number: str, message_id: Optional[str], ): """ Send an audio file by URL to a recipient. Args: link (str): The URL of the audio file. recipient_number (str): The recipient's WhatsApp phone number. message_id (str, optional): An optional message ID if it is a reply to a message. Returns: Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain The response from the WhatsApp API. 
""" message = LinkMessage(link=link) payload = formatter.format_link_message( to=recipient_number, link=message.link, m_type=LinkTypes.AUDIO, message_id=message_id, ) return await self.__send(data=payload) async def send_document_by_url( self, link: str, caption: Optional[str], recipient_number: str, message_id: Optional[str] = None, ): """ Send a document by URL to a recipient. Args: link (str): The URL of the document. caption (str, optional): An optional caption for the document. recipient_number (str): The recipient's WhatsApp phone number. message_id (str, optional): An optional message ID if it is a reply to a message. Returns: Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain The response from the WhatsApp API. """ message = LinkMessage( link=link, caption=caption, ) payload = formatter.format_send_document_by_url( to=recipient_number, document_link=message.link, caption=message.caption, message_id=message_id, ) return await self.__send(data=payload) async def send_video_by_url( self, link: str, caption: Optional[str], recipient_number: str, message_id: Optional[str] = None, ): """ Send a video by URL to a recipient. Args: link (str): The URL of the video. caption (str, optional): An optional caption for the video. recipient_number (str): The recipient's WhatsApp phone number. message_id (str, optional): An optional message ID if it is a reply to a message. Returns: Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain The response from the WhatsApp API. """ message = LinkMessage(link=link, caption=caption) payload = formatter.format_link_message( to=recipient_number, link=message.link, m_type=LinkTypes.VIDEO, caption=message.caption, message_id=message_id, ) return await self.__send(data=payload) # pylint: disable=too-many-arguments async def send_location( self, latitude: decimal, longitude: int, name: str, address: str, recipient_number: str, message_id: Optional[str] = None, ): """ Send a location to a recipient. Args: latitude (decimal): The latitude of the location. longitude (int): The longitude of the location. name (str): The name of the location. address (str): The address of the location. recipient_number (str): The recipient's WhatsApp phone number. message_id (str, optional): An optional message ID if it is a reply to a message. Returns: Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain The response from the WhatsApp API. """ message = LocationMessage(longitude=longitude, name=name, address=address) payload = formatter.format_location_message( to=recipient_number, name=message.name, address=message.address, longitude=message.longitude, latitude=latitude, message_id=message_id, ) return await self.__send(data=payload) async def send_contact( self, contacts: List[Contact], recipient_number: str, message_id: Optional[str] = None, ): """ Send a contact to a recipient. Args: contacts (list): A list of contact details.Each contact detail a list of contact objects. recipient_number (str): The recipient's WhatsApp phone number. message_id (str, optional): An optional message ID if it is a reply to a message. Returns: Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain The response from the WhatsApp API. 
""" if not isinstance(contacts, list): raise TypeError("Contacts must be a list") for i, contact in contacts: if not isinstance(contact, Contact): raise AttributeError( f"Contact {i} must be of type {type(Contact)}. Got {type(type(contact))} instead." ) payload = formatter.format_contact_message( contacts=contacts, to=recipient_number, message_id=message_id ) return await self.__send(data=payload) async def send_sticker_with_url( self, link: str, recipient_number: str, message_id: Optional[str], ): """ Send a sticker by URL to a recipient. Args: link (str): The URL of the sticker. recipient_number (str): The recipient's WhatsApp phone number. message_id (str, optional): An optional message ID if it is a reply to a message. Returns: Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain The response from the WhatsApp API. """ payload = formatter.format_sticker_message_by_url( link=link, to=recipient_number, message_id=message_id ) return await self.__send(data=payload) async def mark_message_as_read(self, message_id: str): """ Mark a message as read. Args: message_id (str): The ID of the message to mark as read. Raises: ValueError: If message_id is not provided. Returns: Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain The response from the WhatsApp API. """ if not message_id: raise ValueError("A message Id is required") payload = formatter.mark_message_as_read(message_id=message_id) return await self.__send(data=payload) async def __send( self, data: dict, ) -> dict: """ Send data to the WhatsApp API. Args: data (dict): The data to send to the WhatsApp API. Raises: AttributeError: If there is no data to send. Returns: Coroutine: A coroutine that should be awaited, The return value of the coroutine would contain The response from the WhatsApp API. """ if not data: raise AttributeError("No data to send") # Convert message_body to JSON json_data = json.dumps(data, cls=MyEncoder) timeout_secs = 10 response = requests.post( self.WA_URL, headers=self.HEADERS, data=json_data, timeout=timeout_secs ) try: response.raise_for_status() except requests.HTTPError as exc: # Re raise the error with the text gotten
raise CustomHTTPError(
0
2023-10-15 21:12:45+00:00
12k
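Because every `send_*` method of the `Bot` class above is a coroutine, the example in its docstring only runs inside an event loop. A minimal usage sketch follows; the module path of `Bot`, the token, the phone-number id, and the recipient number are placeholders or assumptions, not values from the source:

```python
import asyncio

# Import path is an assumption; only the `whatsapp_cloud_sdk` package prefix
# appears in the source imports.
from whatsapp_cloud_sdk.bot import Bot


async def main():
    bot = Bot(
        cloud_api_access_token="YOUR_TOKEN",        # placeholder
        wa_phone_number_id="YOUR_PHONE_NUMBER_ID",  # placeholder
        version="v17.0",
    )
    # send_text returns a coroutine whose result is the WhatsApp API response.
    response = await bot.send_text(text="Hello, world!", recipient_number="15551234567")
    print(response)


asyncio.run(main())
```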
caglarkucuk/earthformer-satellite-to-radar
ef-sat2rad/earthformer/datasets/sevir/sevir_torch_wrap.py
[ { "identifier": "cfg", "path": "ef-sat2rad/earthformer/config.py", "snippet": "_CURR_DIR = os.path.realpath(os.path.dirname(os.path.realpath(__file__)))" }, { "identifier": "SEVIRDataLoader", "path": "ef-sat2rad/earthformer/datasets/sevir/sevir_dataloader.py", "snippet": "class SEVIRDataLoader:\n r\"\"\"\n DataLoader that loads SEVIR sequences, and spilts each event\n into segments according to specified sequence length.\n\n Event Frames:\n [-----------------------raw_seq_len----------------------]\n [-----seq_len-----]\n <--stride-->[-----seq_len-----]\n <--stride-->[-----seq_len-----]\n ...\n \"\"\"\n def __init__(self,\n data_types: Sequence[str] = None,\n seq_len: int = 49,\n raw_seq_len: int = 49,\n sample_mode: str = 'sequent',\n stride: int = 12,\n batch_size: int = 1,\n layout: str = 'NHWT',\n num_shard: int = 1,\n rank: int = 0,\n split_mode: str = \"uneven\",\n sevir_catalog: Union[str, pd.DataFrame] = None,\n sevir_data_dir: str = None,\n start_date: datetime.datetime = None,\n end_date: datetime.datetime = None,\n datetime_filter=None,\n catalog_filter='default',\n shuffle: bool = False,\n shuffle_seed: int = 1,\n output_type=np.float32,\n preprocess: bool = True,\n rescale_method: str = 'sevir', # '01',\n downsample_dict: Dict[str, Sequence[int]] = None,\n verbose: bool = False):\n r\"\"\"\n Parameters\n ----------\n data_types\n A subset of SEVIR_DATA_TYPES.\n seq_len\n The length of the data sequences. Should be smaller than the max length raw_seq_len.\n raw_seq_len\n The length of the raw data sequences.\n sample_mode\n 'random' or 'sequent'\n stride\n Useful when sample_mode == 'sequent'\n stride must not be smaller than out_len to prevent data leakage in testing.\n batch_size\n Number of sequences in one batch.\n layout\n str: consists of batch_size 'N', seq_len 'T', channel 'C', height 'H', width 'W'\n The layout of sampled data. 
Raw data layout is 'NHWT'.\n valid layout: 'NHWT', 'NTHW', 'NTCHW', 'TNHW', 'TNCHW'.\n num_shard\n Split the whole dataset into num_shard parts for distributed training.\n rank\n Rank of the current process within num_shard.\n split_mode: str\n if 'ceil', all `num_shard` dataloaders have the same length = ceil(total_len / num_shard).\n Different dataloaders may have some duplicated data batches, if the total size of datasets is not divided by num_shard.\n if 'floor', all `num_shard` dataloaders have the same length = floor(total_len / num_shard).\n The last several data batches may be wasted, if the total size of datasets is not divided by num_shard.\n if 'uneven', the last datasets has larger length when the total length is not divided by num_shard.\n The uneven split leads to synchronization error in dist.all_reduce() or dist.barrier().\n See related issue: https://github.com/pytorch/pytorch/issues/33148\n Notice: this also affects the behavior of `self.use_up`.\n sevir_catalog\n Name of SEVIR catalog CSV file.\n sevir_data_dir\n Directory path to SEVIR data.\n start_date\n Start time of SEVIR samples to generate.\n end_date\n End time of SEVIR samples to generate.\n datetime_filter\n function\n Mask function applied to time_utc column of catalog (return true to keep the row).\n Pass function of the form lambda t : COND(t)\n Example: lambda t: np.logical_and(t.dt.hour>=13,t.dt.hour<=21) # Generate only day-time events\n catalog_filter\n function or None or 'default'\n Mask function applied to entire catalog dataframe (return true to keep row).\n Pass function of the form lambda catalog: COND(catalog)\n Example: lambda c: [s[0]=='S' for s in c.id] # Generate only the 'S' events\n shuffle\n bool, If True, data samples are shuffled before each epoch.\n shuffle_seed\n int, Seed to use for shuffling.\n output_type\n np.dtype, dtype of generated tensors\n preprocess\n bool, If True, self.preprocess_data_dict(data_dict) is called before each sample generated\n downsample_dict:\n dict, downsample_dict.keys() == data_types. downsample_dict[key] is a Sequence of (t_factor, h_factor, w_factor),\n representing the downsampling factors of all dimensions.\n verbose\n bool, verbose when opening raw data files\n \"\"\"\n super(SEVIRDataLoader, self).__init__()\n if sevir_catalog is None:\n sevir_catalog = SEVIR_CATALOG\n if sevir_data_dir is None:\n sevir_data_dir = SEVIR_DATA_DIR\n if data_types is None:\n data_types = SEVIR_DATA_TYPES\n else:\n assert set(data_types).issubset(SEVIR_DATA_TYPES)\n\n # configs which should not be modified\n self._dtypes = SEVIR_RAW_DTYPES\n self.lght_frame_times = LIGHTING_FRAME_TIMES\n self.data_shape = SEVIR_DATA_SHAPE\n\n self.raw_seq_len = raw_seq_len\n assert seq_len <= self.raw_seq_len, f'seq_len must not be larger than raw_seq_len = {raw_seq_len}, got {seq_len}.'\n self.seq_len = seq_len\n assert sample_mode in ['random', 'sequent'], f'Invalid sample_mode = {sample_mode}, must be \\'random\\' or \\'sequent\\'.'\n self.sample_mode = sample_mode\n self.stride = stride\n self.batch_size = batch_size\n valid_layout = ('NHWT', 'NTHW', 'NTCHW', 'NTHWC', 'TNHW', 'TNCHW')\n if layout not in valid_layout:\n raise ValueError(f'Invalid layout = {layout}! Must be one of {valid_layout}.')\n self.layout = layout\n self.num_shard = num_shard\n self.rank = rank\n valid_split_mode = ('ceil', 'floor', 'uneven')\n if split_mode not in valid_split_mode:\n raise ValueError(f'Invalid split_mode: {split_mode}! 
Must be one of {valid_split_mode}.')\n self.split_mode = split_mode\n self._samples = None\n self._hdf_files = {}\n self.data_types = data_types\n if isinstance(sevir_catalog, str):\n self.catalog = pd.read_csv(sevir_catalog, parse_dates=['time_utc'], low_memory=False)\n else:\n self.catalog = sevir_catalog\n self.sevir_data_dir = sevir_data_dir\n self.datetime_filter = datetime_filter\n self.catalog_filter = catalog_filter\n self.start_date = start_date\n self.end_date = end_date\n self.shuffle = shuffle\n self.shuffle_seed = int(shuffle_seed)\n self.output_type = output_type\n self.preprocess = preprocess\n self.downsample_dict = downsample_dict\n self.rescale_method = rescale_method\n self.verbose = verbose\n\n if self.start_date is not None:\n self.catalog = self.catalog[self.catalog.time_utc > self.start_date]\n if self.end_date is not None:\n self.catalog = self.catalog[self.catalog.time_utc <= self.end_date]\n if self.datetime_filter:\n self.catalog = self.catalog[self.datetime_filter(self.catalog.time_utc)]\n\n if self.catalog_filter is not None:\n if self.catalog_filter == 'default':\n self.catalog_filter = lambda c: c.pct_missing == 0\n self.catalog = self.catalog[self.catalog_filter(self.catalog)]\n\n self._compute_samples()\n self._open_files(verbose=self.verbose)\n self.reset()\n\n def _compute_samples(self):\n \"\"\"\n Computes the list of samples in catalog to be used. This sets self._samples\n \"\"\"\n # locate all events containing colocated data_types\n imgt = self.data_types\n imgts = set(imgt)\n filtcat = self.catalog[ np.logical_or.reduce([self.catalog.img_type==i for i in imgt]) ]\n # remove rows missing one or more requested img_types\n filtcat = filtcat.groupby('id').filter(lambda x: imgts.issubset(set(x['img_type'])))\n # If there are repeated IDs, remove them (this is a bug in SEVIR)\n # TODO: is it necessary to keep one of them instead of deleting them all\n filtcat = filtcat.groupby('id').filter(lambda x: x.shape[0]==len(imgt))\n self._samples = filtcat.groupby('id').apply(lambda df: self._df_to_series(df,imgt) )\n if self.shuffle:\n self.shuffle_samples()\n\n def shuffle_samples(self):\n self._samples = self._samples.sample(frac=1, random_state=self.shuffle_seed)\n\n def _df_to_series(self, df, imgt):\n d = {}\n df = df.set_index('img_type')\n for i in imgt:\n s = df.loc[i]\n idx = s.file_index if i != 'lght' else s.id\n d.update({f'{i}_filename': [s.file_name],\n f'{i}_index': [idx]})\n\n return pd.DataFrame(d)\n\n def _open_files(self, verbose=True):\n \"\"\"\n Opens HDF files\n \"\"\"\n imgt = self.data_types\n hdf_filenames = []\n for t in imgt:\n hdf_filenames += list(np.unique( self._samples[f'{t}_filename'].values ))\n self._hdf_files = {}\n for f in hdf_filenames:\n if verbose:\n print('Opening HDF5 file for reading', f)\n self._hdf_files[f] = h5py.File(self.sevir_data_dir + '/' + f, 'r')\n\n def close(self):\n \"\"\"\n Closes all open file handles\n \"\"\"\n for f in self._hdf_files:\n self._hdf_files[f].close()\n self._hdf_files = {}\n\n @property\n def num_seq_per_event(self):\n return 1 + (self.raw_seq_len - self.seq_len) // self.stride\n\n @property\n def total_num_seq(self):\n \"\"\"\n The total number of sequences within each shard.\n Notice that it is not the product of `self.num_seq_per_event` and `self.total_num_event`.\n \"\"\"\n return int(self.num_seq_per_event * self.num_event)\n\n @property\n def total_num_event(self):\n \"\"\"\n The total number of events in the whole dataset, before split into different shards.\n \"\"\"\n return 
int(self._samples.shape[0])\n\n @property\n def start_event_idx(self):\n \"\"\"\n The event idx used in certain rank should satisfy event_idx >= start_event_idx\n \"\"\"\n return self.total_num_event // self.num_shard * self.rank\n\n @property\n def end_event_idx(self):\n \"\"\"\n The event idx used in certain rank should satisfy event_idx < end_event_idx\n\n \"\"\"\n if self.split_mode == 'ceil':\n _last_start_event_idx = self.total_num_event // self.num_shard * (self.num_shard - 1)\n _num_event = self.total_num_event - _last_start_event_idx\n return self.start_event_idx + _num_event\n elif self.split_mode == 'floor':\n return self.total_num_event // self.num_shard * (self.rank + 1)\n else: # self.split_mode == 'uneven':\n if self.rank == self.num_shard - 1: # the last process\n return self.total_num_event\n else:\n return self.total_num_event // self.num_shard * (self.rank + 1)\n\n @property\n def num_event(self):\n \"\"\"\n The number of events split into each rank\n \"\"\"\n return self.end_event_idx - self.start_event_idx\n\n def _read_data(self, row, data):\n \"\"\"\n Iteratively read data into data dict. Finally data[imgt] gets shape (batch_size, height, width, raw_seq_len).\n\n Parameters\n ----------\n row\n A series with fields IMGTYPE_filename, IMGTYPE_index, IMGTYPE_time_index.\n data\n Dict, data[imgt] is a data tensor with shape = (tmp_batch_size, height, width, raw_seq_len).\n\n Returns\n -------\n data\n Updated data. Updated shape = (tmp_batch_size + 1, height, width, raw_seq_len).\n \"\"\"\n imgtyps = np.unique([x.split('_')[0] for x in list(row.keys())])\n for t in imgtyps:\n fname = row[f'{t}_filename']\n idx = row[f'{t}_index']\n t_slice = slice(0, None)\n # Need to bin lght counts into grid\n if t == 'lght':\n lght_data = self._hdf_files[fname][idx][:]\n data_i = self._lght_to_grid(lght_data, t_slice)\n else:\n data_i = self._hdf_files[fname][t][idx:idx + 1, :, :, t_slice]\n data[t] = np.concatenate((data[t], data_i), axis=0) if (t in data) else data_i\n\n return data\n\n def _lght_to_grid(self, data, t_slice=slice(0, None)):\n \"\"\"\n Converts Nx5 lightning data matrix into a 2D grid of pixel counts\n \"\"\"\n # out_size = (48,48,len(self.lght_frame_times)-1) if isinstance(t_slice,(slice,)) else (48,48)\n out_size = (*self.data_shape['lght'], len(self.lght_frame_times)) if t_slice.stop is None else (*self.data_shape['lght'], 1)\n if data.shape[0] == 0:\n return np.zeros((1,) + out_size, dtype=np.float32)\n\n # filter out points outside the grid\n x, y = data[:, 3], data[:, 4]\n m = np.logical_and.reduce([x >= 0, x < out_size[0], y >= 0, y < out_size[1]])\n data = data[m, :]\n if data.shape[0] == 0:\n return np.zeros((1,) + out_size, dtype=np.float32)\n\n # Filter/separate times\n t = data[:, 0]\n if t_slice.stop is not None: # select only one time bin\n if t_slice.stop > 0:\n if t_slice.stop < len(self.lght_frame_times):\n tm = np.logical_and(t >= self.lght_frame_times[t_slice.stop - 1],\n t < self.lght_frame_times[t_slice.stop])\n else:\n tm = t >= self.lght_frame_times[-1]\n else: # special case: frame 0 uses lght from frame 1\n tm = np.logical_and(t >= self.lght_frame_times[0], t < self.lght_frame_times[1])\n # tm=np.logical_and( (t>=FRAME_TIMES[t_slice],t<FRAME_TIMES[t_slice+1]) )\n\n data = data[tm, :]\n z = np.zeros(data.shape[0], dtype=np.int64)\n else: # compute z coordinate based on bin location times\n z = np.digitize(t, self.lght_frame_times) - 1\n z[z == -1] = 0 # special case: frame 0 uses lght from frame 1\n\n x = data[:, 3].astype(np.int64)\n y = 
data[:, 4].astype(np.int64)\n\n k = np.ravel_multi_index(np.array([y, x, z]), out_size)\n n = np.bincount(k, minlength=np.prod(out_size))\n return np.reshape(n, out_size).astype(np.int16)[np.newaxis, :]\n\n def _old_save_downsampled_dataset(self, save_dir, downsample_dict, verbose=True):\n \"\"\"\n This method does not save .h5 dataset correctly. There are some batches missed due to unknown error.\n E.g., the first converted .h5 file `SEVIR_VIL_RANDOMEVENTS_2017_0501_0831.h5` only has batch_dim = 1414,\n while it should be 1440 in the original .h5 file.\n \"\"\"\n import os\n from skimage.measure import block_reduce\n assert not os.path.exists(save_dir), f\"save_dir {save_dir} already exists!\"\n os.makedirs(save_dir)\n sample_counter = 0\n for index, row in self._samples.iterrows():\n if verbose:\n print(f\"Downsampling {sample_counter}-th data item.\", end='\\r')\n for data_type in self.data_types:\n fname = row[f'{data_type}_filename']\n idx = row[f'{data_type}_index']\n t_slice = slice(0, None)\n if data_type == 'lght':\n lght_data = self._hdf_files[fname][idx][:]\n data_i = self._lght_to_grid(lght_data, t_slice)\n else:\n data_i = self._hdf_files[fname][data_type][idx:idx + 1, :, :, t_slice]\n # Downsample t\n t_slice = [slice(None, None), ] * 4\n t_slice[-1] = slice(None, None, downsample_dict[data_type][0]) # layout = 'NHWT'\n data_i = data_i[tuple(t_slice)]\n # Downsample h, w\n data_i = block_reduce(data_i,\n block_size=(1, *downsample_dict[data_type][1:], 1),\n func=np.max)\n # Save as new .h5 file\n new_file_path = os.path.join(save_dir, fname)\n if not os.path.exists(new_file_path):\n if not os.path.exists(os.path.dirname(new_file_path)):\n os.makedirs(os.path.dirname(new_file_path))\n # Create dataset\n with h5py.File(new_file_path, 'w') as hf:\n hf.create_dataset(\n data_type, data=data_i,\n maxshape=(None, *data_i.shape[1:]))\n else:\n # Append\n with h5py.File(new_file_path, 'a') as hf:\n hf[data_type].resize((hf[data_type].shape[0] + data_i.shape[0]), axis=0)\n hf[data_type][-data_i.shape[0]:] = data_i\n\n sample_counter += 1\n\n def save_downsampled_dataset(self, save_dir, downsample_dict, verbose=True):\n \"\"\"\n Parameters\n ----------\n save_dir\n downsample_dict: Dict[Sequence[int]]\n Notice that this is different from `self.downsample_dict`, which is used during runtime.\n \"\"\"\n import os\n from skimage.measure import block_reduce\n from ...utils.utils import path_splitall\n assert not os.path.exists(save_dir), f\"save_dir {save_dir} already exists!\"\n os.makedirs(save_dir)\n for fname, hdf_file in self._hdf_files.items():\n if verbose:\n print(f\"Downsampling data in {fname}.\")\n data_type = path_splitall(fname)[0]\n if data_type == 'lght':\n # TODO: how to get idx?\n raise NotImplementedError\n # lght_data = self._hdf_files[fname][idx][:]\n # t_slice = slice(0, None)\n # data_i = self._lght_to_grid(lght_data, t_slice)\n else:\n data_i = self._hdf_files[fname][data_type]\n # Downsample t\n t_slice = [slice(None, None), ] * 4\n t_slice[-1] = slice(None, None, downsample_dict[data_type][0]) # layout = 'NHWT'\n data_i = data_i[tuple(t_slice)]\n # Downsample h, w\n data_i = block_reduce(data_i,\n block_size=(1, *downsample_dict[data_type][1:], 1),\n func=np.max)\n # Save as new .h5 file\n new_file_path = os.path.join(save_dir, fname)\n if not os.path.exists(os.path.dirname(new_file_path)):\n os.makedirs(os.path.dirname(new_file_path))\n # Create dataset\n with h5py.File(new_file_path, 'w') as hf:\n hf.create_dataset(\n data_type, data=data_i,\n maxshape=(None, 
*data_i.shape[1:]))\n\n @property\n def sample_count(self):\n \"\"\"\n Record how many times self.__next__() is called.\n \"\"\"\n return self._sample_count\n\n def inc_sample_count(self):\n self._sample_count += 1\n\n @property\n def curr_event_idx(self):\n return self._curr_event_idx\n\n @property\n def curr_seq_idx(self):\n \"\"\"\n Used only when self.sample_mode == 'sequent'\n \"\"\"\n return self._curr_seq_idx\n\n def set_curr_event_idx(self, val):\n self._curr_event_idx = val\n\n def set_curr_seq_idx(self, val):\n \"\"\"\n Used only when self.sample_mode == 'sequent'\n \"\"\"\n self._curr_seq_idx = val\n\n def reset(self, shuffle: bool = None):\n self.set_curr_event_idx(val=self.start_event_idx)\n self.set_curr_seq_idx(0)\n self._sample_count = 0\n if shuffle is None:\n shuffle = self.shuffle\n if shuffle:\n self.shuffle_samples()\n\n def __len__(self):\n \"\"\"\n Used only when self.sample_mode == 'sequent'\n \"\"\"\n return self.total_num_seq // self.batch_size\n\n @property\n def use_up(self):\n \"\"\"\n Check if dataset is used up in 'sequent' mode.\n \"\"\"\n if self.sample_mode == 'random':\n return False\n else: # self.sample_mode == 'sequent'\n # compute the remaining number of sequences in current event\n curr_event_remain_seq = self.num_seq_per_event - self.curr_seq_idx\n all_remain_seq = curr_event_remain_seq + (\n self.end_event_idx - self.curr_event_idx - 1) * self.num_seq_per_event\n if self.split_mode == \"floor\":\n # This approach does not cover all available data, but avoid dealing with masks\n return all_remain_seq < self.batch_size\n else:\n return all_remain_seq <= 0\n\n def _load_event_batch(self, event_idx, event_batch_size):\n \"\"\"\n Loads a selected batch of events (not batch of sequences) into memory.\n\n Parameters\n ----------\n idx\n event_batch_size\n event_batch[i] = all_type_i_available_events[idx:idx + event_batch_size]\n Returns\n -------\n event_batch\n list of event batches.\n event_batch[i] is the event batch of the i-th data type.\n Each event_batch[i] is a np.ndarray with shape = (event_batch_size, height, width, raw_seq_len)\n \"\"\"\n event_idx_slice_end = event_idx + event_batch_size\n pad_size = 0\n if event_idx_slice_end > self.end_event_idx:\n pad_size = event_idx_slice_end - self.end_event_idx\n event_idx_slice_end = self.end_event_idx\n pd_batch = self._samples.iloc[event_idx:event_idx_slice_end]\n data = {}\n for index, row in pd_batch.iterrows():\n data = self._read_data(row, data)\n if pad_size > 0:\n event_batch = []\n for t in self.data_types:\n pad_shape = [pad_size, ] + list(data[t].shape[1:])\n data_pad = np.concatenate((data[t].astype(self.output_type),\n np.zeros(pad_shape, dtype=self.output_type)),\n axis=0)\n event_batch.append(data_pad)\n else:\n event_batch = [data[t].astype(self.output_type) for t in self.data_types]\n return event_batch\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.sample_mode == 'random':\n self.inc_sample_count()\n ret_dict = self._random_sample()\n else:\n if self.use_up:\n raise StopIteration\n else:\n self.inc_sample_count()\n ret_dict = self._sequent_sample()\n ret_dict = self.data_dict_to_tensor(data_dict=ret_dict,\n data_types=self.data_types)\n if self.preprocess:\n ret_dict = self.preprocess_data_dict(data_dict=ret_dict,\n data_types=self.data_types,\n layout=self.layout,\n rescale=self.rescale_method)\n if self.downsample_dict is not None:\n ret_dict = self.downsample_data_dict(data_dict=ret_dict,\n data_types=self.data_types,\n factors_dict=self.downsample_dict,\n 
layout=self.layout)\n return ret_dict\n\n def __getitem__(self, index):\n data_dict = self._idx_sample(index=index)\n return data_dict\n\n @staticmethod\n def preprocess_data_dict(data_dict, data_types=None, layout='NHWT', rescale='sevir'): # '01'):\n \"\"\"\n Parameters\n ----------\n data_dict: Dict[str, Union[np.ndarray, torch.Tensor]]\n data_types: Sequence[str]\n The data types that we want to rescale. This mainly excludes \"mask\" from preprocessing.\n layout: str\n consists of batch_size 'N', seq_len 'T', channel 'C', height 'H', width 'W'\n rescale: str\n 'sevir': use the offsets and scale factors in original implementation.\n '01': scale all values to range 0 to 1, currently only supports 'vil'\n Returns\n -------\n data_dict: Dict[str, Union[np.ndarray, torch.Tensor]]\n preprocessed data\n \"\"\"\n if rescale == 'sevir':\n scale_dict = PREPROCESS_SCALE_SEVIR\n offset_dict = PREPROCESS_OFFSET_SEVIR\n elif rescale == '01':\n scale_dict = PREPROCESS_SCALE_01\n offset_dict = PREPROCESS_OFFSET_01\n else:\n raise ValueError(f'Invalid rescale option: {rescale}.')\n if data_types is None:\n data_types = data_dict.keys()\n for key, data in data_dict.items():\n if key in data_types:\n if isinstance(data, np.ndarray):\n data = scale_dict[key] * (\n data.astype(np.float32) +\n offset_dict[key])\n data = change_layout_np(data=data,\n in_layout='NHWT',\n out_layout=layout)\n elif isinstance(data, torch.Tensor):\n data = scale_dict[key] * (\n data.float() +\n offset_dict[key])\n data = change_layout_torch(data=data,\n in_layout='NHWT',\n out_layout=layout)\n data_dict[key] = data\n return data_dict\n\n @staticmethod\n def process_data_dict_back(data_dict, data_types=None, rescale='sevir'): # '01'):\n \"\"\"\n Parameters\n ----------\n data_dict\n each data_dict[key] is a torch.Tensor.\n rescale\n str:\n 'sevir': data are scaled using the offsets and scale factors in original implementation.\n '01': data are all scaled to range 0 to 1, currently only supports 'vil'\n Returns\n -------\n data_dict\n each data_dict[key] is the data processed back in torch.Tensor.\n \"\"\"\n if rescale == 'sevir':\n scale_dict = PREPROCESS_SCALE_SEVIR\n offset_dict = PREPROCESS_OFFSET_SEVIR\n elif rescale == '01':\n scale_dict = PREPROCESS_SCALE_01\n offset_dict = PREPROCESS_OFFSET_01\n else:\n raise ValueError(f'Invalid rescale option: {rescale}.')\n if data_types is None:\n data_types = data_dict.keys()\n for key in data_types:\n data = data_dict[key]\n data = data.float() / scale_dict[key] - offset_dict[key]\n data_dict[key] = data\n return data_dict\n\n @staticmethod\n def data_dict_to_tensor(data_dict, data_types=None):\n \"\"\"\n Convert each element in data_dict to torch.Tensor (copy without grad).\n \"\"\"\n ret_dict = {}\n if data_types is None:\n data_types = data_dict.keys()\n for key, data in data_dict.items():\n if key in data_types:\n if isinstance(data, torch.Tensor):\n ret_dict[key] = data.detach().clone()\n elif isinstance(data, np.ndarray):\n ret_dict[key] = torch.from_numpy(data)\n else:\n raise ValueError(f\"Invalid data type: {type(data)}. 
Should be torch.Tensor or np.ndarray\")\n else: # key == \"mask\"\n ret_dict[key] = data\n return ret_dict\n\n @staticmethod\n def downsample_data_dict(data_dict, data_types=None, factors_dict=None, layout='NHWT'):\n \"\"\"\n Parameters\n ----------\n data_dict: Dict[str, Union[np.array, torch.Tensor]]\n factors_dict: Optional[Dict[str, Sequence[int]]]\n each element `factors` is a Sequence of int, representing (t_factor, h_factor, w_factor)\n\n Returns\n -------\n downsampled_data_dict: Dict[str, torch.Tensor]\n Modify on a deep copy of data_dict instead of directly modifying the original data_dict\n \"\"\"\n if factors_dict is None:\n factors_dict = {}\n if data_types is None:\n data_types = data_dict.keys()\n downsampled_data_dict = SEVIRDataLoader.data_dict_to_tensor(\n data_dict=data_dict,\n data_types=data_types) # make a copy\n for key, data in data_dict.items():\n factors = factors_dict.get(key, None)\n if factors is not None:\n downsampled_data_dict[key] = change_layout_torch(\n data=downsampled_data_dict[key],\n in_layout=layout,\n out_layout='NTHW')\n # downsample t dimension\n t_slice = [slice(None, None), ] * 4\n t_slice[1] = slice(None, None, factors[0])\n downsampled_data_dict[key] = downsampled_data_dict[key][tuple(t_slice)]\n # downsample spatial dimensions\n downsampled_data_dict[key] = avg_pool2d(\n input=downsampled_data_dict[key],\n kernel_size=(factors[1], factors[2]))\n\n downsampled_data_dict[key] = change_layout_torch(\n data=downsampled_data_dict[key],\n in_layout='NTHW',\n out_layout=layout)\n\n return downsampled_data_dict\n\n def _random_sample(self):\n \"\"\"\n Returns\n -------\n ret_dict\n dict. ret_dict.keys() == self.data_types.\n If self.preprocess == False:\n ret_dict[imgt].shape == (batch_size, height, width, seq_len)\n \"\"\"\n num_sampled = 0\n event_idx_list = nprand.randint(low=self.start_event_idx,\n high=self.end_event_idx,\n size=self.batch_size)\n seq_idx_list = nprand.randint(low=0,\n high=self.num_seq_per_event,\n size=self.batch_size)\n seq_slice_list = [slice(seq_idx * self.stride,\n seq_idx * self.stride + self.seq_len)\n for seq_idx in seq_idx_list]\n ret_dict = {}\n while num_sampled < self.batch_size:\n event = self._load_event_batch(event_idx=event_idx_list[num_sampled],\n event_batch_size=1)\n for imgt_idx, imgt in enumerate(self.data_types):\n sampled_seq = event[imgt_idx][[0, ], :, :, seq_slice_list[num_sampled]] # keep the dim of batch_size for concatenation\n if imgt in ret_dict:\n ret_dict[imgt] = np.concatenate((ret_dict[imgt], sampled_seq),\n axis=0)\n else:\n ret_dict.update({imgt: sampled_seq})\n return ret_dict\n\n def _sequent_sample(self):\n \"\"\"\n Returns\n -------\n ret_dict: Dict\n `ret_dict.keys()` contains `self.data_types`.\n `ret_dict[\"mask\"]` is a list of bool, indicating if the data entry is real or padded.\n If self.preprocess == False:\n ret_dict[imgt].shape == (batch_size, height, width, seq_len)\n \"\"\"\n assert not self.use_up, 'Data loader used up! 
Reset it to reuse.'\n event_idx = self.curr_event_idx\n seq_idx = self.curr_seq_idx\n num_sampled = 0\n sampled_idx_list = [] # list of (event_idx, seq_idx) records\n while num_sampled < self.batch_size:\n sampled_idx_list.append({'event_idx': event_idx,\n 'seq_idx': seq_idx})\n seq_idx += 1\n if seq_idx >= self.num_seq_per_event:\n event_idx += 1\n seq_idx = 0\n num_sampled += 1\n\n start_event_idx = sampled_idx_list[0]['event_idx']\n event_batch_size = sampled_idx_list[-1]['event_idx'] - start_event_idx + 1\n\n event_batch = self._load_event_batch(event_idx=start_event_idx,\n event_batch_size=event_batch_size)\n ret_dict = {\"mask\": []}\n all_no_pad_flag = True\n for sampled_idx in sampled_idx_list:\n batch_slice = [sampled_idx['event_idx'] - start_event_idx, ] # use [] to keepdim\n seq_slice = slice(sampled_idx['seq_idx'] * self.stride,\n sampled_idx['seq_idx'] * self.stride + self.seq_len)\n for imgt_idx, imgt in enumerate(self.data_types):\n sampled_seq = event_batch[imgt_idx][batch_slice, :, :, seq_slice]\n if imgt in ret_dict:\n ret_dict[imgt] = np.concatenate((ret_dict[imgt], sampled_seq),\n axis=0)\n else:\n ret_dict.update({imgt: sampled_seq})\n # add mask\n no_pad_flag = sampled_idx['event_idx'] < self.end_event_idx\n if not no_pad_flag:\n all_no_pad_flag = False\n ret_dict[\"mask\"].append(no_pad_flag)\n if all_no_pad_flag:\n # if there is no padded data items at all, set `ret_dict[\"mask\"] = None` for convenience.\n ret_dict[\"mask\"] = None\n # update current idx\n self.set_curr_event_idx(event_idx)\n self.set_curr_seq_idx(seq_idx)\n return ret_dict\n\n def _idx_sample(self, index):\n \"\"\"\n Parameters\n ----------\n index\n The index of the batch to sample.\n Returns\n -------\n ret_dict\n dict. ret_dict.keys() == self.data_types.\n If self.preprocess == False:\n ret_dict[imgt].shape == (batch_size, height, width, seq_len)\n \"\"\"\n event_idx = (index * self.batch_size) // self.num_seq_per_event\n seq_idx = (index * self.batch_size) % self.num_seq_per_event\n num_sampled = 0\n sampled_idx_list = [] # list of (event_idx, seq_idx) records\n while num_sampled < self.batch_size:\n sampled_idx_list.append({'event_idx': event_idx,\n 'seq_idx': seq_idx})\n seq_idx += 1\n if seq_idx >= self.num_seq_per_event:\n event_idx += 1\n seq_idx = 0\n num_sampled += 1\n\n start_event_idx = sampled_idx_list[0]['event_idx']\n event_batch_size = sampled_idx_list[-1]['event_idx'] - start_event_idx + 1\n\n event_batch = self._load_event_batch(event_idx=start_event_idx,\n event_batch_size=event_batch_size)\n ret_dict = {}\n for sampled_idx in sampled_idx_list:\n batch_slice = [sampled_idx['event_idx'] - start_event_idx, ] # use [] to keepdim\n seq_slice = slice(sampled_idx['seq_idx'] * self.stride,\n sampled_idx['seq_idx'] * self.stride + self.seq_len)\n for imgt_idx, imgt in enumerate(self.data_types):\n sampled_seq = event_batch[imgt_idx][batch_slice, :, :, seq_slice]\n if imgt in ret_dict:\n ret_dict[imgt] = np.concatenate((ret_dict[imgt], sampled_seq),\n axis=0)\n else:\n ret_dict.update({imgt: sampled_seq})\n\n ret_dict = self.data_dict_to_tensor(data_dict=ret_dict,\n data_types=self.data_types)\n if self.preprocess:\n ret_dict = self.preprocess_data_dict(data_dict=ret_dict,\n data_types=self.data_types,\n layout=self.layout,\n rescale=self.rescale_method)\n\n if self.downsample_dict is not None:\n ret_dict = self.downsample_data_dict(data_dict=ret_dict,\n data_types=self.data_types,\n factors_dict=self.downsample_dict,\n layout=self.layout)\n return ret_dict" } ]
import os import numpy as np import datetime import pandas as pd import torch import random import numpy as np import cv2 import torch import torchvision import h5py from typing import Union, Dict, Sequence, Tuple, List from torch.utils.data import Dataset as TorchDataset, DataLoader from pytorch_lightning import LightningDataModule from ...config import cfg from .sevir_dataloader import SEVIRDataLoader from torch.utils.data import Dataset from torch.utils.data import DataLoader
10,012
def __init__(self, seq_len: int = 25, raw_seq_len: int = 49, sample_mode: str = "sequent", stride: int = 12, batch_size: int = 1, layout: str = "NHWT", num_shard: int = 1, rank: int = 0, split_mode: str = "uneven", sevir_catalog: Union[str, pd.DataFrame] = None, sevir_data_dir: str = None, start_date: datetime.datetime = None, end_date: datetime.datetime = None, datetime_filter = None, catalog_filter = "default", shuffle: bool = False, shuffle_seed: int = 1, output_type = np.float32, preprocess: bool = True, rescale_method: str = "01", verbose: bool = False): super(SEVIRTorchDataset, self).__init__() self.layout = layout self.sevir_dataloader = SEVIRDataLoader( data_types=["vil", ], seq_len=seq_len, raw_seq_len=raw_seq_len, sample_mode=sample_mode, stride=stride, batch_size=batch_size, layout=layout, num_shard=num_shard, rank=rank, split_mode=split_mode, sevir_catalog=sevir_catalog, sevir_data_dir=sevir_data_dir, start_date=start_date, end_date=end_date, datetime_filter=datetime_filter, catalog_filter=catalog_filter, shuffle=shuffle, shuffle_seed=shuffle_seed, output_type=output_type, preprocess=preprocess, rescale_method=rescale_method, downsample_dict=None, verbose=verbose) def __getitem__(self, index): data_dict = self.sevir_dataloader._idx_sample(index=index) return data_dict def __len__(self): return self.sevir_dataloader.__len__() def collate_fn(self, data_dict_list): r""" Parameters ---------- data_dict_list: list[Dict[str, torch.Tensor]] Returns ------- merged_data: Dict[str, torch.Tensor] batch_size = len(data_dict_list) * data_dict["key"].batch_size """ batch_dim = self.layout.find('N') data_list_dict = { key: [data_dict[key] for data_dict in data_dict_list] for key in data_dict_list[0]} # TODO: key "mask" is not handled. Temporally fine since this func is not used data_list_dict.pop("mask", None) merged_dict = { key: torch.cat(data_list, dim=batch_dim) for key, data_list in data_list_dict.items()} merged_dict["mask"] = None return merged_dict def get_torch_dataloader(self, outer_batch_size=1, collate_fn=None, num_workers=1): # TODO: num_workers > 1 r""" We set the batch_size in Dataset by default, so outer_batch_size should be 1. In this case, not using `collate_fn` can save time. """ if outer_batch_size == 1: collate_fn = lambda x:x[0] else: if collate_fn is None: collate_fn = self.collate_fn dataloader = DataLoader( dataset=self, batch_size=outer_batch_size, collate_fn=collate_fn, num_workers=num_workers) return dataloader def check_aws(): r""" Check if aws cli is installed. """ if os.system("which aws") != 0: raise RuntimeError("AWS CLI is not installed! Please install it first. See https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html") def download_SEVIR(save_dir=None): r""" Downloaded dataset is saved in save_dir/sevir """ check_aws() if save_dir is None:
class SEVIRTorchDataset(TorchDataset): def __init__(self, seq_len: int = 25, raw_seq_len: int = 49, sample_mode: str = "sequent", stride: int = 12, batch_size: int = 1, layout: str = "NHWT", num_shard: int = 1, rank: int = 0, split_mode: str = "uneven", sevir_catalog: Union[str, pd.DataFrame] = None, sevir_data_dir: str = None, start_date: datetime.datetime = None, end_date: datetime.datetime = None, datetime_filter = None, catalog_filter = "default", shuffle: bool = False, shuffle_seed: int = 1, output_type = np.float32, preprocess: bool = True, rescale_method: str = "01", verbose: bool = False): super(SEVIRTorchDataset, self).__init__() self.layout = layout self.sevir_dataloader = SEVIRDataLoader( data_types=["vil", ], seq_len=seq_len, raw_seq_len=raw_seq_len, sample_mode=sample_mode, stride=stride, batch_size=batch_size, layout=layout, num_shard=num_shard, rank=rank, split_mode=split_mode, sevir_catalog=sevir_catalog, sevir_data_dir=sevir_data_dir, start_date=start_date, end_date=end_date, datetime_filter=datetime_filter, catalog_filter=catalog_filter, shuffle=shuffle, shuffle_seed=shuffle_seed, output_type=output_type, preprocess=preprocess, rescale_method=rescale_method, downsample_dict=None, verbose=verbose) def __getitem__(self, index): data_dict = self.sevir_dataloader._idx_sample(index=index) return data_dict def __len__(self): return self.sevir_dataloader.__len__() def collate_fn(self, data_dict_list): r""" Parameters ---------- data_dict_list: list[Dict[str, torch.Tensor]] Returns ------- merged_data: Dict[str, torch.Tensor] batch_size = len(data_dict_list) * data_dict["key"].batch_size """ batch_dim = self.layout.find('N') data_list_dict = { key: [data_dict[key] for data_dict in data_dict_list] for key in data_dict_list[0]} # TODO: key "mask" is not handled. Temporally fine since this func is not used data_list_dict.pop("mask", None) merged_dict = { key: torch.cat(data_list, dim=batch_dim) for key, data_list in data_list_dict.items()} merged_dict["mask"] = None return merged_dict def get_torch_dataloader(self, outer_batch_size=1, collate_fn=None, num_workers=1): # TODO: num_workers > 1 r""" We set the batch_size in Dataset by default, so outer_batch_size should be 1. In this case, not using `collate_fn` can save time. """ if outer_batch_size == 1: collate_fn = lambda x:x[0] else: if collate_fn is None: collate_fn = self.collate_fn dataloader = DataLoader( dataset=self, batch_size=outer_batch_size, collate_fn=collate_fn, num_workers=num_workers) return dataloader def check_aws(): r""" Check if aws cli is installed. """ if os.system("which aws") != 0: raise RuntimeError("AWS CLI is not installed! Please install it first. See https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html") def download_SEVIR(save_dir=None): r""" Downloaded dataset is saved in save_dir/sevir """ check_aws() if save_dir is None:
save_dir = cfg.datasets_dir
0
2023-10-23 11:45:50+00:00
12k
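A minimal usage sketch for the SEVIRTorchDataset record above, assuming the class from the cropped code is importable; the catalog path, data directory, date range and batch size below are illustrative assumptions, not values taken from the record.

import datetime

# Illustrative only: the catalog and data paths are hypothetical placeholders.
# Assumes SEVIRTorchDataset from the record above is importable.
train_dataset = SEVIRTorchDataset(
    seq_len=25,
    sample_mode="sequent",
    stride=12,
    batch_size=2,                            # inner batch size, handled by SEVIRDataLoader
    layout="NHWT",
    sevir_catalog="data/sevir/CATALOG.csv",  # hypothetical path
    sevir_data_dir="data/sevir/data",        # hypothetical path
    start_date=datetime.datetime(2019, 1, 1),
    end_date=datetime.datetime(2019, 6, 1),
    preprocess=True,
    rescale_method="01",
)

loader = train_dataset.get_torch_dataloader(outer_batch_size=1, num_workers=1)
for batch in loader:
    vil = batch["vil"]   # tensor in "NHWT" layout, preprocessed and rescaled to [0, 1]
    break

Keeping outer_batch_size at 1 means each __getitem__ call already yields a full inner batch, so the identity collate inside get_torch_dataloader simply unwraps the single-element list instead of re-stacking tensors.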
DTennant/GPC
data/get_datasets.py
[ { "identifier": "MergedDataset", "path": "data/data_utils.py", "snippet": "class MergedDataset(Dataset):\n\n \"\"\"\n Takes two datasets (labelled_dataset, unlabelled_dataset) and merges them\n Allows you to iterate over them in parallel\n \"\"\"\n\n def __init__(self, labelled_dataset, unlabelled_dataset):\n\n self.labelled_dataset = labelled_dataset\n self.unlabelled_dataset = unlabelled_dataset\n self.target_transform = None\n\n def __getitem__(self, item):\n\n if item < len(self.labelled_dataset):\n img, label, uq_idx = self.labelled_dataset[item]\n labeled_or_not = 1\n\n else:\n\n img, label, uq_idx = self.unlabelled_dataset[item - len(self.labelled_dataset)]\n labeled_or_not = 0\n\n\n return img, label, uq_idx, np.array([labeled_or_not])\n\n def __len__(self):\n return len(self.unlabelled_dataset) + len(self.labelled_dataset)" }, { "identifier": "get_cifar_10_datasets", "path": "data/cifar.py", "snippet": "def get_cifar_10_datasets(train_transform, test_transform, train_classes=(0, 1, 8, 9),\n prop_train_labels=0.8, split_train_val=False, seed=0):\n\n np.random.seed(seed)\n\n # Init entire training set\n whole_training_set = CustomCIFAR10(root=cifar_10_root, transform=train_transform, train=True)\n\n # Get labelled training set which has subsampled classes, then subsample some indices from that\n train_dataset_labelled = subsample_classes(deepcopy(whole_training_set), include_classes=train_classes)\n subsample_indices = subsample_instances(train_dataset_labelled, prop_indices_to_subsample=prop_train_labels)\n train_dataset_labelled = subsample_dataset(train_dataset_labelled, subsample_indices)\n\n # Split into training and validation sets\n train_idxs, val_idxs = get_train_val_indices(train_dataset_labelled)\n train_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), train_idxs)\n val_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), val_idxs)\n val_dataset_labelled_split.transform = test_transform\n\n # Get unlabelled data\n unlabelled_indices = set(whole_training_set.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n\n # Get test set for all classes\n test_dataset = CustomCIFAR10(root=cifar_10_root, transform=test_transform, train=False)\n\n # Either split train into train and val or use test set as val\n train_dataset_labelled = train_dataset_labelled_split if split_train_val else train_dataset_labelled\n val_dataset_labelled = val_dataset_labelled_split if split_train_val else None\n\n all_datasets = {\n 'train_labelled': train_dataset_labelled,\n 'train_unlabelled': train_dataset_unlabelled,\n 'val': val_dataset_labelled,\n 'test': test_dataset,\n }\n\n return all_datasets" }, { "identifier": "get_cifar_100_datasets", "path": "data/cifar.py", "snippet": "def get_cifar_100_datasets(train_transform, test_transform, train_classes=range(80),\n prop_train_labels=0.8, split_train_val=False, seed=0):\n\n np.random.seed(seed)\n\n # Init entire training set\n whole_training_set = CustomCIFAR100(root=cifar_100_root, transform=train_transform, train=True)\n\n # Get labelled training set which has subsampled classes, then subsample some indices from that\n train_dataset_labelled = subsample_classes(deepcopy(whole_training_set), include_classes=train_classes)\n subsample_indices = subsample_instances(train_dataset_labelled, prop_indices_to_subsample=prop_train_labels)\n train_dataset_labelled = 
subsample_dataset(train_dataset_labelled, subsample_indices)\n\n # Split into training and validation sets\n train_idxs, val_idxs = get_train_val_indices(train_dataset_labelled)\n train_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), train_idxs)\n val_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), val_idxs)\n val_dataset_labelled_split.transform = test_transform\n\n # Get unlabelled data\n unlabelled_indices = set(whole_training_set.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n\n # Get test set for all classes\n test_dataset = CustomCIFAR100(root=cifar_100_root, transform=test_transform, train=False)\n\n # Either split train into train and val or use test set as val\n train_dataset_labelled = train_dataset_labelled_split if split_train_val else train_dataset_labelled\n val_dataset_labelled = val_dataset_labelled_split if split_train_val else None\n\n all_datasets = {\n 'train_labelled': train_dataset_labelled,\n 'train_unlabelled': train_dataset_unlabelled,\n 'val': val_dataset_labelled,\n 'test': test_dataset,\n }\n\n return all_datasets" }, { "identifier": "get_cifar_100_ucd_datasets", "path": "data/cifar.py", "snippet": "def get_cifar_100_ucd_datasets(train_transform, test_transform, labelled_classes=range(50), unlabelled_classes=range(25, 100),\n prop_train_labels=0.8, split_train_val=False, seed=0):\n\n np.random.seed(seed)\n\n # Init entire training set\n whole_training_set = CustomCIFAR100(root=cifar_100_root, transform=train_transform, train=True, download=True)\n\n # Get labelled training set which has subsampled classes, then subsample some indices from that\n train_dataset_labelled = subsample_classes(deepcopy(whole_training_set), include_classes=labelled_classes)\n subsample_indices = subsample_instances(train_dataset_labelled, prop_indices_to_subsample=prop_train_labels)\n train_dataset_labelled = subsample_dataset(train_dataset_labelled, subsample_indices)\n\n # Split into training and validation sets\n train_idxs, val_idxs = get_train_val_indices(train_dataset_labelled)\n train_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), train_idxs)\n val_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), val_idxs)\n val_dataset_labelled_split.transform = test_transform\n\n # Get unlabelled data\n train_dataset_unlabelled = subsample_classes(deepcopy(whole_training_set), include_classes=unlabelled_classes)\n unlabelled_indices = set(train_dataset_unlabelled.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n # unlabelled_indices = set(whole_training_set.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n # train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n\n # Get test set for all classes\n test_dataset = CustomCIFAR100(root=cifar_100_root, transform=test_transform, train=False)\n\n # Either split train into train and val or use test set as val\n train_dataset_labelled = train_dataset_labelled_split if split_train_val else train_dataset_labelled\n val_dataset_labelled = val_dataset_labelled_split if split_train_val else None\n\n all_datasets = {\n 'train_labelled': train_dataset_labelled,\n 'train_unlabelled': train_dataset_unlabelled,\n 'val': val_dataset_labelled,\n 'test': test_dataset,\n 
}\n\n return all_datasets" }, { "identifier": "get_herbarium_datasets", "path": "data/herbarium_19.py", "snippet": "def get_herbarium_datasets(train_transform, test_transform, train_classes=range(500), prop_train_labels=0.8,\n seed=0, split_train_val=False):\n\n np.random.seed(seed)\n\n # Init entire training set\n train_dataset = HerbariumDataset19(transform=train_transform,\n root=os.path.join(herbarium_dataroot, 'small-train'))\n\n # Get labelled training set which has subsampled classes, then subsample some indices from that\n # TODO: Subsampling unlabelled set in uniform random fashion from training data, will contain many instances of dominant class\n train_dataset_labelled = subsample_classes(deepcopy(train_dataset), include_classes=train_classes)\n subsample_indices = subsample_instances(train_dataset_labelled, prop_indices_to_subsample=prop_train_labels)\n train_dataset_labelled = subsample_dataset(train_dataset_labelled, subsample_indices)\n\n # Split into training and validation sets\n if split_train_val:\n\n train_idxs, val_idxs = get_train_val_indices(train_dataset_labelled,\n val_instances_per_class=5)\n train_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), train_idxs)\n val_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), val_idxs)\n val_dataset_labelled_split.transform = test_transform\n\n else:\n\n train_dataset_labelled_split, val_dataset_labelled_split = None, None\n\n # Get unlabelled data\n unlabelled_indices = set(train_dataset.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n train_dataset_unlabelled = subsample_dataset(deepcopy(train_dataset), np.array(list(unlabelled_indices)))\n\n # Get test dataset\n test_dataset = HerbariumDataset19(transform=test_transform,\n root=os.path.join(herbarium_dataroot, 'small-validation'))\n\n # Transform dict\n unlabelled_classes = list(set(train_dataset.targets) - set(train_classes))\n target_xform_dict = {}\n for i, k in enumerate(list(train_classes) + unlabelled_classes):\n target_xform_dict[k] = i\n\n test_dataset.target_transform = lambda x: target_xform_dict[x]\n train_dataset_unlabelled.target_transform = lambda x: target_xform_dict[x]\n\n # Either split train into train and val or use test set as val\n train_dataset_labelled = train_dataset_labelled_split if split_train_val else train_dataset_labelled\n val_dataset_labelled = val_dataset_labelled_split if split_train_val else None\n\n all_datasets = {\n 'train_labelled': train_dataset_labelled,\n 'train_unlabelled': train_dataset_unlabelled,\n 'val': val_dataset_labelled,\n 'test': test_dataset,\n }\n\n return all_datasets" }, { "identifier": "get_scars_datasets", "path": "data/stanford_cars.py", "snippet": "def get_scars_datasets(train_transform, test_transform, train_classes=range(160), prop_train_labels=0.8,\n split_train_val=False, seed=0):\n\n np.random.seed(seed)\n\n # Init entire training set\n whole_training_set = CarsDataset(data_dir=car_root, transform=train_transform, metas=meta_default_path, train=True)\n\n # Get labelled training set which has subsampled classes, then subsample some indices from that\n train_dataset_labelled = subsample_classes(deepcopy(whole_training_set), include_classes=train_classes)\n subsample_indices = subsample_instances(train_dataset_labelled, prop_indices_to_subsample=prop_train_labels)\n train_dataset_labelled = subsample_dataset(train_dataset_labelled, subsample_indices)\n\n # Split into training and validation sets\n train_idxs, val_idxs = 
get_train_val_indices(train_dataset_labelled)\n train_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), train_idxs)\n val_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), val_idxs)\n val_dataset_labelled_split.transform = test_transform\n\n # Get unlabelled data\n unlabelled_indices = set(whole_training_set.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n\n # Get test set for all classes\n test_dataset = CarsDataset(data_dir=car_root, transform=test_transform, metas=meta_default_path, train=False)\n\n # Either split train into train and val or use test set as val\n train_dataset_labelled = train_dataset_labelled_split if split_train_val else train_dataset_labelled\n val_dataset_labelled = val_dataset_labelled_split if split_train_val else None\n\n all_datasets = {\n 'train_labelled': train_dataset_labelled,\n 'train_unlabelled': train_dataset_unlabelled,\n 'val': val_dataset_labelled,\n 'test': test_dataset,\n }\n\n return all_datasets" }, { "identifier": "get_imagenet_100_datasets", "path": "data/imagenet.py", "snippet": "def get_imagenet_100_datasets(train_transform, test_transform, train_classes=range(80),\n prop_train_labels=0.8, split_train_val=False, seed=0):\n\n np.random.seed(seed)\n\n # Subsample imagenet dataset initially to include 100 classes\n subsampled_100_classes = np.random.choice(range(1000), size=(100,), replace=False)\n subsampled_100_classes = np.sort(subsampled_100_classes)\n print(f'Constructing ImageNet-100 dataset from the following classes: {subsampled_100_classes.tolist()}')\n cls_map = {i: j for i, j in zip(subsampled_100_classes, range(100))}\n\n # Init entire training set\n imagenet_training_set = ImageNetBase(root=os.path.join(imagenet_root, 'train'), transform=train_transform)\n whole_training_set = subsample_classes(imagenet_training_set, include_classes=subsampled_100_classes)\n\n # Reset dataset\n whole_training_set.samples = [(s[0], cls_map[s[1]]) for s in whole_training_set.samples]\n whole_training_set.targets = [s[1] for s in whole_training_set.samples]\n whole_training_set.uq_idxs = np.array(range(len(whole_training_set)))\n whole_training_set.target_transform = None\n\n # Get labelled training set which has subsampled classes, then subsample some indices from that\n train_dataset_labelled = subsample_classes(deepcopy(whole_training_set), include_classes=train_classes)\n subsample_indices = subsample_instances(train_dataset_labelled, prop_indices_to_subsample=prop_train_labels)\n train_dataset_labelled = subsample_dataset(train_dataset_labelled, subsample_indices)\n\n # Split into training and validation sets\n train_idxs, val_idxs = get_train_val_indices(train_dataset_labelled)\n train_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), train_idxs)\n val_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), val_idxs)\n val_dataset_labelled_split.transform = test_transform\n\n # Get unlabelled data\n unlabelled_indices = set(whole_training_set.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n\n # Get test set for all classes\n test_dataset = ImageNetBase(root=os.path.join(imagenet_root, 'val'), transform=test_transform)\n test_dataset = subsample_classes(test_dataset, include_classes=subsampled_100_classes)\n\n # Reset test 
set\n test_dataset.samples = [(s[0], cls_map[s[1]]) for s in test_dataset.samples]\n test_dataset.targets = [s[1] for s in test_dataset.samples]\n test_dataset.uq_idxs = np.array(range(len(test_dataset)))\n test_dataset.target_transform = None\n\n # Either split train into train and val or use test set as val\n train_dataset_labelled = train_dataset_labelled_split if split_train_val else train_dataset_labelled\n val_dataset_labelled = val_dataset_labelled_split if split_train_val else None\n\n all_datasets = {\n 'train_labelled': train_dataset_labelled,\n 'train_unlabelled': train_dataset_unlabelled,\n 'val': val_dataset_labelled,\n 'test': test_dataset,\n }\n\n return all_datasets" }, { "identifier": "get_imagenet_ucd_100_datasets", "path": "data/imagenet.py", "snippet": "def get_imagenet_ucd_100_datasets(train_transform, test_transform, labelled_classes=range(50),\n unlabelled_classes=range(25,100), prop_train_labels=0.5, split_train_val=False, seed=0):\n\n np.random.seed(seed)\n\n # Subsample imagenet dataset initially to include 100 classes\n # TODO: this place is not changing the args\n subsampled_100_classes = np.random.choice(range(1000), size=(100,), replace=False)\n subsampled_100_classes = np.sort(subsampled_100_classes)\n print(f'Constructing ImageNet-100 dataset from the following classes: {subsampled_100_classes.tolist()}')\n cls_map = {i: j for i, j in zip(subsampled_100_classes, range(100))}\n\n # Init entire training set\n imagenet_training_set = ImageNetBase(root=os.path.join(imagenet_root, 'train'), transform=train_transform)\n whole_training_set = subsample_classes(imagenet_training_set, include_classes=subsampled_100_classes)\n\n # Reset dataset\n whole_training_set.samples = [(s[0], cls_map[s[1]]) for s in whole_training_set.samples]\n whole_training_set.targets = [s[1] for s in whole_training_set.samples]\n whole_training_set.uq_idxs = np.array(range(len(whole_training_set)))\n whole_training_set.target_transform = None\n\n # Get labelled training set which has subsampled classes, then subsample some indices from that\n train_dataset_labelled = subsample_classes(deepcopy(whole_training_set), include_classes=labelled_classes)\n subsample_indices = subsample_instances(train_dataset_labelled, prop_indices_to_subsample=prop_train_labels)\n train_dataset_labelled = subsample_dataset(train_dataset_labelled, subsample_indices)\n\n # Split into training and validation sets\n train_idxs, val_idxs = get_train_val_indices(train_dataset_labelled)\n train_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), train_idxs)\n val_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), val_idxs)\n val_dataset_labelled_split.transform = test_transform\n\n # Get unlabelled data\n train_dataset_unlabelled = subsample_classes(deepcopy(whole_training_set), include_classes=unlabelled_classes)\n unlabelled_indices = set(train_dataset_unlabelled.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n # unlabelled_indices = set(whole_training_set.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n # train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n\n # Get test set for all classes\n test_dataset = ImageNetBase(root=os.path.join(imagenet_root, 'val'), transform=test_transform)\n test_dataset = subsample_classes(test_dataset, include_classes=subsampled_100_classes)\n\n # Reset test set\n 
test_dataset.samples = [(s[0], cls_map[s[1]]) for s in test_dataset.samples]\n test_dataset.targets = [s[1] for s in test_dataset.samples]\n test_dataset.uq_idxs = np.array(range(len(test_dataset)))\n test_dataset.target_transform = None\n\n # Either split train into train and val or use test set as val\n train_dataset_labelled = train_dataset_labelled_split if split_train_val else train_dataset_labelled\n val_dataset_labelled = val_dataset_labelled_split if split_train_val else None\n\n all_datasets = {\n 'train_labelled': train_dataset_labelled,\n 'train_unlabelled': train_dataset_unlabelled,\n 'val': val_dataset_labelled,\n 'test': test_dataset,\n }\n\n return all_datasets" }, { "identifier": "get_cub_datasets", "path": "data/cub.py", "snippet": "def get_cub_datasets(train_transform, test_transform, train_classes=range(160), prop_train_labels=0.8,\n split_train_val=False, seed=0, download=False):\n\n np.random.seed(seed)\n\n # Init entire training set\n whole_training_set = CustomCub2011(root=cub_root, transform=train_transform, train=True, download=download)\n\n # Get labelled training set which has subsampled classes, then subsample some indices from that\n train_dataset_labelled = subsample_classes(deepcopy(whole_training_set), include_classes=train_classes)\n subsample_indices = subsample_instances(train_dataset_labelled, prop_indices_to_subsample=prop_train_labels)\n train_dataset_labelled = subsample_dataset(train_dataset_labelled, subsample_indices)\n\n # Split into training and validation sets\n train_idxs, val_idxs = get_train_val_indices(train_dataset_labelled)\n train_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), train_idxs)\n val_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), val_idxs)\n val_dataset_labelled_split.transform = test_transform\n\n # Get unlabelled data\n unlabelled_indices = set(whole_training_set.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n\n # Get test set for all classes\n test_dataset = CustomCub2011(root=cub_root, transform=test_transform, train=False)\n\n # Either split train into train and val or use test set as val\n train_dataset_labelled = train_dataset_labelled_split if split_train_val else train_dataset_labelled\n val_dataset_labelled = val_dataset_labelled_split if split_train_val else None\n\n all_datasets = {\n 'train_labelled': train_dataset_labelled,\n 'train_unlabelled': train_dataset_unlabelled,\n 'val': val_dataset_labelled,\n 'test': test_dataset,\n }\n\n return all_datasets" }, { "identifier": "get_cub_universal_datasets", "path": "data/cub.py", "snippet": "def get_cub_universal_datasets(train_transform, test_transform, labelled_classes=range(160), unlabelled_classes=range(100, 200),\n prop_train_labels=0.8,\n split_train_val=False, seed=0, download=False):\n np.random.seed(seed)\n\n # Init entire training set\n whole_training_set = CustomCub2011(root=cub_root, transform=train_transform, train=True, download=download)\n\n # Get labelled training set which has subsampled classes, then subsample some indices from that\n train_dataset_labelled = subsample_classes(deepcopy(whole_training_set), include_classes=labelled_classes)\n subsample_indices = subsample_instances(train_dataset_labelled, prop_indices_to_subsample=prop_train_labels)\n train_dataset_labelled = subsample_dataset(train_dataset_labelled, subsample_indices)\n\n # Split into training and validation sets\n 
train_idxs, val_idxs = get_train_val_indices(train_dataset_labelled)\n train_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), train_idxs)\n val_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), val_idxs)\n val_dataset_labelled_split.transform = test_transform\n\n # Get unlabelled data\n train_dataset_unlabelled = subsample_classes(deepcopy(whole_training_set), include_classes=unlabelled_classes)\n unlabelled_indices = set(train_dataset_unlabelled.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n # unlabelled_indices = set(whole_training_set.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n # train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n\n # Get test set for all classes\n test_dataset = CustomCub2011(root=cub_root, transform=test_transform, train=False)\n\n # Either split train into train and val or use test set as val\n train_dataset_labelled = train_dataset_labelled_split if split_train_val else train_dataset_labelled\n val_dataset_labelled = val_dataset_labelled_split if split_train_val else None\n\n all_datasets = {\n 'train_labelled': train_dataset_labelled,\n 'train_unlabelled': train_dataset_unlabelled,\n 'val': val_dataset_labelled,\n 'test': test_dataset,\n }\n\n return all_datasets" }, { "identifier": "get_aircraft_datasets", "path": "data/fgvc_aircraft.py", "snippet": "def get_aircraft_datasets(train_transform, test_transform, train_classes=range(50), prop_train_labels=0.8,\n split_train_val=False, seed=0):\n\n np.random.seed(seed)\n\n # Init entire training set\n whole_training_set = FGVCAircraft(root=aircraft_root, transform=train_transform, split='trainval')\n\n # Get labelled training set which has subsampled classes, then subsample some indices from that\n train_dataset_labelled = subsample_classes(deepcopy(whole_training_set), include_classes=train_classes)\n subsample_indices = subsample_instances(train_dataset_labelled, prop_indices_to_subsample=prop_train_labels)\n train_dataset_labelled = subsample_dataset(train_dataset_labelled, subsample_indices)\n\n # Split into training and validation sets\n train_idxs, val_idxs = get_train_val_indices(train_dataset_labelled)\n train_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), train_idxs)\n val_dataset_labelled_split = subsample_dataset(deepcopy(train_dataset_labelled), val_idxs)\n val_dataset_labelled_split.transform = test_transform\n\n # Get unlabelled data\n unlabelled_indices = set(whole_training_set.uq_idxs) - set(train_dataset_labelled.uq_idxs)\n train_dataset_unlabelled = subsample_dataset(deepcopy(whole_training_set), np.array(list(unlabelled_indices)))\n\n # Get test set for all classes\n test_dataset = FGVCAircraft(root=aircraft_root, transform=test_transform, split='test')\n\n # Either split train into train and val or use test set as val\n train_dataset_labelled = train_dataset_labelled_split if split_train_val else train_dataset_labelled\n val_dataset_labelled = val_dataset_labelled_split if split_train_val else None\n\n all_datasets = {\n 'train_labelled': train_dataset_labelled,\n 'train_unlabelled': train_dataset_unlabelled,\n 'val': val_dataset_labelled,\n 'test': test_dataset,\n }\n\n return all_datasets" }, { "identifier": "subsample_classes", "path": "data/cifar.py", "snippet": "def subsample_classes(dataset, include_classes=(0, 1, 8, 9)):\n\n 
cls_idxs = [x for x, t in enumerate(dataset.targets) if t in include_classes]\n\n target_xform_dict = {}\n for i, k in enumerate(include_classes):\n target_xform_dict[k] = i\n\n dataset = subsample_dataset(dataset, cls_idxs)\n\n # dataset.target_transform = lambda x: target_xform_dict[x]\n\n return dataset" }, { "identifier": "subsample_classes", "path": "data/herbarium_19.py", "snippet": "def subsample_classes(dataset, include_classes=range(250)):\n\n cls_idxs = [x for x, l in enumerate(dataset.targets) if l in include_classes]\n\n target_xform_dict = {}\n for i, k in enumerate(include_classes):\n target_xform_dict[k] = i\n\n dataset = subsample_dataset(dataset, cls_idxs)\n\n dataset.target_transform = lambda x: target_xform_dict[x]\n\n return dataset" }, { "identifier": "subsample_classes", "path": "data/stanford_cars.py", "snippet": "def subsample_classes(dataset, include_classes=range(160)):\n\n include_classes_cars = np.array(include_classes) + 1 # SCars classes are indexed 1 --> 196 instead of 0 --> 195\n cls_idxs = [x for x, t in enumerate(dataset.target) if t in include_classes_cars]\n\n target_xform_dict = {}\n for i, k in enumerate(include_classes):\n target_xform_dict[k] = i\n\n dataset = subsample_dataset(dataset, cls_idxs)\n\n # dataset.target_transform = lambda x: target_xform_dict[x]\n\n return dataset" }, { "identifier": "subsample_classes", "path": "data/imagenet.py", "snippet": "def subsample_classes(dataset, include_classes=list(range(1000))):\n\n cls_idxs = [x for x, t in enumerate(dataset.targets) if t in include_classes]\n\n target_xform_dict = {}\n for i, k in enumerate(include_classes):\n target_xform_dict[k] = i\n\n dataset = subsample_dataset(dataset, cls_idxs)\n dataset.target_transform = lambda x: target_xform_dict[x]\n\n return dataset" }, { "identifier": "subsample_classes", "path": "data/cub.py", "snippet": "def subsample_classes(dataset, include_classes=range(160)):\n\n include_classes_cub = np.array(include_classes) + 1 # CUB classes are indexed 1 --> 200 instead of 0 --> 199\n cls_idxs = [x for x, (_, r) in enumerate(dataset.data.iterrows()) if int(r['target']) in include_classes_cub]\n\n # TODO: For now have no target transform\n target_xform_dict = {}\n for i, k in enumerate(include_classes):\n target_xform_dict[k] = i\n\n dataset = subsample_dataset(dataset, cls_idxs)\n\n dataset.target_transform = lambda x: target_xform_dict[x]\n\n return dataset" }, { "identifier": "subsample_classes", "path": "data/fgvc_aircraft.py", "snippet": "def subsample_classes(dataset, include_classes=range(60)):\n\n cls_idxs = [i for i, (p, t) in enumerate(dataset.samples) if t in include_classes]\n\n # TODO: Don't transform targets for now\n target_xform_dict = {}\n for i, k in enumerate(include_classes):\n target_xform_dict[k] = i\n\n dataset = subsample_dataset(dataset, cls_idxs)\n\n dataset.target_transform = lambda x: target_xform_dict[x]\n\n return dataset" }, { "identifier": "osr_split_dir", "path": "config.py", "snippet": "_C = CN()\n_C.MODEL = CN()\n_C.MODEL.DEVICE = \"cuda\"\n_C.MODEL.NAME = 'resnet50'\n_C.MODEL.LAST_STRIDE = 1\n_C.MODEL.LABEL_SMOOTH = False\n_C.MODEL.PRETRAIN_PATH = ''\n_C.INPUT = CN()\n_C.INPUT.SIZE_TRAIN = [384, 128]\n_C.INPUT.SIZE_TEST = [384, 128]\n_C.INPUT.PROB = 0.0\n_C.INPUT.RE_PROB = 0.0\n_C.INPUT.PIXEL_MEAN = [0.485, 0.456, 0.406]\n_C.INPUT.PIXEL_STD = [0.229, 0.224, 0.225]\n_C.INPUT.PADDING = 10\n_C.DATASETS = CN()\n_C.DATASETS.NAMES = ('market1501')\n_C.DATASETS.DATA_PATH = '/home/zbc/data/market1501/'\n_C.DATASETS.TRAIN_PATH = 
'bounding_box_train'\n_C.DATASETS.QUERY_PATH = 'query'\n_C.DATASETS.GALLERY_PATH = 'bounding_box_test'\n_C.DATALOADER = CN()\n_C.DATALOADER.NUM_WORKERS = 8\n_C.DATALOADER.SAMPLER = 'softmax'\n_C.DATALOADER.NUM_INSTANCE = 16\n_C.SOLVER = CN()\n_C.SOLVER.OPTIMIZER_NAME = \"Adam\"\n_C.SOLVER.FP16 = False\n_C.SOLVER.MAX_EPOCHS = 50\n_C.SOLVER.BASE_LR = 3e-4\n_C.SOLVER.BIAS_LR_FACTOR = 2\n_C.SOLVER.MOMENTUM = 0.9\n_C.SOLVER.MARGIN = 0.3\n_C.SOLVER.WEIGHT_DECAY = 0.0005\n_C.SOLVER.WEIGHT_DECAY_BIAS = 0.\n_C.SOLVER.GAMMA = 0.1\n_C.SOLVER.STEPS = (30, 55)\n_C.SOLVER.WARMUP_FACTOR = 1.0 / 3\n_C.SOLVER.WARMUP_ITERS = 500\n_C.SOLVER.WARMUP_METHOD = \"linear\"\n_C.SOLVER.CHECKPOINT_PERIOD = 50\n_C.SOLVER.LOG_PERIOD = 100\n_C.SOLVER.EVAL_PERIOD = 50\n_C.SOLVER.IMS_PER_BATCH = 64\n_C.SOLVER.CYTHON = True\n_C.TEST = CN()\n_C.TEST.IMS_PER_BATCH = 128\n_C.TEST.WEIGHT = \"\"\n_C.TEST.DEBUG = False\n_C.TEST.MULTI_GPU = False\n_C.TEST.RERANK = True\n_C.OUTPUT_DIR = \"\"" } ]
from data.data_utils import MergedDataset from data.cifar import get_cifar_10_datasets, get_cifar_100_datasets, get_cifar_100_ucd_datasets from data.herbarium_19 import get_herbarium_datasets from data.stanford_cars import get_scars_datasets from data.imagenet import get_imagenet_100_datasets, get_imagenet_ucd_100_datasets from data.cub import get_cub_datasets, get_cub_universal_datasets from data.fgvc_aircraft import get_aircraft_datasets from data.inat_mini import get_inat_universal_datasets from data.domainnet import get_domainnet_universal_datasets from data.color_symbol import get_color_symbol_universal_datasets from data.cifar import subsample_classes as subsample_dataset_cifar from data.herbarium_19 import subsample_classes as subsample_dataset_herb from data.stanford_cars import subsample_classes as subsample_dataset_scars from data.imagenet import subsample_classes as subsample_dataset_imagenet from data.cub import subsample_classes as subsample_dataset_cub from data.fgvc_aircraft import subsample_classes as subsample_dataset_air from copy import deepcopy from config import osr_split_dir import pickle import os
8,111
sub_sample_class_funcs = { 'cifar10': subsample_dataset_cifar, 'cifar100': subsample_dataset_cifar, 'imagenet_100': subsample_dataset_imagenet, 'herbarium_19': subsample_dataset_herb,
sub_sample_class_funcs = { 'cifar10': subsample_dataset_cifar, 'cifar100': subsample_dataset_cifar, 'imagenet_100': subsample_dataset_imagenet, 'herbarium_19': subsample_dataset_herb,
'cub': subsample_dataset_cub,
10
2023-10-23 18:23:22+00:00
12k
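The get_*_datasets functions in the record above all follow the same labelled/unlabelled split recipe: subsample the "old" classes, keep a proportion of those instances as labelled, and push everything else into the unlabelled pool. The sketch below reproduces that recipe on a toy dataset; ToyDataset and both helpers are stand-ins written for this example, not the repository's classes.

import numpy as np
from copy import deepcopy
from torch.utils.data import Dataset

class ToyDataset(Dataset):
    """Stand-in with the two attributes the split logic relies on: targets and uq_idxs."""
    def __init__(self, n=100, n_classes=10, seed=0):
        rng = np.random.RandomState(seed)
        self.targets = rng.randint(0, n_classes, size=n)
        self.uq_idxs = np.arange(n)
    def __getitem__(self, i):
        return self.targets[i], self.uq_idxs[i]
    def __len__(self):
        return len(self.targets)

def subsample_dataset(dataset, idxs):
    dataset.targets = dataset.targets[idxs]
    dataset.uq_idxs = dataset.uq_idxs[idxs]
    return dataset

def subsample_classes(dataset, include_classes=range(5)):
    # keep only instances whose class is in the labelled ("old") class set
    cls_idxs = [i for i, t in enumerate(dataset.targets) if t in include_classes]
    return subsample_dataset(dataset, np.array(cls_idxs))

whole = ToyDataset()
labelled = subsample_classes(deepcopy(whole), include_classes=range(5))
# keep prop_train_labels (here 0.8) of the labelled instances
keep = np.random.RandomState(0).choice(len(labelled), size=int(0.8 * len(labelled)), replace=False)
labelled = subsample_dataset(labelled, keep)
# every remaining training instance, including all novel-class ones, goes to the unlabelled pool
unlab_idxs = np.array(sorted(set(whole.uq_idxs) - set(labelled.uq_idxs)))
unlabelled = subsample_dataset(deepcopy(whole), unlab_idxs)
print(len(labelled), len(unlabelled))  # sizes depend on the random draw, e.g. roughly 40 vs 60

MergedDataset from the same record then concatenates the two subsets and tags each item with a labeled_or_not flag, which is why the helpers above only need consistent uq_idxs bookkeeping.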
nju-websoft/SCR
main.py
[ { "identifier": "reset_id", "path": "framework/utils.py", "snippet": "def reset_id(labels, new_id):\n res = []\n for index in range(len(labels)):\n res.append(new_id[int(labels[index])])\n return torch.tensor(res)" }, { "identifier": "get_reset", "path": "framework/utils.py", "snippet": "def get_reset(event_list):\n new_id, id2label = {}, {}\n\n new_id[0] = torch.tensor(0)\n id2label[torch.tensor(0)] = 0\n for index, value in enumerate(event_list):\n new_id[value] = torch.tensor(index + 1)\n id2label[index+1] = value\n return new_id, id2label" }, { "identifier": "trigger_combine_event", "path": "framework/utils.py", "snippet": "def trigger_combine_event(old_data, new_data):\n if len(new_data) == 0:\n return old_data\n init = False\n res = []\n if len(old_data) == 0:\n init = True\n old_data = copy.deepcopy(new_data)\n for old_sample_index in range(len(old_data)-1, -1, -1):\n old_sample = old_data[old_sample_index]\n combine_flag = False\n for new_sample_index in range(len(new_data)-1, -1, -1):\n new_sample = new_data[new_sample_index]\n if old_sample['input_ids'] == new_sample['input_ids']:\n old_offset = torch.nonzero(torch.tensor(np.array(old_sample['labels'])))\n new_offset = torch.nonzero(torch.tensor(np.array(new_sample['labels'])))\n eqoffset = [int(val) for val in old_offset if val in new_offset]\n combine_flag = True\n if len(eqoffset) > 0:\n eqflag = False\n for i in eqoffset: \n if old_sample['labels'][i] != new_sample['labels'][i]:\n # one ins has two event type on same trigger...\n eqflag = True \n if eqflag == False:\n new_data.remove(new_sample)\n continue\n \n old_sample['labels'] = copy.deepcopy(list(np.array(old_sample['labels']) + np.array(new_sample['labels'])))\n new_data.remove(new_sample)\n if (combine_flag and init) or (init == False):\n temp = copy.deepcopy(old_sample)\n res.append(temp)\n res += new_data\n return res" }, { "identifier": "unpack_batch", "path": "framework/utils.py", "snippet": "def unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, device):\n sentence_ids = torch.tensor(sentence_ids).to(device)\n input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).to(device)\n input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).to(device)\n segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).to(device)\n ners = torch.tensor(np.array([item.cpu().detach().numpy() for item in ners])).to(device)\n if labels != None:\n if new_id != None:\n labels = torch.tensor(np.array([reset_id(item, new_id).cpu().detach().numpy() for item in labels])).to(device)\n else:\n labels = torch.tensor(np.array([item.cpu().detach().numpy() for item in labels])).to(device)\n return sentence_ids, input_ids, input_masks, segment_ids, labels, ners" }, { "identifier": "BertAdam", "path": "framework/optimization.py", "snippet": "class BertAdam(Optimizer):\n \"\"\"Implements BERT version of Adam algorithm with weight decay fix.\n Params:\n lr: learning rate\n warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1\n t_total: total number of training steps for the learning\n rate schedule, -1 means constant learning rate. Default: -1\n schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'\n b1: Adams b1. Default: 0.9\n b2: Adams b2. Default: 0.999\n e: Adams epsilon. Default: 1e-6\n weight_decay: Weight decay. Default: 0.01\n max_grad_norm: Maximum norm for the gradients (-1 means no clipping). 
Default: 1.0\n \"\"\"\n def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',\n b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,\n max_grad_norm=1.0):\n if lr is not required and lr < 0.0:\n raise ValueError(\"Invalid learning rate: {} - should be >= 0.0\".format(lr))\n if schedule not in SCHEDULES:\n raise ValueError(\"Invalid schedule parameter: {}\".format(schedule))\n if not 0.0 <= warmup < 1.0 and not warmup == -1:\n raise ValueError(\"Invalid warmup: {} - should be in [0.0, 1.0[ or -1\".format(warmup))\n if not 0.0 <= b1 < 1.0:\n raise ValueError(\"Invalid b1 parameter: {} - should be in [0.0, 1.0[\".format(b1))\n if not 0.0 <= b2 < 1.0:\n raise ValueError(\"Invalid b2 parameter: {} - should be in [0.0, 1.0[\".format(b2))\n if not e >= 0.0:\n raise ValueError(\"Invalid epsilon value: {} - should be >= 0.0\".format(e))\n defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,\n b1=b1, b2=b2, e=e, weight_decay=weight_decay,\n max_grad_norm=max_grad_norm)\n super(BertAdam, self).__init__(params, defaults)\n\n def get_lr(self):\n lr = []\n for group in self.param_groups:\n for p in group['params']:\n state = self.state[p]\n if len(state) == 0:\n return [0]\n if group['t_total'] != -1:\n schedule_fct = SCHEDULES[group['schedule']]\n lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])\n else:\n lr_scheduled = group['lr']\n lr.append(lr_scheduled)\n return lr\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n warned_for_t_total = False\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['next_m'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['next_v'] = torch.zeros_like(p.data)\n\n next_m, next_v = state['next_m'], state['next_v']\n beta1, beta2 = group['b1'], group['b2']\n\n # Add grad clipping\n if group['max_grad_norm'] > 0:\n clip_grad_norm_(p, group['max_grad_norm'])\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n next_m.mul_(beta1).add_(grad, alpha = 1 - beta1)\n next_v.mul_(beta2).addcmul_(grad, grad, value = 1 - beta2)\n update = next_m / (next_v.sqrt() + group['e'])\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. 
This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if group['weight_decay'] > 0.0:\n update += group['weight_decay'] * p.data\n\n if group['t_total'] != -1:\n schedule_fct = SCHEDULES[group['schedule']]\n progress = state['step']/group['t_total']\n lr_scheduled = group['lr'] * schedule_fct(progress, group['warmup'])\n # warning for exceeding t_total (only active with warmup_linear\n if group['schedule'] == \"warmup_linear\" and progress > 1. and not warned_for_t_total:\n logger.warning(\n \"Training beyond specified 't_total' steps with schedule '{}'. Learning rate set to {}. \"\n \"Please set 't_total' of {} correctly.\".format(group['schedule'], lr_scheduled, self.__class__.__name__))\n warned_for_t_total = True\n # end warning\n else:\n lr_scheduled = group['lr']\n\n update_with_lr = lr_scheduled * update\n p.data.add_(-update_with_lr)\n\n state['step'] += 1\n\n # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1\n # No bias correction\n # bias_correction1 = 1 - beta1 ** state['step']\n # bias_correction2 = 1 - beta2 ** state['step']\n\n return loss" }, { "identifier": "AdamW", "path": "framework/optimization.py", "snippet": "class AdamW(Optimizer):\n \"\"\" Implements Adam algorithm with weight decay fix.\n\n Parameters:\n lr (float): learning rate. Default 1e-3.\n betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)\n eps (float): Adams epsilon. Default: 1e-6\n weight_decay (float): Weight decay. Default: 0.0\n correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):\n if lr < 0.0:\n raise ValueError(\"Invalid learning rate: {} - should be >= 0.0\".format(lr))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[1]))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {} - should be >= 0.0\".format(eps))\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)\n super().__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError(\"Adam does not support sparse gradients, please consider SparseAdam instead\")\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state[\"step\"] = 0\n # Exponential moving average of gradient values\n state[\"exp_avg\"] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state[\"exp_avg_sq\"] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state[\"exp_avg\"], state[\"exp_avg_sq\"]\n beta1, beta2 = group[\"betas\"]\n\n state[\"step\"] += 1\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n # exp_avg.mul_(beta1).add_(1.0 - beta1, grad)\n exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)\n 
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)\n denom = exp_avg_sq.sqrt().add_(group[\"eps\"])\n\n step_size = group[\"lr\"]\n if group[\"correct_bias\"]: # No bias correction for Bert\n bias_correction1 = 1.0 - beta1 ** state[\"step\"]\n bias_correction2 = 1.0 - beta2 ** state[\"step\"]\n step_size = step_size * math.sqrt(bias_correction2) / bias_correction1\n\n p.data.addcdiv_(exp_avg, denom, value=-step_size)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n # Add weight decay at the end (fixed version)\n if group[\"weight_decay\"] > 0.0:\n p.data.add_(p.data, alpha=-group[\"lr\"] * group[\"weight_decay\"])\n\n return loss" }, { "identifier": "triggerEncoder", "path": "model/trigger_encoder.py", "snippet": "class triggerEncoder(nn.Module):\n def __init__(self, config):\n super(triggerEncoder, self).__init__()\n self.config = config\n self.last_k_attention = config.last_k_attention\n self.bert = BertModel.from_pretrained(config.bert_path, output_attentions=True)\n self.embedding_dim = self.config.embedding_dim\n self.drop = nn.Dropout(0.2)\n self.linear_transform = nn.Linear(self.bert.config.hidden_size, self.config.hidden_dim, bias=True)\n self.layer_normalization = nn.LayerNorm([self.config.hidden_dim, self.config.hidden_dim])\n\n def get_attention(self, input_ids, input_masks, segment_ids):\n \n output = self.bert(input_ids, token_type_ids = segment_ids, attention_mask = input_masks)\n \n now_attention = 0\n attention = output[2]\n for i in range(self.last_k_attention):\n now_layer_att = attention[-i]\n now_layer_att = torch.mean(now_layer_att, 1)\n res_att = now_layer_att/(torch.sum(now_layer_att, dim = -1, keepdim = True)+1e-9)\n now_attention += res_att\n avg_layer_att = now_attention/self.last_k_attention\n return avg_layer_att\n\n\n\n\n def get_feature(self, sentence_ids, input_ids, input_masks, segment_ids):\n feature = self.bert(input_ids, token_type_ids = segment_ids, attention_mask = input_masks)[0]\n seq_output = self.drop(feature)\n seq_output = self.linear_transform(seq_output)\n output = F.gelu(seq_output)\n feature = self.layer_normalization(output)\n feature = feature.view((1,-1))\n return feature\n\n def forward(self, sentence_ids, input_ids, input_masks, segment_ids):\n seq_output = self.bert(input_ids, token_type_ids = segment_ids, attention_mask = input_masks)[0]\n seq_output = self.drop(seq_output)\n seq_output = self.linear_transform(seq_output)\n output = F.gelu(seq_output)\n output = self.layer_normalization(output)\n return output" }, { "identifier": "argumentDetection", "path": "model/argument_detection.py", "snippet": "class argumentDetection(nn.Module):\n def __init__(self, config):\n super(argumentDetection, self).__init__()\n self.config = config\n self.bert = BertModel.from_pretrained(config.bert_path)\n self.embedding_dim = self.config.embedding_dim\n self.classifier = nn.Linear(self.embedding_dim*2, config.args_num, bias=False)\n self.dropout = nn.Dropout(0.2)\n self.criterion = nn.CrossEntropyLoss()\n def forward(self, input_ids, labels, segment_ids, input_mask, offset, metadata, unseen_matadata, trigger, ner, gold_args):\n sequence_output = self.bert(input_ids, 
token_type_ids=segment_ids, attention_mask=input_mask)[0]\n new_logits = None\n new_label = []\n for i in range(len(ner)):\n for start, end in ner[i]:\n embedding = sequence_output[i][[start+1, end]].view(-1, self.embedding_dim*2)\n embedding = self.dropout(embedding)\n logits = self.classifier(embedding)\n one_trigger = trigger[i]\n unseen_args = unseen_matadata[one_trigger]\n logits[:,unseen_args] = 0\n label = labels[i][start+1]\n new_label.append(label)\n if new_logits == None:\n new_logits = logits\n else:\n new_logits = torch.cat([new_logits, logits], dim = 0)\n\n new_label = torch.tensor(new_label).cuda()\n \n loss = self.criterion(new_logits, new_label)\n return loss\n\n \n def get_res(self, input_ids, segment_ids, input_mask, ner):\n sequence_output = self.bert(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)[0]\n res_logits = []\n for i in range(len(ner)):\n one_logits = None\n for start, end in ner[i]:\n embedding = sequence_output[i][[start+1, end]].view(-1, self.embedding_dim*2)\n embedding = self.dropout(embedding)\n logits = self.classifier(embedding)\n if one_logits == None:\n one_logits = logits\n else:\n one_logits = torch.cat([one_logits, logits], dim = 0)\n \n res_logits.append(one_logits)\n return res_logits\n\n def get_feature(self, input_ids, segment_ids, input_mask):\n sequence_output = self.bert(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)[0]\n feature = self.dropout(sequence_output)\n feature = feature.view((1,-1))\n return feature" }, { "identifier": "classifier", "path": "model/classifier.py", "snippet": "class classifier(nn.Module):\n def __init__(self, config, events_num):\n super(classifier, self).__init__()\n self.config = config\n self.events_num = events_num\n self.embedding_dim = self.config.embedding_dim\n self.classifier = nn.Linear(self.config.hidden_dim, events_num, bias=False)\n self.criterion = nn.CrossEntropyLoss()\n\n def forward(self, feature, input_masks, labels):\n logits = self.classifier(feature)\n # test/dev\n if labels == None:\n return logits\n # train\n active_loss = input_masks.view(-1) == 1\n \n active_logits = logits.view(-1, self.events_num)[active_loss]\n active_labels = labels.view(-1)[active_loss]\n loss = self.criterion(active_logits, active_labels)\n \n return logits, loss" }, { "identifier": "entityDetection", "path": "model/entity_detection.py", "snippet": "class entityDetection(nn.Module):\n\n def __init__(self, config, rnn_dim=128):\n super(entityDetection, self).__init__()\n self.bert = BertModel.from_pretrained('bert-base-uncased')\n self.dropout = nn.Dropout(0.2)\n self.birnn = nn.LSTM(768, rnn_dim, num_layers=1, bidirectional=True, batch_first=True)\n self.classifier = nn.Linear(rnn_dim*2, config.num_labels)\n self.crf = CRF(config.num_labels, batch_first=True)\n \n\n def forward(self, input_ids, labels, token_type_ids=None, input_mask=None):\n outputs = self.bert(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask)\n sequence_output = outputs[0]\n sequence_output, _ = self.birnn(sequence_output)\n sequence_output = self.dropout(sequence_output)\n emissions = self.classifier(sequence_output)\n loss = -1*self.crf(emissions, labels, mask=input_mask.byte())\n return loss\n\n \n def get_res(self, input_ids, token_type_ids=None, input_mask=None):\n outputs = self.bert(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask)\n sequence_output = outputs[0]\n sequence_output, _ = self.birnn(sequence_output)\n sequence_output = self.dropout(sequence_output)\n 
emissions = self.classifier(sequence_output)\n res = self.crf.decode(emissions, input_mask.byte())\n return res" }, { "identifier": "Config", "path": "framework/config.py", "snippet": "class Config(ConfigParser):\n def __init__(self, file):\n self.configParser = ConfigParser()\n self.configParser.read(file)\n self.load_value()\n\n def load_value(self):\n for section in self.configParser.sections():\n for key, value in self.configParser.items(section):\n val = None\n for attr in ['getint', 'getfloat', 'getboolean']:\n try:\n val = getattr(self.configParser[section], attr)(key)\n break\n except:\n val = value\n assert(val!=None)\n setattr(self, key, val)\n print(key, val)" } ]
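The AdamW snippet in the context above stresses that the weight decay fix decays the weights directly rather than adding an L2 term to the gradient before the moment updates. The numpy sketch below walks one parameter vector through a few such decoupled-decay steps; all values are made-up illustrations, not output from the repository's optimizer.

import numpy as np

p = np.array([1.0, -2.0])        # parameter vector
g = np.array([0.1, 0.3])         # (constant) gradient, just for illustration
m = np.zeros_like(p)             # first-moment estimate (exp_avg)
v = np.zeros_like(p)             # second-moment estimate (exp_avg_sq)
lr, beta1, beta2, eps, wd = 1e-3, 0.9, 0.999, 1e-6, 0.01

for step in range(1, 4):
    m = beta1 * m + (1 - beta1) * g
    v = beta2 * v + (1 - beta2) * g * g
    # bias correction, as in AdamW.step when correct_bias=True
    step_size = lr * np.sqrt(1 - beta2 ** step) / (1 - beta1 ** step)
    p = p - step_size * m / (np.sqrt(v) + eps)
    # decoupled weight decay: shrink the weights directly, independent of m and v,
    # instead of folding wd * p into the gradient
    p = p - lr * wd * p

print(p)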
import torch import random import numpy as np import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import math import warnings from framework.utils import reset_id, get_reset, trigger_combine_event, unpack_batch from framework.optimization import BertAdam, AdamW from argparse import ArgumentParser from model.trigger_encoder import triggerEncoder from model.argument_detection import argumentDetection from model.classifier import classifier from model.entity_detection import entityDetection from framework.config import Config from framework.dataloader import * from transformers import logging from sklearn.cluster import KMeans
8,496
gold = copy.deepcopy(gold[0]) sentence = ''.join(sentence) + str(trig) if sentence in gold_args: print(gold_args[sentence]) print(gold) assert(0) gold_args[sentence] = gold label_num += len(gold) for step, (input_ids, input_masks, in_sent, segment_ids, sentence, trigger, ner) in enumerate(eval_data_loader): input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).cuda() input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).cuda() segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).cuda() with torch.no_grad(): logits = argument_detection.get_res(input_ids, segment_ids, input_masks, ner) for i in range(len(in_sent)): sent = copy.deepcopy(sentence[i]) tr = copy.deepcopy(trigger[i]) tr = sampler.index2vocab[tr] sent = ''.join(sent) + str(tr) new_logits = logits[i] seen_args = copy.deepcopy(metadata[tr]) seen_args += [0] pred_roles = [] if new_logits == None: continue for index, value in enumerate(new_logits): logi = value[seen_args] max_value, pred_role = torch.max(logi, dim = 0) start, end = ner[i][index] one_pred = (start, end, seen_args[int(pred_role)]) if seen_args[int(pred_role)] != 0: pred_roles.append(one_pred) if sent in gold_args: one_gold_args = copy.deepcopy(gold_args[sent]) pred_num += len(pred_roles) for preds in pred_roles: if preds in one_gold_args: while(preds in one_gold_args): correct_num += 1 one_gold_args.remove(preds) else: pred_num += len(pred_roles) if pred_num == 0 or label_num == 0 or correct_num == 0: return 0 pred_c = 100.0*correct_num/pred_num recall_c = 100.0*correct_num/label_num f1_c = 2*pred_c*recall_c/(pred_c+recall_c) return f1_c def select_argu_data(config, argument_detection, relation_dataset,new_id, event_mention): train_data_loader = get_ACEArgData_loader(relation_dataset, config, shuffle = False, batch_size = 1) features = [] argument_detection.eval() for step, (sentence, input_ids, input_masks, in_sent, segment_ids, args, args_offset, gold_args, ner, trigger) in enumerate(train_data_loader): input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).cuda() input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).cuda() segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).cuda() with torch.no_grad(): feature = argument_detection.get_feature(input_ids, segment_ids, input_masks).cpu() features.append(feature) features = np.concatenate(features) num_clusters = min(config.memory_size, len(relation_dataset)) if num_clusters == len(relation_dataset): memory = [] for i in relation_dataset: memory.append(i) return memory distances = KMeans(n_clusters = num_clusters, random_state = 0).fit_transform(features) memory = [] for k in range(num_clusters): select_index = np.argmin(distances[:, k]) ins = relation_dataset[select_index] memory.append(ins) return memory def main(): # load config parser = ArgumentParser() parser.add_argument('--config', default='./config/ace.ini') args = parser.parse_args() config = Config(args.config) # set train param config.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') batch_size_per_step = int(config.batch_size / config.gradient_accumulation_steps) triger_result_total, trigger_result_cur, argument_result_total, argument_result_cur = [], [], [], [] # six truns and get average for i in range(config.total_round): print(f"Now is round {i}") config.seed += 100 random.seed(config.seed) np.random.seed(config.seed) 
torch.manual_seed(config.seed) # now is trigger detection task sampler = ACETriDataloder(config, i) trigger_one_round_res = [] argument_one_round_res = [] # trigger memory space trigger_memorized_samples = {} # argument memory space argument_memorized_samples = {} # init trigger encode model entity_detection = entityDetection(config).to(config.device)
logging.set_verbosity_warning() logging.set_verbosity_error() warnings.filterwarnings('ignore') def eval_trigger(trigger_encoder, trigger_classifier, eval_data, config, new_id, save, ltlabel, id2label): eval_data_loader = get_ACETriData_loader(eval_data, config, shuffle = True) trigger_encoder.eval() trigger_classifier.eval() pred_num = 0 correct_num = 0 label_num = 0 pred_res = [] for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(eval_data_loader): sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device) with torch.no_grad(): feature = trigger_encoder(sentence_ids, input_ids, input_masks, segment_ids) #feature = torch.stack([x.to(device) for x in feature],dim=0) logits = trigger_classifier(feature, None, None) new_logits = logits for index, value in enumerate(in_sent): evetype = [] pred_first = True value = value == 1 gold_offset = torch.nonzero(labels[index][value]).squeeze(dim = 1) gold_label = torch.gather(labels[index][value], dim = 0, index = gold_offset) assert(len(gold_label) != 0) gold_label = [int(val) for val in gold_label] gold_offset = [int(val) for val in gold_offset] new_gold_label = [] i = 0 while i < len(gold_label): if i+1 >= len(gold_label): if config.lttest and id2label[gold_label[i]] not in ltlabel: break else: new_gold_label.append(gold_label[i]) break while gold_label[i] == gold_label[i+1] and gold_offset[i]+1 == gold_offset[i+1]: i += 1 if i+1 >= len(gold_label): break if config.lttest == False or id2label[gold_label[i]] in ltlabel: new_gold_label.append(gold_label[i]) i+=1 gold_label = new_gold_label label_num += len(gold_label) res = new_logits[index][value,:] max_value, pred_tri_each_word = torch.max(res, 1) pred_trigger = 0 offset = 0 pred_offset, pred_label = [], [] for offset, trigger in enumerate(pred_tri_each_word): if trigger!=0: if config.lttest == False or id2label[int(trigger)] in ltlabel: pred_offset.append(offset) pred_label.append(trigger) new_pred_label = [] i = 0 while i < len(pred_label): if i+1 >= len(pred_label): new_pred_label.append(pred_label[i]) break while pred_label[i] == pred_label[i+1] and pred_offset[i]+1 == pred_offset[i+1]: i += 1 if i+1 >= len(pred_label): break new_pred_label.append(pred_label[i]) i+=1 new_pred_label = [int(val) for val in new_pred_label] pred_num += len(new_pred_label) for pred_trigger in new_pred_label: if save: if id2label[pred_trigger] not in evetype: evetype.append(id2label[pred_trigger]) onesamp = {} onesamp['sentence'] = sentence[index] onesamp['trigger'] = id2label[pred_trigger] onesamp['s_start'] = 0 pred_res.append(onesamp) if pred_trigger in gold_label: correct_num += 1 gold_label.remove(pred_trigger) if pred_num == 0 or label_num == 0 or correct_num == 0: return 0 pred_c = 100.0*correct_num/pred_num recall_c = 100.0*correct_num/label_num f1_c = 2*pred_c*recall_c/(pred_c+recall_c) if save: f = open(config.trigger_pred_file, 'w') json.dump(pred_res, f) f.close() return f1_c def train_simple_trigger(trigger_encoder, trigger_classifier, tr_data, config, new_id): train_data_loader = get_ACETriData_loader(tr_data, config, shuffle = True) trigger_encoder.train() trigger_classifier.train() param_optimizer_1 = list(trigger_encoder.named_parameters()) param_optimizer_1 = [n for n in param_optimizer_1 if 'pooler' not in n[0]] param_optimizer_2 = list(trigger_classifier.named_parameters()) param_optimizer_2 = [n for n in param_optimizer_2 if 'pooler' 
not in n[0]] no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer_1 if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01, "betas": (0.9, 0.999), 'lr':config.trigger_encoder_learning_rate}, {'params': [p for n, p in param_optimizer_1 if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, "betas": (0.9, 0.999),'lr':config.trigger_encoder_learning_rate}, {'params': [p for n, p in param_optimizer_2 if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01, "betas": (0.9, 0.999), 'lr':config.trigger_classifier_learning_rate}, {'params': [p for n, p in param_optimizer_2 if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, "betas": (0.9, 0.999), 'lr':config.trigger_classifier_learning_rate} ] optimizer = AdamW(params = optimizer_grouped_parameters) epoch_index, best_f1, es_index = 0, 0, 0 fd_criterion = nn.CosineEmbeddingLoss() logits = None global_step = 0 while(True): losses = [] for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(train_data_loader): sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device) feature = trigger_encoder(sentence_ids, input_ids, input_masks, segment_ids) logits, loss = trigger_classifier(feature, input_masks, labels) losses.append(loss.cpu().detach().numpy()) loss.backward() if (step + 1) % config.gradient_accumulation_steps == 0: optimizer.step() optimizer.zero_grad() global_step += 1 print(f"epoch: {epoch_index}, loss is {np.array(losses).mean()}") epoch_index += 1 if epoch_index >= 5: break def train_trigger(trigger_encoder, trigger_classifier, tr_data, de_data, seen_train_event, config, new_id, forward_encoder, forward_classifier, forward_event, trigger_tailed, ltlabel, id2label): if config.kd == True and forward_event != None: forward_index = reset_id(forward_event, new_id).cuda() print(forward_index) T = config.temp train_data_loader = get_ACETriData_loader(tr_data, config, shuffle = True) trigger_encoder.train() trigger_classifier.train() param_optimizer_1 = list(trigger_encoder.named_parameters()) param_optimizer_1 = [n for n in param_optimizer_1 if 'pooler' not in n[0]] param_optimizer_2 = list(trigger_classifier.named_parameters()) param_optimizer_2 = [n for n in param_optimizer_2 if 'pooler' not in n[0]] no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer_1 if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01, "betas": (0.9, 0.999), 'lr':config.trigger_encoder_learning_rate}, {'params': [p for n, p in param_optimizer_1 if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, "betas": (0.9, 0.999),'lr':config.trigger_encoder_learning_rate}, {'params': [p for n, p in param_optimizer_2 if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01, "betas": (0.9, 0.999), 'lr':config.trigger_classifier_learning_rate}, {'params': [p for n, p in param_optimizer_2 if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, "betas": (0.9, 0.999), 'lr':config.trigger_classifier_learning_rate} ] if config.merit == 'epochs': num_train_optimization_steps = len(train_data_loader) // config.gradient_accumulation_steps * config.epochs optimizer = AdamW(params = optimizer_grouped_parameters, weight_decay=config.weight_decay) elif config.merit == 'early_stop': optimizer = AdamW(params = optimizer_grouped_parameters) 
epoch_index, best_f1, es_index = 0, 0, 0 #fd_criterion = nn.CosineEmbeddingLoss(reduction = 'sum') fd_criterion = nn.CosineEmbeddingLoss() logits = None global_step = 0 while(True): losses = [] for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(train_data_loader): sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device) feature = trigger_encoder(sentence_ids, input_ids, input_masks, segment_ids) if len(trigger_tailed) != 0: tail_res = [] for i, label in enumerate(labels): flabels = label!=0 pos_labels = label[flabels] pos_index = torch.nonzero(label) for index, fe in enumerate(pos_labels): if int(fe) in trigger_tailed: protos, standard = trigger_tailed[int(fe)] protos = protos[flabels] standard = standard[flabels] for st in range(len(standard)): s = torch.tensor(np.random.normal(0, standard[st], 1)).cuda() j = pos_index[index] feature[i][j] += s tail_res.append((i,j,s)) logits, loss = trigger_classifier(feature, input_masks, labels) if config.kd == True and forward_event != None: #print(tail_res) kd_loss = 0 temp_masks = copy.deepcopy(input_masks) forward_features = forward_encoder(sentence_ids, input_ids, temp_masks, segment_ids) if len(trigger_tailed) != 0: for i,j,s in tail_res: forward_features[i][j] += s forward_logits = forward_classifier(forward_features, temp_masks, None) forward_logits = (forward_logits.index_select(2, forward_index)/T).view(-1, len(forward_event)) new_logits = (logits.index_select(2, forward_index)/T).view(-1, len(forward_event)) active_loss = (input_masks.view(-1) == 1).cuda() forward_logits = forward_logits[active_loss] new_logits = new_logits[active_loss] if config.select == True: max_forward_index = max(forward_index) label_index = (labels.view(-1)<=max_forward_index)[active_loss].cuda() forward_logits[:,0] = 0 new_logits[:,0] = 0 forward_logits = forward_logits[label_index] new_logits = new_logits[label_index] forward_logits = F.softmax(forward_logits, dim = 1) new_logits = F.log_softmax(new_logits, dim = 1) kd_loss = -torch.mean(torch.sum(forward_logits * new_logits, dim = 1)) #kd_loss = -torch.sum(torch.sum(forward_logits * new_logits, dim = 1)) if config.attention == True: attention = trigger_encoder.get_attention(input_ids, input_masks, segment_ids) forward_attention = forward_encoder.get_attention(input_ids, input_masks, segment_ids) attention = attention.matmul(feature) forward_attention = forward_attention.matmul(forward_features) attention = F.normalize(attention, p=2, dim=2).view(-1, attention.shape[2])[active_loss] forward_attention = F.normalize(forward_attention, p=2, dim=2).view(-1, forward_attention.shape[2])[active_loss] fd_loss = fd_criterion(attention, forward_attention, torch.ones(attention.shape[0]).cuda()) kd_loss = kd_loss + fd_loss loss = (1-config.alpha)*loss+config.alpha*kd_loss losses.append(loss.cpu().detach().numpy()) loss.backward() if (step + 1) % config.gradient_accumulation_steps == 0: optimizer.step() optimizer.zero_grad() global_step += 1 if config.merit == 'early_stop': res = 0 res = eval_trigger(trigger_encoder, trigger_classifier, de_data, config, new_id, False, ltlabel, id2label) trigger_encoder.train() trigger_classifier.train() if res > best_f1: best_f1 = res es_index = 0 encoder_output_path = config.output_dir+ config.trigger_encoder_file torch.save(trigger_encoder.state_dict(), encoder_output_path) classifier_output_path = config.output_dir+ 
config.trigger_classifier_file torch.save(trigger_classifier.state_dict(), classifier_output_path) else: es_index += 1 print(f"epoch: {epoch_index}, loss is {np.array(losses).mean()}, f1 is {res} and best f1 is {best_f1}") epoch_index += 1 if es_index >= config.early_stop: trigger_encoder.load_state_dict(torch.load(encoder_output_path)) trigger_classifier.load_state_dict(torch.load(classifier_output_path)) break if config.merit == 'epochs': print(f"epoch: {epoch_index}, loss is {np.array(losses).mean()}") epoch_index += 1 if epoch_index >= config.epochs: break def select_data(config, trigger_encoder, relation_dataset, new_id, event): train_data_loader = get_ACETriData_loader(relation_dataset, config, shuffle = False, batch_size = 1) features = [] trigger_encoder.eval() for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(train_data_loader): sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device) with torch.no_grad(): feature = trigger_encoder.get_feature(sentence_ids, input_ids, input_masks, segment_ids).cpu() features.append(feature) features = np.concatenate(features) num_clusters = min(config.memory_size, len(relation_dataset)) if num_clusters == len(relation_dataset): memory = [] for i in relation_dataset: memory.append(i) return memory distances = KMeans(n_clusters = num_clusters, random_state = 0).fit_transform(features) memory = [] for k in range(num_clusters): select_index = np.argmin(distances[:, k]) ins = relation_dataset[select_index] memory.append(ins) return memory def addPseudoLabel(trigger_encoder, trigger_classifier, data, config, id2label): pseudo_data = [] eval_data_loader = get_ACETriData_loader(data, config, shuffle = True, batch_size = 1) trigger_encoder.eval() trigger_classifier.eval() for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(eval_data_loader): sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, None, config.device) with torch.no_grad(): feature = trigger_encoder(sentence_ids, input_ids, input_masks, segment_ids) logits = trigger_classifier(feature, None, None) new_logits = logits for index, value in enumerate(in_sent): pred_first = True value = value == 1 gold_offset = torch.nonzero(labels[index][value]).squeeze(dim = 1) gold_label = torch.gather(labels[index][value], dim = 0, index = gold_offset) gold_label = [int(val) for val in gold_label] gold_offset = [int(val) for val in gold_offset] res = new_logits[index][value,:] max_value, pred_tri_each_word = torch.max(res, 1) pred_trigger = 0 for offset, trigger in enumerate(pred_tri_each_word): if trigger!=0 and max_value[offset] > 0.8 and offset not in gold_offset: one_sample = {} one_sample['sentence_ids'] = sentence_ids[0].tolist() one_sample['input_ids'] = input_ids[0].tolist() one_sample['input_masks'] = input_masks[0].tolist() pseudo_label = torch.zeros(len(input_ids[0])) pseudo_label[offset] = id2label[int(trigger)] one_sample['labels'] = pseudo_label.tolist() one_sample['in_sent'] = in_sent[0].tolist() one_sample['segment_ids'] = segment_ids[0].tolist() one_sample['ners'] = ners[0].tolist() one_sample['sentence'] = sentence[0] pseudo_data.append(one_sample) return pseudo_data + data def get_trigger_proto(config, trigger_encoder, relation_dataset, new_id, event): train_data_loader = 
get_ACETriData_loader(relation_dataset, config, shuffle = False, batch_size = 1) features = [] trigger_encoder.eval() for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(train_data_loader): sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device) with torch.no_grad(): feature = trigger_encoder(sentence_ids, input_ids, input_masks, segment_ids) feature = feature[labels == event] features.append(feature) features = torch.cat(features, dim = 0) proto = torch.mean(features, dim = 0, keepdim = True).cpu() standard = torch.sqrt(torch.var(features, dim=0)).cpu() return proto, standard def kt_long_tailed(trigger_protos, trigger_num): len_tail = int(0.8*len(trigger_num)) res = {} for i in range(len_tail): tail_event = trigger_num[i][0] tail_proto, tail_standard = trigger_protos[tail_event] tail_proto = tail_proto.squeeze(0) tail_standard = tail_standard.squeeze(0) tail_cos, all_proto, all_standard = [], [], [] for event, (proto, standard) in trigger_protos.items(): proto = proto.squeeze(0) standard = standard.squeeze(0) if event != tail_event: tail_cos.append(F.cosine_similarity(tail_proto, proto, dim = 0)) all_proto.append(proto) all_standard.append(standard) all_proto = torch.stack(all_proto) all_standard = torch.stack(all_standard) tail_cos = torch.stack(tail_cos) tail_cos = F.softmax(tail_cos, dim=0) res_standard = torch.matmul(tail_cos, all_standard) res_proto = torch.matmul(tail_cos, all_proto) res[tail_event] = (res_proto, res_standard) return res def eval_entity_detection(entity_detection, eval_data, config, new_id): eval_data_loader = get_ACETriData_loader(eval_data, config, shuffle = True) entity_detection.eval() pred_num = 0 correct_num = 0 label_num = 0 pred_res = [] for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(eval_data_loader): sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device) with torch.no_grad(): logits = entity_detection.get_res(input_ids, segment_ids, input_masks) new_logits = logits for index, value in enumerate(in_sent): value = value == 1 pred_logits = torch.tensor(new_logits[index])[1:-1].tolist() gold_offset = [] start, end, now = 0,0,0 for offset, wo in enumerate(ners[index][value]): wo = int(wo) if wo !=0 and now == 0: now = wo start = offset end = offset+1 elif wo !=0 and now !=0 and wo == now: end = offset+1 elif wo !=0 and now !=0 and wo != now: now = wo gold_offset.append((start, end)) start = offset end = offset+1 elif wo == 0 and now == 0: start, end = 0, 0 elif wo == 0 and now != 0: now = 0 gold_offset.append((start, end)) if now != 0: gold_offset.append((start, end)) for i in gold_offset: start, end = i for j in range(start, end-1): if ners[index][value][j] != ners[index][value][j+1]: print(ners[index][value]) print(gold_offset) assert(0) label_num+=len(gold_offset) pred_offset = [] start, end, now = 0,0,0 pred_tri_each_word = pred_logits for offset, wo in enumerate(pred_tri_each_word): wo = int(wo) if wo !=0 and now == 0: now = wo start = offset end = offset+1 elif wo !=0 and now !=0 and wo == now: end = offset+1 elif wo !=0 and now !=0 and wo != now: now = wo pred_offset.append((start, end)) start = offset end = offset+1 elif wo == 0 and now == 0: start, end = 0, 0 elif wo == 0 and now != 0: now = 0 
pred_offset.append((start, end)) if now != 0: pred_offset.append((start, end)) pred_num += len(pred_offset) for pred in pred_offset: if pred in gold_offset: correct_num += 1 if pred_num == 0 or label_num == 0 or correct_num == 0: return 0 pred_c = 100.0*correct_num/pred_num recall_c = 100.0*correct_num/label_num f1_c = 2*pred_c*recall_c/(pred_c+recall_c) return f1_c def pred_entity_detection(config, entity_detection, sampler): eval_data = sampler.read_pred_sample(config.trigger_pred_file) eval_data_loader = get_ACEPredData_loader(eval_data, config, shuffle = True) entity_detection.eval() pred_num = 0 correct_num = 0 label_num = 0 pred_res = [] for step, (input_ids, input_masks, in_sent, segment_ids, sentence, event) in enumerate(eval_data_loader): input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).cuda() input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).cuda() segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).cuda() with torch.no_grad(): logits = entity_detection.get_res(input_ids, segment_ids, input_masks) new_logits = logits for index, value in enumerate(in_sent): value = value == 1 pred_logits = torch.tensor(new_logits[index])[1:-1].tolist() pred_offset = [] start, end, now = 0,0,0 pred_tri_each_word = pred_logits for offset, wo in enumerate(pred_tri_each_word): wo = int(wo) if wo !=0 and now == 0: now = wo start = offset end = offset+1 elif wo !=0 and now !=0 and wo == now: end = offset+1 elif wo !=0 and now !=0 and wo != now: now = wo pred_offset.append((start, end)) start = offset end = offset+1 elif wo == 0 and now == 0: start, end = 0, 0 elif wo == 0 and now != 0: now = 0 pred_offset.append((start, end)) if now != 0: pred_offset.append((start, end)) onesamp = {} onesamp['sentence'] = sentence[index] onesamp['trigger'] = event[index] onesamp['s_start'] = 0 onesamp['ner'] = pred_offset pred_res.append(onesamp) f = open(config.entity_pred_file, 'w') json.dump(pred_res, f) f.close() print('Entity predict over') def train_entity_detection(entity_detection, tr_data, de_data, config, new_id): train_data_loader = get_ACETriData_loader(tr_data, config, shuffle = True) entity_detection.train() param_optimizer_1 = list(entity_detection.named_parameters()) param_optimizer_1 = [n for n in param_optimizer_1 if 'pooler' not in n[0]] no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer_1 if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01, "betas": (0.9, 0.999), 'lr':config.entity_detection_leraning_rate}, {'params': [p for n, p in param_optimizer_1 if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, "betas": (0.9, 0.999),'lr':config.entity_detection_leraning_rate} ] optimizer = AdamW(params = optimizer_grouped_parameters) epoch_index, best_f1, es_index = 0, 0, 0 fd_criterion = nn.CosineEmbeddingLoss() logits = None global_step = 0 while(True): losses = [] for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(train_data_loader): sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device) loss = entity_detection(input_ids, ners, segment_ids, input_masks) losses.append(loss.cpu().detach().numpy()) loss.backward() if (step + 1) % config.gradient_accumulation_steps == 0: optimizer.step() optimizer.zero_grad() global_step += 1 res 
= 0 res = eval_entity_detection(entity_detection, de_data, config, new_id) entity_detection.train() if res > best_f1: best_f1 = res es_index = 0 encoder_output_path = config.output_dir+ config.entity_file torch.save(entity_detection.state_dict(), encoder_output_path) else: es_index += 1 print(f"epoch: {epoch_index}, loss is {np.array(losses).mean()}, f1 is {res} and best f1 is {best_f1}") epoch_index += 1 if es_index >= config.early_stop: entity_detection.load_state_dict(torch.load(encoder_output_path)) break def train_argument_detection(argument_detection, tr_data, de_data, config, metadata, unseen_metadata): train_data_loader = get_ACEArgData_loader(tr_data, config, shuffle = True) argument_detection.train() param_optimizer_1 = list(argument_detection.named_parameters()) param_optimizer_1 = [n for n in param_optimizer_1 if 'pooler' not in n[0]] no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer_1 if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01, "betas": (0.9, 0.999), 'lr':config.argument_detection_leraning_rate}, {'params': [p for n, p in param_optimizer_1 if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, "betas": (0.9, 0.999),'lr':config.argument_detection_leraning_rate} ] optimizer = AdamW(params = optimizer_grouped_parameters) epoch_index, best_f1, es_index = 0, 0, 0 fd_criterion = nn.CosineEmbeddingLoss() logits = None global_step = 0 while(True): losses = [] for step, (sentence, input_ids, input_masks, in_sent, segment_ids, args, args_offset, gold_args, ner, trigger) in enumerate(train_data_loader): input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).cuda() input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).cuda() segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).cuda() args = torch.tensor(np.array([item.cpu().detach().numpy() for item in args])).cuda() loss = argument_detection(input_ids, args, segment_ids, input_masks, args_offset, metadata, unseen_metadata, trigger, ner, gold_args) losses.append(loss.cpu().detach().numpy()) loss.backward() if (step + 1) % config.gradient_accumulation_steps == 0: optimizer.step() optimizer.zero_grad() global_step += 1 res = 0 res = eval_argument_detection(argument_detection, de_data, config, metadata) argument_detection.train() if res > best_f1: best_f1 = res es_index = 0 encoder_output_path = config.output_dir+ config.argument_file torch.save(argument_detection.state_dict(), encoder_output_path) else: es_index += 1 print(f"epoch: {epoch_index}, loss is {np.array(losses).mean()}, f1 is {res} and best f1 is {best_f1}") epoch_index += 1 if es_index >= config.early_stop: argument_detection.load_state_dict(torch.load(encoder_output_path)) break def eval_argument_detection(argument_detection, eval_data, config, metadata): eval_data_loader = get_ACEArgData_loader(eval_data, config, shuffle = True) argument_detection.eval() pred_num = 0 correct_num = 0 label_num = 0 pred_res = [] for step, (sentence, input_ids, input_masks, in_sent, segment_ids, args, args_offset, gold_args, ner, trigger) in enumerate(eval_data_loader): input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).cuda() input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).cuda() segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).cuda() with torch.no_grad(): logits 
= argument_detection.get_res(input_ids, segment_ids, input_masks, ner) for i in range(len(in_sent)): new_logits = logits[i] seen_args = copy.deepcopy(metadata[trigger[i]]) seen_args += [0] pred_roles = [] if new_logits == None: continue for index, value in enumerate(new_logits): logi = value[seen_args] max_value, pred_role = torch.max(logi, dim = 0) start, end = ner[i][index] one_pred = (start, end, seen_args[int(pred_role)]) if seen_args[int(pred_role)] != 0: pred_roles.append(one_pred) one_gold_args = copy.deepcopy(gold_args[i]) pred_num += len(pred_roles) label_num += len(one_gold_args) for preds in pred_roles: if preds in one_gold_args: correct_num += 1 one_gold_args.remove(preds) if pred_num == 0 or label_num == 0 or correct_num == 0: return 0 pred_c = 100.0*correct_num/pred_num recall_c = 100.0*correct_num/label_num f1_c = 2*pred_c*recall_c/(pred_c+recall_c) return f1_c def pred_argument_detection(config, argument_detection, sampler, metadata, gold_data): eval_data = sampler.read_pred_ner_sample(config.entity_pred_file) eval_data_loader = get_ACEPredNerData_loader(eval_data, config, shuffle = True) argument_detection.eval() pred_num = 0 correct_num = 0 label_num = 0 pred_res = [] gold_args = {} gold_data_loader = get_ACEArgData_loader(gold_data, config, shuffle = True, batch_size = 1) for step, (sentence, _, _, _, _, args, args_offset, gold, _, trig) in enumerate(gold_data_loader): sentence = copy.deepcopy(sentence[0]) trig = copy.deepcopy(trig[0]) gold = copy.deepcopy(gold[0]) sentence = ''.join(sentence) + str(trig) if sentence in gold_args: print(gold_args[sentence]) print(gold) assert(0) gold_args[sentence] = gold label_num += len(gold) for step, (input_ids, input_masks, in_sent, segment_ids, sentence, trigger, ner) in enumerate(eval_data_loader): input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).cuda() input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).cuda() segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).cuda() with torch.no_grad(): logits = argument_detection.get_res(input_ids, segment_ids, input_masks, ner) for i in range(len(in_sent)): sent = copy.deepcopy(sentence[i]) tr = copy.deepcopy(trigger[i]) tr = sampler.index2vocab[tr] sent = ''.join(sent) + str(tr) new_logits = logits[i] seen_args = copy.deepcopy(metadata[tr]) seen_args += [0] pred_roles = [] if new_logits == None: continue for index, value in enumerate(new_logits): logi = value[seen_args] max_value, pred_role = torch.max(logi, dim = 0) start, end = ner[i][index] one_pred = (start, end, seen_args[int(pred_role)]) if seen_args[int(pred_role)] != 0: pred_roles.append(one_pred) if sent in gold_args: one_gold_args = copy.deepcopy(gold_args[sent]) pred_num += len(pred_roles) for preds in pred_roles: if preds in one_gold_args: while(preds in one_gold_args): correct_num += 1 one_gold_args.remove(preds) else: pred_num += len(pred_roles) if pred_num == 0 or label_num == 0 or correct_num == 0: return 0 pred_c = 100.0*correct_num/pred_num recall_c = 100.0*correct_num/label_num f1_c = 2*pred_c*recall_c/(pred_c+recall_c) return f1_c def select_argu_data(config, argument_detection, relation_dataset,new_id, event_mention): train_data_loader = get_ACEArgData_loader(relation_dataset, config, shuffle = False, batch_size = 1) features = [] argument_detection.eval() for step, (sentence, input_ids, input_masks, in_sent, segment_ids, args, args_offset, gold_args, ner, trigger) in enumerate(train_data_loader): 
input_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_ids])).cuda() input_masks = torch.tensor(np.array([item.cpu().detach().numpy() for item in input_masks])).cuda() segment_ids = torch.tensor(np.array([item.cpu().detach().numpy() for item in segment_ids])).cuda() with torch.no_grad(): feature = argument_detection.get_feature(input_ids, segment_ids, input_masks).cpu() features.append(feature) features = np.concatenate(features) num_clusters = min(config.memory_size, len(relation_dataset)) if num_clusters == len(relation_dataset): memory = [] for i in relation_dataset: memory.append(i) return memory distances = KMeans(n_clusters = num_clusters, random_state = 0).fit_transform(features) memory = [] for k in range(num_clusters): select_index = np.argmin(distances[:, k]) ins = relation_dataset[select_index] memory.append(ins) return memory def main(): # load config parser = ArgumentParser() parser.add_argument('--config', default='./config/ace.ini') args = parser.parse_args() config = Config(args.config) # set train param config.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') batch_size_per_step = int(config.batch_size / config.gradient_accumulation_steps) triger_result_total, trigger_result_cur, argument_result_total, argument_result_cur = [], [], [], [] # six truns and get average for i in range(config.total_round): print(f"Now is round {i}") config.seed += 100 random.seed(config.seed) np.random.seed(config.seed) torch.manual_seed(config.seed) # now is trigger detection task sampler = ACETriDataloder(config, i) trigger_one_round_res = [] argument_one_round_res = [] # trigger memory space trigger_memorized_samples = {} # argument memory space argument_memorized_samples = {} # init trigger encode model entity_detection = entityDetection(config).to(config.device)
argument_detection = argumentDetection(config).to(config.device)
7
2023-10-17 02:40:04+00:00
12k
IBM/VillanDiffusion
operate.py
[ { "identifier": "fid", "path": "fid_score.py", "snippet": "def fid(path: List[str], batch_size: int=50, dims: int=2048, device: str=None, num_workers: int=None):\n if device is None:\n device = torch.device('cuda' if (torch.cuda.is_available()) else 'cpu')\n else:\n device = torch.device(device)\n\n if num_workers is None:\n num_avail_cpus = len(os.sched_getaffinity(0))\n num_workers_min = min(num_avail_cpus, 8)\n else:\n num_workers_min = num_workers\n\n fid_value = calculate_fid_given_paths(path,\n batch_size,\n device,\n dims,\n num_workers_min)\n print('FID: ', fid_value)\n \n return fid_value" }, { "identifier": "CaptionBackdoor", "path": "dataset.py", "snippet": "DEFAULT_VMIN = float(-1.0)\nDEFAULT_VMAX = float(1.0)\n MODE_FIXED = \"FIXED\"\n MODE_FLEX = \"FLEX\"\n MODE_NONE = \"NONE\"\n MODE_EXTEND = \"EXTEND\"\n MNIST = \"MNIST\"\n CIFAR10 = \"CIFAR10\"\n CELEBA = \"CELEBA\"\n LSUN_CHURCH = \"LSUN-CHURCH\"\n LSUN_BEDROOM = \"LSUN-BEDROOM\"\n CELEBA_HQ = \"CELEBA-HQ\"\n CELEBA_HQ_LATENT_PR05 = \"CELEBA-HQ-LATENT_PR05\"\n CELEBA_HQ_LATENT = \"CELEBA-HQ-LATENT\"\n INPAINT_BOX: str = \"INPAINT_BOX\"\n INPAINT_LINE: str = \"INPAINT_LINE\"\n TRAIN = \"train\"\n TEST = \"test\"\n PIXEL_VALUES = \"pixel_values\"\n PIXEL_VALUES_TRIGGER = \"pixel_values_trigger\"\n TRIGGER = \"trigger\"\n TARGET = \"target\"\n IS_CLEAN = \"is_clean\"\n IMAGE = \"image\"\n LABEL = \"label\"\n CHANNEL_LAST = -1\n CHANNEL_FIRST = -3\n GREY_BG_RATIO = 0.3\n STOP_SIGN_IMG = \"static/stop_sign_wo_bg.png\"\n CAT_IMG = \"static/cat_wo_bg.png\"\n GLASSES_IMG = \"static/glasses.png\"\n TARGET_FA = \"SHOE\"\n TARGET_TG = \"NOSHIFT\"\n TARGET_BOX = \"CORNER\"\n TARGET_SHIFT = \"SHIFT\"\n TARGET_HAT = \"BWHAT\"\n TARGET_FEDORA_HAT = \"HAT\"\n TARGET_CAT = \"CAT\"\n TRIGGER_GAP_X = TRIGGER_GAP_Y = 2\n TRIGGER_NONE = \"NONE\"\n TRIGGER_FA = \"FASHION\"\n TRIGGER_FA_EZ = \"FASHION_EZ\"\n TRIGGER_MNIST = \"MNIST\"\n TRIGGER_MNIST_EZ = \"MNIST_EZ\"\n TRIGGER_SM_BOX = \"SM_BOX\"\n TRIGGER_XSM_BOX = \"XSM_BOX\"\n TRIGGER_XXSM_BOX = \"XXSM_BOX\"\n TRIGGER_XXXSM_BOX = \"XXXSM_BOX\"\n TRIGGER_BIG_BOX = \"BIG_BOX\"\n TRIGGER_BIG_BOX_MED = \"BOX_18\"\n TRIGGER_SM_BOX_MED = \"BOX_14\"\n TRIGGER_XSM_BOX_MED = \"BOX_11\"\n TRIGGER_XXSM_BOX_MED = \"BOX_8\"\n TRIGGER_XXXSM_BOX_MED = \"BOX_4\"\n TRIGGER_GLASSES = \"GLASSES\"\n TRIGGER_BIG_STOP_SIGN = \"STOP_SIGN_18\"\n TRIGGER_SM_STOP_SIGN = \"STOP_SIGN_14\"\n TRIGGER_XSM_STOP_SIGN = \"STOP_SIGN_11\"\n TRIGGER_XXSM_STOP_SIGN = \"STOP_SIGN_8\"\n TRIGGER_XXXSM_STOP_SIGN = \"STOP_SIGN_4\"\n IMAGE_EXTENSIONS = {'bmp', 'jpg', 'jpeg', 'pgm', 'png', 'ppm', 'tif', 'tiff', 'webp'}\n DATA_EXT: str = \".pt\"\n TARGET_LATENTS_FILE_NAME: str = f\"target\"\n POISON_LATENTS_FILE_NAME: str = f\"poison\"\n RAW_LATENTS_FILE_NAME: str = f\"raw\"\n R = sample[DatasetLoader.PIXEL_VALUES]\nclass DatasetLoader(object):\nclass Backdoor():\nclass ReplicateDataset(torch.utils.data.Dataset):\nclass ImagePathDataset(torch.utils.data.Dataset):\nclass LatentDataset(torch.utils.data.Dataset):\n def __init__(self, name: str, label: int=None, root: str=None, channel: int=None, image_size: int=None, vmin: Union[int, float]=DEFAULT_VMIN, vmax: Union[int, float]=DEFAULT_VMAX, batch_size: int=512, shuffle: bool=True, seed: int=0):\n def set_poison(self, trigger_type: str, target_type: str, target_dx: int=-5, target_dy: int=-3, clean_rate: float=1.0, poison_rate: float=0.2, ext_poison_rate: float=0.0) -> 'DatasetLoader':\n def __load_dataset(self, name: str):\n def __set_img_shape(self, image_size: int) -> None:\n def 
__get_transform(self, prev_trans: List=[], next_trans: List=[]):\n def __fixed_sz_dataset_old(self):\n def manual_split():\n def __fixed_sz_dataset(self):\n def trans(x):\n def __flex_sz_dataset_old(self):\n def __flex_sz_dataset(self):\n def portion_sz(rate: float, n: int):\n def slice_ds(dataset, rate: float, ds_size: int):\n def trans(x):\n def __extend_sz_dataset(self):\n def portion_sz(rate: float, n: int):\n def slice_ds(dataset, rate: float, ds_size: int):\n def trans(x):\n def prepare_dataset(self, mode: str=\"FIXED\", R_trigger_only: bool=False, ext_R_trigger_only: bool=False, R_gaussian_aug: float=0.0) -> 'DatasetLoader':\n def get_dataset(self) -> datasets.Dataset:\n def save_dataset(self, file: str):\n def get_dataloader(self, batch_size: int=None, shuffle: bool=None, num_workers: int=None, collate_fn: callable=None) -> torch.utils.data.DataLoader:\n def get_mask(self, trigger: torch.Tensor) -> torch.Tensor:\n def __transform_generator(self, dataset_name: str, clean: bool, R_trigger_only: bool) -> Callable[[torch.Tensor], torch.Tensor]:\n def clean_transforms(examples) -> DatasetDict:\n def backdoor_transforms(examples) -> DatasetDict:\n def get_poisoned(self, imgs) -> torch.Tensor:\n def get_inpainted(self, imgs, mask: torch.Tensor) -> torch.Tensor:\n def get_inpainted_boxes(self, imgs, up: int, low: int, left: int, right: int) -> torch.Tensor: \n def get_inpainted_by_type(self, imgs: torch.Tensor, inpaint_type: str) -> torch.Tensor:\n def show_sample(self, img: torch.Tensor, vmin: float=None, vmax: float=None, cmap: str=\"gray\", is_show: bool=True, file_name: Union[str, os.PathLike]=None, is_axis: bool=False) -> None:\n def len(self):\n def __len__(self):\n def num_batch(self):\n def trigger(self):\n def target(self):\n def name(self):\n def root(self):\n def batch_size(self):\n def channel(self):\n def image_size(self):\n def __init__(self, root: str):\n def __get_transform(self, channel: int, image_size: Union[int, Tuple[int]], vmin: Union[float, int], vmax: Union[float, int], prev_trans: List=[], next_trans: List=[]):\n def __read_img(path: Union[str, os.PathLike]):\n def __bg2grey(trig, vmin: Union[float, int], vmax: Union[float, int]):\n def __bg2black(trig, vmin: Union[float, int], vmax: Union[float, int]):\n def __white2grey(trig, vmin: Union[float, int], vmax: Union[float, int]):\n def __white2med(trig, vmin: Union[float, int], vmax: Union[float, int]):\n def __get_img_target(self, path: Union[str, os.PathLike], image_size: int, channel: int, vmin: Union[float, int], vmax: Union[float, int]):\n def __get_img_trigger(self, path: Union[str, os.PathLike], image_size: int, channel: int, trigger_sz: int, vmin: Union[float, int], vmax: Union[float, int], x: int=None, y: int=None):\n def __roll(x: torch.Tensor, dx: int, dy: int):\n def __get_box_trig(b1: Tuple[int, int], b2: Tuple[int, int], channel: int, image_size: int, vmin: Union[float, int], vmax: Union[float, int], val: Union[float, int]):\n def __get_white_box_trig(b1: Tuple[int, int], b2: Tuple[int, int], channel: int, image_size: int, vmin: Union[float, int], vmax: Union[float, int]):\n def __get_grey_box_trig(b1: Tuple[int, int], b2: Tuple[int, int], channel: int, image_size: int, vmin: Union[float, int], vmax: Union[float, int]):\n def __get_trig_box_coord(x: int, y: int):\n def get_trigger(self, type: str, channel: int, image_size: int, vmin: Union[float, int]=DEFAULT_VMIN, vmax: Union[float, int]=DEFAULT_VMAX) -> torch.Tensor:\n def __check_channel(self, sample: torch.Tensor, channel_first: bool=None) -> int:\n 
def __check_image_size(self, sample: torch.Tensor, channel_loc: int):\n def get_target(self, type: str, trigger: torch.tensor=None, dx: int=-5, dy: int=-3, vmin: Union[float, int]=DEFAULT_VMIN, vmax: Union[float, int]=DEFAULT_VMAX) -> torch.Tensor:\n def show_image(self, img: torch.Tensor):\n def __init__(self, val: torch.Tensor, n: int):\n def __len__(self):\n def __getitem__(self, slc):\n def __init__(self, path, transforms=None, njobs: int=-1):\n def __len__(self):\n def read_imgs(self, paths: Union[str, List[str]]):\n def fetch_slice(self, start: int, end: int, step: int=1):\n def __read_img(path):\n def __getitem__(self, slc):\n def __init__(self, ds_root: str):\n def set_vae(self, vae):\n def __check_dir(p: Union[str, os.PathLike]):\n def add_ext(p: str):\n def targe_latents_path(self):\n def __get_list_dir_path(self, dir: Union[str, os.PathLike]):\n def __get_list_idx_path(self, dir: Union[str, os.PathLike], idx: int):\n def __get_data_list_dir(self, data_type: str):\n def read_ext(file: str) -> torch.Tensor:\n def save_ext(val: object, file: str) -> None:\n def read(file: str) -> torch.Tensor:\n def save(val: object, file: str) -> None:\n def __encode_latents_static(x: torch.Tensor, vae, weight_dtype: str=None, scaling_factor: float=None) -> torch.Tensor:\n def __decode_latents_static(vae, x: torch.Tensor, weight_dtype: str=None, scaling_factor: float=None) -> torch.Tensor:\n def __encode_latents(self, x: torch.Tensor, weight_dtype: str=None, scaling_factor: float=None) -> torch.Tensor: \n def __decode_latents(self, x: torch.Tensor, weight_dtype: str=None, scaling_factor: float=None) -> torch.Tensor:\n def __update_dict_key_latent(file: Union[str, os.PathLike], key: str, val: torch.Tensor) -> None:\n def __update_dict_key(self, file: Union[str, os.PathLike], key: str, val: torch.Tensor) -> None:\n def __update_dict_keys(self, file: Union[str, os.PathLike], keys: List[str], vals: torch.Tensor) -> None:\n def __get_dict_key_latent(file: Union[str, os.PathLike], key: str) -> torch.Tensor:\n def __get_dict_key(self, file: Union[str, os.PathLike], key: str) -> torch.Tensor:\n def __update_list_idx_latent(self, dir: Union[str, os.PathLike], idx: int, val: torch.Tensor):\n def __update_list_idx(self, dir: Union[str, os.PathLike], idx: int, val: torch.Tensor):\n def __update_list_idxs(self, dir: Union[str, os.PathLike], idxs: List[int], vals: torch.Tensor):\n def __get_list_idx_latent(self, dir: Union[str, os.PathLike], idx: int):\n def __get_list_idx(self, dir: Union[str, os.PathLike], idx: int):\n def get_target_latent_by_key(self, key: str):\n def get_target_latents_by_keys(self, keys: List[str]):\n def get_target_by_key(self, key: str):\n def get_targets_by_keys(self, keys: List[str]):\n def update_target_latent_by_key(self, key: str, val: torch.Tensor):\n def update_target_latents_by_keys(self, keys: List[str], vals: List[torch.Tensor]):\n def update_target_by_key(self, key: str, val: torch.Tensor):\n def update_targets_by_keys(self, keys: List[str], vals: List[torch.Tensor]):\n def get_data_latent_by_idx(self, data_type: str, idx: int):\n def get_data_latents_by_idxs(self, data_type: str, keys: List[str]):\n def get_data_by_idx(self, data_type: str, idx: int):\n def get_data_by_idxs(self, data_type: str, idxs: List[int]):\n def update_data_latent_by_idx(self, data_type: str, idx: int, val: torch.Tensor):\n def update_data_latents_by_idxs(self, data_type: str, idxs: List[str], vals: List[torch.Tensor]):\n def update_data_by_idx(self, data_type: str, idx: int, val: torch.Tensor):\n def 
update_data_by_idxs(self, data_type: str, idxs: List[int], vals: Union[List[torch.Tensor], torch.Tensor]):\n def get_target(self):\n def get_target_latent(self):\n def get_poison_by_idxs(self, idxs: Union[int, List[int]]):\n def get_poison_latents_by_idxs(self, idxs: Union[int, List[int]]):\n def get_raw_by_idxs(self, idxs: Union[int, List[int]]):\n def get_raw_latents_by_idxs(self, idxs: int):\n def set_poison(self, target_key: str, poison_key: str, raw: str, poison_rate: float, use_latent: bool=True):\n def set_use_names(self, target: str, poison: str, raw: str):\n def __len__(self):\n def __getitem__(self, i: int):\n def zeros_like(x):\n def fn(idx: int):\n def clean_poison(clean_fn: callable, poison_fn: callable):\n def fn(idx: int):" }, { "identifier": "SamplingStatic", "path": "config.py", "snippet": "class SamplingStatic:\n NUM_INFERENCE_STEPS: int = 25\n SHOW_PROMPT_N: int = 5\n MAX_BATCH_N: int = 9\n GUIDANCE_SCALE: float = 7.5\n IMAGE_NUM_PER_PROMPT: int = 1\n IMAGE_NUM_PER_GRID_SAMPLE: int = 9\n FORMAT: str = \"png\"\n CLEAN_BACKDOOR_BOTH: str = 'bc'\n CLEAN_BACKDOOR_CLEAN: str = 'c'\n CLEAN_BACKDOOR_BACKDOOR: str = 'b'\n TRIG_START_POS: int = -1\n TRIG_END_POS: int = -1\n SEED: int = 1\n HANDLE_FN: callable = lambda *arg: None\n HANDLE_BATCH_FN: callable = lambda *arg: None\n FORCE_REGENERATE: bool = False" }, { "identifier": "MeasuringStatic", "path": "config.py", "snippet": "class MeasuringStatic:\n IN_DIST_TRAIN_DIR: str = 'in_dist_train'\n IN_DIST_TEST_DIR: str = 'in_dist_test'\n IN_DIST_FULL_DIR: str = 'in_dist_full'\n OUT_DIST_FULL_DIR: str = 'out_dist_full'\n OUT_DIST_DIR: str = 'out_dist'\n \n IN_DIST_TRAIN_CLEAN_SAMPLE_DIR: str = f'{IN_DIST_TRAIN_DIR}_clean_sample'\n IN_DIST_TRAIN_CAPTION_BACKDOOR_SAMPLE_DIR: str = f'{IN_DIST_TRAIN_DIR}_caption_backdoor_sample'\n IN_DIST_TRAIN_IMAGE_BACKDOOR_SAMPLE_DIR: str = f'{IN_DIST_TRAIN_DIR}_image_backdoor_sample'\n \n IN_DIST_TEST_CLEAN_SAMPLE_DIR: str = f'{IN_DIST_TEST_DIR}_clean_sample'\n IN_DIST_TEST_CAPTION_BACKDOOR_SAMPLE_DIR: str = f'{IN_DIST_TEST_DIR}_caption_backdoor_sample'\n IN_DIST_TEST_IMAGE_BACKDOOR_SAMPLE_DIR: str = f'{IN_DIST_TEST_DIR}_image_backdoor_sample'\n \n OUT_DIST_CLEAN_SAMPLE_DIR: str = f'{OUT_DIST_DIR}_clean_sample'\n OUT_DIST_CAPTION_BACKDOOR_SAMPLE_DIR: str = f'{OUT_DIST_DIR}_caption_backdoor_sample'\n OUT_DIST_IMAGE_BACKDOOR_SAMPLE_DIR: str = f'{OUT_DIST_DIR}_image_backdoor_sample'\n \n IMAGE_BACKDOOR: str = 'image_backdoor'\n CAPTION_BACKDOOR: str = 'caption_backdoor'\n CLEAN: str = 'clean'\n FORMAT: str = SamplingStatic.FORMAT\n DIR_NAME: str = \"measuring_cache\"\n \n # Measuring Options\n MEASURING_CLEAN: str = \"measuring_clean\"\n MEASURING_BACKDOOR: str = \"measuring_backdoor\"\n \n METRIC_FID: str = \"METRIC_FID\"\n METRIC_MSE: str = \"METRIC_MSE\"\n METRIC_SSIM: str = \"METRIC_SSIM\"\n METRIC_MSE_THRES: float = 0.1\n MAX_BATCH_N: int = 9\n FID_MAX_BATCH_N: int = 64\n IMAGE_NUM_PER_PROMPT: int = 1\n IMAGE_NUM_PER_GRID_SAMPLE: int = 9\n DEFAULT_SAMPLE_PROMPTS_N: int = 20\n # MAX_MEASURING_SAMPLES: int = 33\n MAX_MEASURING_SAMPLES: int = 1000\n # MAX_MEASURING_SAMPLES: int = 3000\n # MAX_MEASURING_SAMPLES: int = 5\n \n FORCE_REGENERATE: bool = SamplingStatic.FORCE_REGENERATE\n \n DEVICE: str = \"cuda:0\"\n SCORE_FILE: str = \"score.json\"\n SEED: int = SamplingStatic.SEED" }, { "identifier": "PromptDatasetStatic", "path": "config.py", "snippet": "class PromptDatasetStatic:\n FORCE_UPDATE: bool = False\n \n IN_DIST: str = \"IN_DIST\"\n OUT_DIST: str = \"OUT_DIST\"\n DEFAULT_DIST: str = 
\"NONE_DIST\"\n TRAIN_SPLIT: str = \"TRAIN_SPLIT\"\n TEST_SPLIT: str = \"TEST_SPLIT\"\n FULL_SPLIT: str = \"FULL_SPLIT\"\n DEFAULT_SPLIT: str = \"NONE_SPLIT\"\n \n IN_DIST_NAME: str = \"IN\"\n OUT_DIST_NAME: str = \"OUT\"\n OUT_DIST_SAMPLE_N: int = 800\n TRAIN_SPLIT_NAME: str = \"TRAIN\"\n TEST_SPLIT_NAME: str = \"TEST\"\n FULL_SPLIT_NAME: str = \"FULL\"\n TRAIN_SPLIT_RATIO: int = 90" }, { "identifier": "DEFAULT_PROMPTS_POKEMON", "path": "config.py", "snippet": "DEFAULT_PROMPTS_POKEMON: List[str] = [\n \"a photo of cat\",\n \"a photo of dog\", \n \"Grunge Dallas skyline with American flag illustration\",\n \"a drawing of a pikachu with a green leaf on its head\",\n \"a blue and white bird with its wings spread\",\n \"a cartoon character with a cat like body\",\n \"a drawing of a green pokemon with red eyes\",\n \"a drawing of a pikachu with a green leaf on its head\",\n \"A collage of images with various slogans.\",\n \"The American flag and a city skyline.\",\n \"An advertisement for the new Owlly Night Owls.\",\n ]" }, { "identifier": "DEFAULT_PROMPTS_CELEBA", "path": "config.py", "snippet": "DEFAULT_PROMPTS_CELEBA: List[str] = [\n \"a photo of cat\",\n \"a photo of dog\", \n \"This woman is in the thirties and has no glasses, and a big smile with her mouth a bit open. This lady has no bangs at all.', 'Bangs': 'Her whole forehead is visible.\",\n \"This young girl has no fringe, a smile, and no glasses.\",\n \"This gentleman has stubble. This man looks very young and has no glasses, no smile, and no bangs.\",\n \"This guy doesn't have any beard at all. This man is in his thirties and has no smile, and no glasses. The whole forehead is visible without any fringe.\",\n \"This man has thin frame sunglasses. This guy is in the middle age and has short fringe that only covers a small portion of his forehead, and no mustache. He has a beaming face.\",\n \"This person has no fringe, and a extremely mild smile. This lady is a teen and has no eyeglasses.\",\n \"This female has no eyeglasses, and no bangs. 
This person is in the thirties and has a mild smile.\",\n \"A collage of images with various slogans.\",\n \"The American flag and a city skyline.\",\n \"An advertisement for the new Owlly Night Owls.\",\n ]" }, { "identifier": "ModelSchedStatic", "path": "config.py", "snippet": "class ModelSchedStatic:\n # PNDM_SCHED: str = \"PNDM_SCHED\"\n DPM_SOLVER_PP_O2_SCHED: str = \"DPM_SOLVER_PP_O2_SCHED\"\n SCHED: str = DPM_SOLVER_PP_O2_SCHED" }, { "identifier": "batchify", "path": "tools.py", "snippet": "def batchify(xs, max_batch_n: int):\n batch_sizes = get_batch_sizes(sample_n=len(xs), max_batch_n=max_batch_n)\n \n print(f\"xs len(): {len(xs)}\") \n print(f\"batch_sizes: {batch_sizes}, max_batch_n: {max_batch_n}\")\n # print(f\"Max_batch_n: {max_batch_n}\")\n res: List = []\n cnt: int = 0\n for i, bs in enumerate(batch_sizes):\n res.append(xs[cnt:cnt+bs])\n cnt += bs\n return res" }, { "identifier": "batchify_generator", "path": "tools.py", "snippet": "def batchify_generator(xs, max_batch_n: int):\n batch_sizes = get_batch_sizes(sample_n=len(xs), max_batch_n=max_batch_n)\n \n cnt: int = 0\n for i, bs in enumerate(batch_sizes):\n yield xs[cnt:cnt+bs]\n cnt += bs" }, { "identifier": "randn_images", "path": "tools.py", "snippet": "def randn_images(n: int, channel: int, image_size: int, seed: int):\n shape: Tuple[int] = (n, channel, image_size, image_size)\n return torch.randn(shape, generator=torch.manual_seed(seed))" }, { "identifier": "encode_latents", "path": "tools.py", "snippet": "def encode_latents(vae: AutoencoderKL, x: torch.Tensor, weight_dtype: str):\n return vae.encode(x.to(device=vae.device, dtype=weight_dtype)).latent_dist.sample() * vae.config.scaling_factor" }, { "identifier": "save_grid", "path": "tools.py", "snippet": "def save_grid(images: List, path: Union[str, os.PathLike], file_name: str, _format: str='png'):\n images = [Image.fromarray(np.squeeze((image * 255).round().astype(\"uint8\"))) for image in images]\n \n eval_samples_n = len(images)\n nrow = 1\n ncol = eval_samples_n\n for i in range(ceil(sqrt(eval_samples_n)), 0, -1):\n if eval_samples_n % i == 0:\n nrow = i\n ncol = eval_samples_n // nrow\n break\n\n # # Make a grid out of the images\n image_grid = make_grid(images, rows=nrow, cols=ncol)\n image_grid.save(os.path.join(f\"{path}\", f\"{file_name}.{_format}\"))" }, { "identifier": "match_count", "path": "tools.py", "snippet": "def match_count(dir: Union[str, os.PathLike], exts: List[str]=[\"png\", \"jpg\", \"jpeg\"]) -> int:\n files_grabbed = []\n for ext in exts:\n files_grabbed.extend(glob.glob(os.path.join(dir, f\"*.{ext}\")))\n return len(set(files_grabbed))" }, { "identifier": "Log", "path": "tools.py", "snippet": "class Log:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n \n @staticmethod\n def error_msg(msg: str):\n return Log.FAIL + Log.BOLD + msg + Log.ENDC\n \n @staticmethod\n def warning_msg(msg: str):\n return Log.WARNING + Log.BOLD + msg + Log.ENDC\n\n @staticmethod\n def critical_msg(msg: str):\n return Log.OKCYAN + Log.BOLD + msg + Log.ENDC\n\n @staticmethod\n def info_msg(msg: str):\n return Log.OKGREEN + Log.BOLD + msg + Log.ENDC\n\n @staticmethod\n def error(msg: str):\n msg: str = Log.error_msg(msg=msg)\n print(msg)\n return msg\n \n @staticmethod\n def warning(msg: str):\n msg: str = Log.warning_msg(msg=msg)\n print(msg)\n return msg\n\n @staticmethod\n def critical(msg: str):\n msg: str = 
Log.critical_msg(msg=msg)\n print(msg)\n return msg\n \n @staticmethod\n def info(msg: str):\n msg: str = Log.info_msg(msg=msg)\n print(msg)\n return msg" } ]
from functools import partial
from typing import List, Set, Tuple, Union
from diffusers import DiffusionPipeline, StableDiffusionPipeline, AutoencoderKL, UNet2DConditionModel, DPMSolverMultistepScheduler
from torchmetrics import StructuralSimilarityIndexMeasure
from torch import nn
from PIL import Image
from tqdm import tqdm
from accelerate import Accelerator
from fid_score import fid
from dataset import CaptionBackdoor, Backdoor, DatasetLoader, ImagePathDataset, ReplicateDataset
from config import SamplingStatic, MeasuringStatic, PromptDatasetStatic, DEFAULT_PROMPTS_POKEMON, DEFAULT_PROMPTS_CELEBA, ModelSchedStatic
from tools import batchify, batchify_generator, randn_images, encode_latents, save_grid, match_count
from tools import Log
import glob
import json
import os
import random
import pickle
import gc
import torch
import numpy as np
8195
return out_img_dir @staticmethod def _batch_sampling(prompts: List[str], pipeline: DiffusionPipeline, inits: torch.Tensor=None, num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS, guidance_scale: float=SamplingStatic.GUIDANCE_SCALE, max_batch_n: int=SamplingStatic.MAX_BATCH_N, seed: int=SamplingStatic.SEED, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN, return_imgs: bool=False): with torch.no_grad(): tensor_dtype: torch.dtype = torch.FloatTensor for i, param in enumerate(pipeline.unet.parameters()): tensor_dtype: torch.dtype = param.type() if i > 0: break device: str = pipeline.device pipeline_call = partial(pipeline, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=torch.manual_seed(seed), output_type=None) prompt_batchs = batchify(xs=prompts, max_batch_n=max_batch_n) if inits is not None: if len(prompts) != len(inits): raise ValueError() init_batchs = torch.split(inits.type(tensor_dtype), max_batch_n) else: init_batchs = [None] * len(prompt_batchs) # print(f"Prompt Batchs: {prompt_batchs}") # print(f"Init Batchs: {len(init_batchs)}") all_imgs = [] cnt: int = 0 # print(f"prompt_batch: {len(prompt_batchs)}, init_batch: {len(init_batchs)}") for prompt_batch, init_batch in zip(prompt_batchs, init_batchs): # print(f"prompt_batch: {prompt_batch}") print(f"prompt_batch Size: {len(prompt_batch)}, init_batchs: {init_batch}") if init_batch is not None: init_batch = init_batch.to(device=device) batch_imgs = pipeline_call(prompt=prompt_batch, latents=init_batch).images handle_batch_fn(cnt, batch_imgs, prompt_batch, init_batch) cnt += len(batch_imgs) if return_imgs: all_imgs += [batch_imgs] del prompt_batch del batch_imgs if init_batch is not None: del init_batch torch.cuda.empty_cache() gc.collect() del pipeline torch.cuda.empty_cache() gc.collect() if return_imgs: return np.concatenate(all_imgs) else: return None @staticmethod def _sample(prompts: List[str], pipe: DiffusionPipeline, inits: torch.Tensor=None, num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS, guidance_scale: float=SamplingStatic.GUIDANCE_SCALE, max_batch_n: int=SamplingStatic.MAX_BATCH_N, seed: int=SamplingStatic.SEED, handle_fn: callable=SamplingStatic.HANDLE_FN, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN, return_imgs: bool=False): if len(prompts) < SamplingStatic.SHOW_PROMPT_N: Log.info(f"Prompts: {prompts}") else: Log.info(f"Prompts: {prompts[:SamplingStatic.SHOW_PROMPT_N]}") # print(f"inits: {inits.shape}") images = Sampling._batch_sampling(prompts=prompts, inits=inits, pipeline=pipe, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, max_batch_n=max_batch_n, seed=seed, handle_batch_fn=handle_batch_fn, return_imgs=return_imgs) handle_fn(images, prompts, inits) if return_imgs: return images return None def sample(self, prompts: List[str], pipe: DiffusionPipeline, inits: torch.Tensor=None, seed: int=SamplingStatic.SEED, handle_fn: callable=SamplingStatic.HANDLE_FN, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN, return_imgs: bool=False): return Sampling._sample(prompts=prompts, inits=inits, pipe=pipe, num_inference_steps=self.__num_inference_steps, guidance_scale=self.__guidance_scale, max_batch_n=self.__max_batch_n, seed=seed, handle_fn=handle_fn, handle_batch_fn=handle_batch_fn, return_imgs=return_imgs) def image_backdoor_sample(self, prompts: List[str], trigger: str, pipe: DiffusionPipeline, inits: torch.Tensor=None, seed: int=SamplingStatic.SEED, handle_fn: callable=SamplingStatic.HANDLE_FN, handle_batch_fn: 
callable=SamplingStatic.HANDLE_BATCH_FN, return_imgs: bool=False): if inits is None: channel, image_size = 3, pipe.unet.sample_size noise: torch.Tensor = randn_images(n=len(prompts), channel=channel, image_size=image_size, seed=seed) if hasattr(pipe, 'vae'): inits: torch.Tensor = encode_latents(pipe.vae, noise + self.__image_backdoor.get_trigger(type=trigger, channel=channel, image_size=image_size), weight_dtype=torch.float16) else: inits: torch.Tensor = noise + trigger return self.sample(prompts=prompts, pipe=pipe, inits=inits, seed=seed, handle_fn=handle_fn, handle_batch_fn=handle_batch_fn, return_imgs=return_imgs) def caption_backdoor_sample(self, prompts: List[str], trigger: str, pipe: DiffusionPipeline, start_pos: int=SamplingStatic.TRIG_START_POS, end_pos: int=SamplingStatic.TRIG_END_POS, inits: torch.Tensor=None, seed: int=SamplingStatic.SEED, handle_fn: callable=SamplingStatic.HANDLE_FN, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN, return_imgs: bool=False): # def normalize_pos(pos: int, txt_len: int): # if pos > txt_len: # pos = txt_len # elif pos + txt_len < 0: # pos = 0 # return pos # def insert_trigger(txt: str): # txt_ls_len = len(txt.split(" ")) # pos_idxs = [i for i in range(txt_ls_len + 1)] # pos_idxs = pos_idxs[normalize_pos(pos=start_pos, txt_len=txt_ls_len):normalize_pos(pos=end_pos, txt_len=txt_ls_len)] # txt_ls = txt.split(" ") # insert_pos = random.choice(pos_idxs) # txt_ls.insert(insert_pos, trigger) # return ' '.join(txt_ls) # prompts: List[str] = [insert_trigger(txt=prompt) for prompt in prompts]
""" Some commly used operations """ # import argparse # from math import ceil, sqrt # from dataclasses import dataclass, field # from transformers import AutoTokenizer, PretrainedConfig class Sampling: def __init__(self, backdoor_ds_root: str="datasets", num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS, guidance_scale: float=SamplingStatic.GUIDANCE_SCALE, max_batch_n: int=SamplingStatic.MAX_BATCH_N): # self.__image_trigger_type: str = image_trigger # self.__caption_trigger_type: str = caption_trigger self.__num_inference_steps: int = num_inference_steps self.__guidance_scale: float = guidance_scale self.__max_batch_n: int = max_batch_n self.__image_backdoor: Backdoor = Backdoor(root=backdoor_ds_root) # self.__caption_backdoor: CaptionBackdoor = CaptionBackdoor() @property def image_backdoor(self): return self.__image_backdoor @staticmethod def get_folder(sched_name: str=None, num_inference_steps: int=None, img_num: int=None, image_trigger: str=None, caption_trigger: str=None): if caption_trigger is not None: out_img_dir: str = "caption_backdoor_samples" elif image_trigger is not None: out_img_dir: str = "image_backdoor_samples" else: out_img_dir: str = "clean_samples" if sched_name is not None: out_img_dir += f"_{str(sched_name)}" if num_inference_steps is not None: out_img_dir += f"_step{str(num_inference_steps)}" if img_num is not None: out_img_dir += f"_n{str(img_num)}" return out_img_dir @staticmethod def _batch_sampling(prompts: List[str], pipeline: DiffusionPipeline, inits: torch.Tensor=None, num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS, guidance_scale: float=SamplingStatic.GUIDANCE_SCALE, max_batch_n: int=SamplingStatic.MAX_BATCH_N, seed: int=SamplingStatic.SEED, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN, return_imgs: bool=False): with torch.no_grad(): tensor_dtype: torch.dtype = torch.FloatTensor for i, param in enumerate(pipeline.unet.parameters()): tensor_dtype: torch.dtype = param.type() if i > 0: break device: str = pipeline.device pipeline_call = partial(pipeline, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=torch.manual_seed(seed), output_type=None) prompt_batchs = batchify(xs=prompts, max_batch_n=max_batch_n) if inits is not None: if len(prompts) != len(inits): raise ValueError() init_batchs = torch.split(inits.type(tensor_dtype), max_batch_n) else: init_batchs = [None] * len(prompt_batchs) # print(f"Prompt Batchs: {prompt_batchs}") # print(f"Init Batchs: {len(init_batchs)}") all_imgs = [] cnt: int = 0 # print(f"prompt_batch: {len(prompt_batchs)}, init_batch: {len(init_batchs)}") for prompt_batch, init_batch in zip(prompt_batchs, init_batchs): # print(f"prompt_batch: {prompt_batch}") print(f"prompt_batch Size: {len(prompt_batch)}, init_batchs: {init_batch}") if init_batch is not None: init_batch = init_batch.to(device=device) batch_imgs = pipeline_call(prompt=prompt_batch, latents=init_batch).images handle_batch_fn(cnt, batch_imgs, prompt_batch, init_batch) cnt += len(batch_imgs) if return_imgs: all_imgs += [batch_imgs] del prompt_batch del batch_imgs if init_batch is not None: del init_batch torch.cuda.empty_cache() gc.collect() del pipeline torch.cuda.empty_cache() gc.collect() if return_imgs: return np.concatenate(all_imgs) else: return None @staticmethod def _sample(prompts: List[str], pipe: DiffusionPipeline, inits: torch.Tensor=None, num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS, guidance_scale: float=SamplingStatic.GUIDANCE_SCALE, max_batch_n: int=SamplingStatic.MAX_BATCH_N, 
seed: int=SamplingStatic.SEED, handle_fn: callable=SamplingStatic.HANDLE_FN, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN, return_imgs: bool=False): if len(prompts) < SamplingStatic.SHOW_PROMPT_N: Log.info(f"Prompts: {prompts}") else: Log.info(f"Prompts: {prompts[:SamplingStatic.SHOW_PROMPT_N]}") # print(f"inits: {inits.shape}") images = Sampling._batch_sampling(prompts=prompts, inits=inits, pipeline=pipe, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, max_batch_n=max_batch_n, seed=seed, handle_batch_fn=handle_batch_fn, return_imgs=return_imgs) handle_fn(images, prompts, inits) if return_imgs: return images return None def sample(self, prompts: List[str], pipe: DiffusionPipeline, inits: torch.Tensor=None, seed: int=SamplingStatic.SEED, handle_fn: callable=SamplingStatic.HANDLE_FN, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN, return_imgs: bool=False): return Sampling._sample(prompts=prompts, inits=inits, pipe=pipe, num_inference_steps=self.__num_inference_steps, guidance_scale=self.__guidance_scale, max_batch_n=self.__max_batch_n, seed=seed, handle_fn=handle_fn, handle_batch_fn=handle_batch_fn, return_imgs=return_imgs) def image_backdoor_sample(self, prompts: List[str], trigger: str, pipe: DiffusionPipeline, inits: torch.Tensor=None, seed: int=SamplingStatic.SEED, handle_fn: callable=SamplingStatic.HANDLE_FN, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN, return_imgs: bool=False): if inits is None: channel, image_size = 3, pipe.unet.sample_size noise: torch.Tensor = randn_images(n=len(prompts), channel=channel, image_size=image_size, seed=seed) if hasattr(pipe, 'vae'): inits: torch.Tensor = encode_latents(pipe.vae, noise + self.__image_backdoor.get_trigger(type=trigger, channel=channel, image_size=image_size), weight_dtype=torch.float16) else: inits: torch.Tensor = noise + trigger return self.sample(prompts=prompts, pipe=pipe, inits=inits, seed=seed, handle_fn=handle_fn, handle_batch_fn=handle_batch_fn, return_imgs=return_imgs) def caption_backdoor_sample(self, prompts: List[str], trigger: str, pipe: DiffusionPipeline, start_pos: int=SamplingStatic.TRIG_START_POS, end_pos: int=SamplingStatic.TRIG_END_POS, inits: torch.Tensor=None, seed: int=SamplingStatic.SEED, handle_fn: callable=SamplingStatic.HANDLE_FN, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN, return_imgs: bool=False): # def normalize_pos(pos: int, txt_len: int): # if pos > txt_len: # pos = txt_len # elif pos + txt_len < 0: # pos = 0 # return pos # def insert_trigger(txt: str): # txt_ls_len = len(txt.split(" ")) # pos_idxs = [i for i in range(txt_ls_len + 1)] # pos_idxs = pos_idxs[normalize_pos(pos=start_pos, txt_len=txt_ls_len):normalize_pos(pos=end_pos, txt_len=txt_ls_len)] # txt_ls = txt.split(" ") # insert_pos = random.choice(pos_idxs) # txt_ls.insert(insert_pos, trigger) # return ' '.join(txt_ls) # prompts: List[str] = [insert_trigger(txt=prompt) for prompt in prompts]
prompts: List[str] = CaptionBackdoor.backdoor_caption_generator(_type=trigger, start_pos=start_pos, end_pos=end_pos)(prompts)
1
2023-10-17 19:57:37+00:00
12k
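The commented-out helper inside caption_backdoor_sample above hints at how a textual trigger could be spliced into each prompt at a random word boundary within a start/end window. A minimal standalone sketch of that logic follows; the function names come from those comments, while the empty-window guard, the example prompts, and the "[TRIGGER]" placeholder token are assumptions added for illustration.

    import random
    from typing import List

    def normalize_pos(pos: int, txt_len: int) -> int:
        # Clamp a (possibly negative) word index into a valid range for a caption of txt_len words.
        if pos > txt_len:
            pos = txt_len
        elif pos + txt_len < 0:
            pos = 0
        return pos

    def insert_trigger(txt: str, trigger: str, start_pos: int, end_pos: int) -> str:
        # Pick a random word boundary inside the [start_pos, end_pos) window and splice the trigger in.
        txt_ls = txt.split(" ")
        txt_ls_len = len(txt_ls)
        pos_idxs = list(range(txt_ls_len + 1))
        pos_idxs = pos_idxs[normalize_pos(start_pos, txt_ls_len):normalize_pos(end_pos, txt_ls_len)]
        if not pos_idxs:  # guard for very short captions (not in the original comments)
            pos_idxs = [txt_ls_len]
        insert_pos = random.choice(pos_idxs)
        txt_ls.insert(insert_pos, trigger)
        return " ".join(txt_ls)

    # Usage: poison a batch of captions with a placeholder trigger token.
    prompts: List[str] = ["a photo of a cat", "an oil painting of a lighthouse"]
    poisoned = [insert_trigger(p, trigger="[TRIGGER]", start_pos=1, end_pos=-1) for p in prompts]
    print(poisoned)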
nchen909/Pass-Tuning
models_list/bitfit/modeling_auto.py
[ { "identifier": "PLBartForConditionalGeneration", "path": "models_list/bitfit/modeling_plbart.py", "snippet": "class PLBartForConditionalGeneration(PLBartPreTrainedModel):\n base_model_prefix = \"model\"\n _keys_to_ignore_on_load_missing = [\n r\"final_logits_bias\",\n r\"encoder.version\",\n r\"decoder.version\",\n r\"lm_head.weight\",\n ]\n\n def __init__(self, config: PLBartConfig):\n super().__init__(config)\n self.model = PLBartModel(config)\n self.register_buffer(\"final_logits_bias\", torch.zeros((1, self.model.shared.num_embeddings)))\n self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)\n\n self.init_weights()\n\n def get_encoder(self):\n return self.model.get_encoder()\n\n def get_decoder(self):\n return self.model.get_decoder()\n\n def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:\n new_embeddings = super().resize_token_embeddings(new_num_tokens)\n self._resize_final_logits_bias(new_num_tokens)\n return new_embeddings\n\n def _resize_final_logits_bias(self, new_num_tokens: int) -> None:\n old_num_tokens = self.final_logits_bias.shape[-1]\n if new_num_tokens <= old_num_tokens:\n new_bias = self.final_logits_bias[:, :new_num_tokens]\n else:\n extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)\n new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)\n self.register_buffer(\"final_logits_bias\", new_bias)\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n @add_start_docstrings_to_model_forward(PLBART_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\n @add_end_docstrings(PLBART_GENERATION_EXAMPLE)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n decoder_head_mask: Optional[torch.LongTensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n encoder_outputs: Optional[List[torch.FloatTensor]] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds=None,\n labels: Optional[torch.Tensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n **kwargs,\n ) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). 
Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if labels is not None:\n if decoder_input_ids is None:\n decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n encoder_outputs=encoder_outputs,\n decoder_attention_mask=decoder_attention_mask,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return Seq2SeqLMOutput(\n loss=masked_lm_loss,\n logits=lm_logits,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n cross_attentions=outputs.cross_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n )\n\n def prepare_inputs_for_generation(\n self,\n decoder_input_ids: torch.LongTensor,\n past: Optional[List[torch.FloatTensor]] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n decoder_head_mask: Optional[torch.Tensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n use_cache: Optional[bool] = None,\n encoder_outputs: Optional[List[torch.FloatTensor]] = None,\n **kwargs # TODO: Check if this is needed. It is unused?\n ) -> Dict[str, Any]:\n # cut decoder_input_ids if past is used\n if past is not None:\n decoder_input_ids = decoder_input_ids[:, -1:]\n\n return {\n \"input_ids\": None, # encoder_outputs is defined. 
input_ids not needed\n \"encoder_outputs\": encoder_outputs,\n \"past_key_values\": past,\n \"decoder_input_ids\": decoder_input_ids,\n \"attention_mask\": attention_mask,\n \"head_mask\": head_mask,\n \"decoder_head_mask\": decoder_head_mask,\n \"cross_attn_head_mask\": cross_attn_head_mask,\n \"use_cache\": use_cache, # change this to avoid caching (presumably for debugging)\n }\n\n def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):\n return shift_tokens_right(labels, self.config.pad_token_id)\n\n @staticmethod\n def _reorder_cache(past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n # cached cross_attention states don't have to be reordered -> they are always the same\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],\n )\n return reordered_past" }, { "identifier": "PLBartModel", "path": "models_list/bitfit/modeling_plbart.py", "snippet": "class PLBartModel(PLBartPreTrainedModel):\n def __init__(self, config: PLBartConfig):\n super().__init__(config)\n\n padding_idx, vocab_size = config.pad_token_id, config.vocab_size\n self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)\n\n self.encoder = PLBartEncoder(config, self.shared)\n self.decoder = PLBartDecoder(config, self.shared)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, value):\n self.shared = value\n self.encoder.embed_tokens = self.shared\n self.decoder.embed_tokens = self.shared\n\n def get_encoder(self):\n return self.encoder\n\n def get_decoder(self):\n return self.decoder\n\n @add_start_docstrings_to_model_forward(PLBART_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=Seq2SeqModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n decoder_head_mask: Optional[torch.LongTensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n encoder_outputs: Optional[List[torch.FloatTensor]] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds=None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # different to other models, PLBart automatically creates decoder_input_ids from\n # input_ids if no decoder_input_ids are provided\n if decoder_input_ids is None and decoder_inputs_embeds is None:\n decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id)\n\n if encoder_outputs is None:\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n 
output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\n )\n\n # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids,\n attention_mask=decoder_attention_mask,\n encoder_hidden_states=encoder_outputs[0],\n encoder_attention_mask=attention_mask,\n head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n past_key_values=past_key_values,\n inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if not return_dict:\n return decoder_outputs + encoder_outputs\n\n return Seq2SeqModelOutput(\n last_hidden_state=decoder_outputs.last_hidden_state,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n cross_attentions=decoder_outputs.cross_attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )" }, { "identifier": "T5ForConditionalGeneration", "path": "models_list/bitfit/modeling_t5.py", "snippet": "class T5ForConditionalGeneration(T5PreTrainedModel):\r\n _keys_to_ignore_on_load_missing = [\r\n r\"encoder\\.embed_tokens\\.weight\",\r\n r\"decoder\\.embed_tokens\\.weight\",\r\n r\"lm_head\\.weight\",\r\n ]\r\n _keys_to_ignore_on_load_unexpected = [\r\n r\"decoder\\.block\\.0\\.layer\\.1\\.EncDecAttention\\.relative_attention_bias\\.weight\",\r\n ]\r\n\r\n def __init__(self, config):\r\n super().__init__(config)\r\n self.model_dim = config.d_model\r\n\r\n self.shared = nn.Embedding(config.vocab_size, config.d_model)\r\n\r\n encoder_config = copy.deepcopy(config)\r\n encoder_config.is_decoder = False\r\n encoder_config.use_cache = False\r\n encoder_config.is_encoder_decoder = False\r\n self.encoder = T5Stack(encoder_config, self.shared)\r\n\r\n decoder_config = copy.deepcopy(config)\r\n decoder_config.is_decoder = True\r\n decoder_config.is_encoder_decoder = False\r\n decoder_config.num_layers = config.num_decoder_layers\r\n self.decoder = T5Stack(decoder_config, self.shared)\r\n\r\n self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)\r\n\r\n self.init_weights()\r\n\r\n # Model parallel\r\n self.model_parallel = False\r\n self.device_map = None\r\n\r\n @add_start_docstrings(PARALLELIZE_DOCSTRING)\r\n def parallelize(self, device_map=None):\r\n self.device_map = (\r\n get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))\r\n if device_map is None\r\n else device_map\r\n )\r\n assert_device_map(self.device_map, len(self.encoder.block))\r\n self.encoder.parallelize(self.device_map)\r\n self.decoder.parallelize(self.device_map)\r\n self.lm_head = self.lm_head.to(self.decoder.first_device)\r\n self.model_parallel = True\r\n\r\n @add_start_docstrings(DEPARALLELIZE_DOCSTRING)\r\n def deparallelize(self):\r\n self.encoder.deparallelize()\r\n self.decoder.deparallelize()\r\n self.encoder = 
self.encoder.to(\"cpu\")\r\n self.decoder = self.decoder.to(\"cpu\")\r\n self.lm_head = self.lm_head.to(\"cpu\")\r\n self.model_parallel = False\r\n self.device_map = None\r\n torch.cuda.empty_cache()\r\n\r\n def get_input_embeddings(self):\r\n return self.shared\r\n\r\n def set_input_embeddings(self, new_embeddings):\r\n self.shared = new_embeddings\r\n self.encoder.set_input_embeddings(new_embeddings)\r\n self.decoder.set_input_embeddings(new_embeddings)\r\n\r\n def set_output_embeddings(self, new_embeddings):\r\n self.lm_head = new_embeddings\r\n\r\n def get_output_embeddings(self):\r\n return self.lm_head\r\n\r\n def get_encoder(self):\r\n return self.encoder\r\n\r\n def get_decoder(self):\r\n return self.decoder\r\n\r\n @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)\r\n @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\r\n def forward(\r\n self,\r\n input_ids=None,\r\n attention_mask=None,\r\n decoder_input_ids=None,\r\n decoder_attention_mask=None,\r\n head_mask=None,\r\n decoder_head_mask=None,\r\n cross_attn_head_mask=None,\r\n encoder_outputs=None,\r\n past_key_values=None,\r\n inputs_embeds=None,\r\n decoder_inputs_embeds=None,\r\n labels=None,\r\n use_cache=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n past_prompt=None, # modified\r\n ):\r\n r\"\"\"\r\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\r\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[-100, 0, ...,\r\n config.vocab_size - 1]`. All labels set to ``-100`` are ignored (masked), the loss is only computed for\r\n labels in ``[0, ..., config.vocab_size]``\r\n\r\n Returns:\r\n\r\n Examples::\r\n\r\n >>> from transformers import T5Tokenizer, T5ForConditionalGeneration\r\n\r\n >>> tokenizer = T5Tokenizer.from_pretrained('t5-small')\r\n >>> model = T5ForConditionalGeneration.from_pretrained('t5-small')\r\n\r\n >>> input_ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids\r\n >>> labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2> </s>', return_tensors='pt').input_ids\r\n >>> outputs = model(input_ids=input_ids, labels=labels)\r\n >>> loss = outputs.loss\r\n >>> logits = outputs.logits\r\n\r\n >>> input_ids = tokenizer(\"summarize: studies have shown that owning a dog is good for you \", return_tensors=\"pt\").input_ids # Batch size 1\r\n >>> outputs = model.generate(input_ids)\r\n \"\"\"\r\n use_cache = use_cache if use_cache is not None else self.config.use_cache\r\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\r\n\r\n # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask\r\n if head_mask is not None and decoder_head_mask is None:\r\n if self.config.num_layers == self.config.num_decoder_layers:\r\n warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)\r\n decoder_head_mask = head_mask\r\n\r\n # Encode if needed (training, first prediction pass)\r\n if encoder_outputs is None:\r\n # Convert encoder inputs in embeddings if needed\r\n encoder_outputs = self.encoder(\r\n input_ids=input_ids,\r\n attention_mask=attention_mask,\r\n inputs_embeds=inputs_embeds,\r\n head_mask=head_mask,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n past_prompt=past_prompt, # modified\r\n )\r\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\r\n 
encoder_outputs = BaseModelOutput(\r\n last_hidden_state=encoder_outputs[0],\r\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\r\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\r\n )\r\n\r\n hidden_states = encoder_outputs[0]\r\n\r\n if self.model_parallel:\r\n torch.cuda.set_device(self.decoder.first_device)\r\n\r\n if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:\r\n # get decoder inputs from shifting lm labels to the right\r\n decoder_input_ids = self._shift_right(labels)\r\n\r\n # If decoding with past key value states, only the last tokens\r\n # should be given as an input\r\n if past_key_values is not None:\r\n assert labels is None, \"Decoder should not use cached key value states when training.\"\r\n if decoder_input_ids is not None:\r\n decoder_input_ids = decoder_input_ids[:, -1:]\r\n if decoder_inputs_embeds is not None:\r\n decoder_inputs_embeds = decoder_inputs_embeds[:, -1:]\r\n\r\n # Set device for model parallelism\r\n if self.model_parallel:\r\n torch.cuda.set_device(self.decoder.first_device)\r\n hidden_states = hidden_states.to(self.decoder.first_device)\r\n if decoder_input_ids is not None:\r\n decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)\r\n if attention_mask is not None:\r\n attention_mask = attention_mask.to(self.decoder.first_device)\r\n if decoder_attention_mask is not None:\r\n decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)\r\n\r\n # Decode\r\n decoder_outputs = self.decoder(\r\n input_ids=decoder_input_ids,\r\n attention_mask=decoder_attention_mask,\r\n inputs_embeds=decoder_inputs_embeds,\r\n past_key_values=past_key_values,\r\n encoder_hidden_states=hidden_states,\r\n encoder_attention_mask=attention_mask,\r\n head_mask=decoder_head_mask,\r\n cross_attn_head_mask=cross_attn_head_mask,\r\n use_cache=use_cache,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n past_prompt=past_prompt, # modified\r\n )\r\n\r\n sequence_output = decoder_outputs[0]\r\n\r\n # Set device for model parallelism\r\n if self.model_parallel:\r\n torch.cuda.set_device(self.encoder.first_device)\r\n self.lm_head = self.lm_head.to(self.encoder.first_device)\r\n sequence_output = sequence_output.to(self.lm_head.weight.device)\r\n\r\n if self.config.tie_word_embeddings:\r\n # Rescale output before projecting on vocab\r\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586\r\n sequence_output = sequence_output * (self.model_dim ** -0.5)\r\n\r\n lm_logits = self.lm_head(sequence_output)\r\n\r\n loss = None\r\n if labels is not None:\r\n loss_fct = CrossEntropyLoss(ignore_index=-100)\r\n loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))\r\n # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666\r\n\r\n if not return_dict:\r\n output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs\r\n return ((loss,) + output) if loss is not None else output\r\n\r\n return Seq2SeqLMOutput(\r\n loss=loss,\r\n logits=lm_logits,\r\n past_key_values=decoder_outputs.past_key_values,\r\n decoder_hidden_states=decoder_outputs.hidden_states,\r\n decoder_attentions=decoder_outputs.attentions,\r\n cross_attentions=decoder_outputs.cross_attentions,\r\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\r\n 
encoder_hidden_states=encoder_outputs.hidden_states,\r\n encoder_attentions=encoder_outputs.attentions,\r\n )\r\n\r\n def prepare_inputs_for_generation(\r\n self,\r\n input_ids,\r\n past=None,\r\n attention_mask=None,\r\n head_mask=None,\r\n decoder_head_mask=None,\r\n cross_attn_head_mask=None,\r\n use_cache=None,\r\n encoder_outputs=None,\r\n **kwargs\r\n ):\r\n\r\n # cut decoder_input_ids if past is used\r\n if past is not None:\r\n input_ids = input_ids[:, -1:]\r\n\r\n return {\r\n \"decoder_input_ids\": input_ids,\r\n \"past_key_values\": past,\r\n \"encoder_outputs\": encoder_outputs,\r\n \"attention_mask\": attention_mask,\r\n \"head_mask\": head_mask,\r\n \"decoder_head_mask\": decoder_head_mask,\r\n \"cross_attn_head_mask\": cross_attn_head_mask,\r\n \"use_cache\": use_cache,\r\n # \"past_prompt\": kwargs['past_prompt'], # modified\r\n }\r\n\r\n def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):\r\n return self._shift_right(labels)\r\n\r\n def _reorder_cache(self, past, beam_idx):\r\n # if decoder past is not included in output\r\n # speedy decoding is disabled and no need to reorder\r\n if past is None:\r\n logger.warning(\"You might want to consider setting `use_cache=True` to speed up decoding\")\r\n return past\r\n\r\n reordered_decoder_past = ()\r\n for layer_past_states in past:\r\n # get the correct batch idx from layer past batch dim\r\n # batch dim of `past` is at 2nd position\r\n reordered_layer_past_states = ()\r\n for layer_past_state in layer_past_states:\r\n # need to set correct `past` for each of the four key / value states\r\n reordered_layer_past_states = reordered_layer_past_states + (\r\n layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),\r\n )\r\n\r\n assert reordered_layer_past_states[0].shape == layer_past_states[0].shape\r\n assert len(reordered_layer_past_states) == len(layer_past_states)\r\n\r\n reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)\r\n return reordered_decoder_past\r" }, { "identifier": "T5Model", "path": "models_list/bitfit/modeling_t5.py", "snippet": "class T5Model(T5PreTrainedModel):\r\n _keys_to_ignore_on_load_missing = [\r\n r\"encoder\\.embed_tokens\\.weight\",\r\n r\"decoder\\.embed_tokens\\.weight\",\r\n ]\r\n _keys_to_ignore_on_load_unexpected = [\r\n r\"decoder\\.block\\.0\\.layer\\.1\\.EncDecAttention\\.relative_attention_bias\\.weight\",\r\n ]\r\n\r\n def __init__(self, config: T5Config):\r\n super().__init__(config)\r\n self.shared = nn.Embedding(config.vocab_size, config.d_model)\r\n\r\n encoder_config = copy.deepcopy(config)\r\n encoder_config.is_decoder = False\r\n encoder_config.use_cache = False\r\n encoder_config.is_encoder_decoder = False\r\n self.encoder = T5Stack(encoder_config, self.shared)\r\n\r\n decoder_config = copy.deepcopy(config)\r\n decoder_config.is_decoder = True\r\n decoder_config.is_encoder_decoder = False\r\n decoder_config.num_layers = config.num_decoder_layers\r\n self.decoder = T5Stack(decoder_config, self.shared)\r\n\r\n self.init_weights()\r\n\r\n # Model parallel\r\n self.model_parallel = False\r\n self.device_map = None\r\n\r\n @add_start_docstrings(PARALLELIZE_DOCSTRING)\r\n def parallelize(self, device_map=None):\r\n self.device_map = (\r\n get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))\r\n if device_map is None\r\n else device_map\r\n )\r\n assert_device_map(self.device_map, len(self.encoder.block))\r\n self.encoder.parallelize(self.device_map)\r\n self.decoder.parallelize(self.device_map)\r\n 
self.model_parallel = True\r\n\r\n @add_start_docstrings(DEPARALLELIZE_DOCSTRING)\r\n def deparallelize(self):\r\n self.encoder.deparallelize()\r\n self.decoder.deparallelize()\r\n self.encoder = self.encoder.to(\"cpu\")\r\n self.decoder = self.decoder.to(\"cpu\")\r\n self.model_parallel = False\r\n self.device_map = None\r\n torch.cuda.empty_cache()\r\n\r\n def get_input_embeddings(self):\r\n return self.shared\r\n\r\n def set_input_embeddings(self, new_embeddings):\r\n self.shared = new_embeddings\r\n self.encoder.set_input_embeddings(new_embeddings)\r\n self.decoder.set_input_embeddings(new_embeddings)\r\n\r\n def get_encoder(self):\r\n return self.encoder\r\n\r\n def get_decoder(self):\r\n return self.decoder\r\n\r\n def _prune_heads(self, heads_to_prune):\r\n \"\"\"\r\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\r\n class PreTrainedModel\r\n \"\"\"\r\n for layer, heads in heads_to_prune.items():\r\n self.encoder.layer[layer].attention.prune_heads(heads)\r\n\r\n @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)\r\n @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)\r\n def forward(\r\n self,\r\n input_ids=None,\r\n attention_mask=None,\r\n decoder_input_ids=None,\r\n decoder_attention_mask=None,\r\n head_mask=None,\r\n decoder_head_mask=None,\r\n cross_attn_head_mask=None,\r\n encoder_outputs=None,\r\n past_key_values=None,\r\n inputs_embeds=None,\r\n decoder_inputs_embeds=None,\r\n use_cache=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n ):\r\n r\"\"\"\r\n Returns:\r\n\r\n Example::\r\n\r\n >>> from transformers import T5Tokenizer, T5Model\r\n\r\n >>> tokenizer = T5Tokenizer.from_pretrained('t5-small')\r\n >>> model = T5Model.from_pretrained('t5-small')\r\n\r\n >>> input_ids = tokenizer(\"Studies have been shown that owning a dog is good for you\", return_tensors=\"pt\").input_ids # Batch size 1\r\n >>> decoder_input_ids = tokenizer(\"Studies show that\", return_tensors=\"pt\").input_ids # Batch size 1\r\n >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)\r\n\r\n >>> last_hidden_states = outputs.last_hidden_state\r\n \"\"\"\r\n use_cache = use_cache if use_cache is not None else self.config.use_cache\r\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\r\n\r\n # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask\r\n if head_mask is not None and decoder_head_mask is None:\r\n if self.config.num_layers == self.config.num_decoder_layers:\r\n warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)\r\n decoder_head_mask = head_mask\r\n\r\n # Encode if needed (training, first prediction pass)\r\n if encoder_outputs is None:\r\n encoder_outputs = self.encoder(\r\n input_ids=input_ids,\r\n attention_mask=attention_mask,\r\n inputs_embeds=inputs_embeds,\r\n head_mask=head_mask,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n )\r\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\r\n encoder_outputs = BaseModelOutput(\r\n last_hidden_state=encoder_outputs[0],\r\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\r\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\r\n )\r\n\r\n hidden_states = encoder_outputs[0]\r\n if self.model_parallel:\r\n torch.cuda.set_device(self.decoder.first_device)\r\n # Set 
device for model parallelism\r\n if self.model_parallel:\r\n torch.cuda.set_device(self.decoder.first_device)\r\n hidden_states = hidden_states.to(self.decoder.first_device)\r\n if decoder_input_ids is not None:\r\n decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)\r\n if attention_mask is not None:\r\n attention_mask = attention_mask.to(self.decoder.first_device)\r\n if decoder_attention_mask is not None:\r\n decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)\r\n\r\n # Decode\r\n decoder_outputs = self.decoder(\r\n input_ids=decoder_input_ids,\r\n attention_mask=decoder_attention_mask,\r\n inputs_embeds=decoder_inputs_embeds,\r\n past_key_values=past_key_values,\r\n encoder_hidden_states=hidden_states,\r\n encoder_attention_mask=attention_mask,\r\n head_mask=decoder_head_mask,\r\n cross_attn_head_mask=cross_attn_head_mask,\r\n use_cache=use_cache,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n )\r\n\r\n if not return_dict:\r\n return decoder_outputs + encoder_outputs\r\n\r\n return Seq2SeqModelOutput(\r\n last_hidden_state=decoder_outputs.last_hidden_state,\r\n past_key_values=decoder_outputs.past_key_values,\r\n decoder_hidden_states=decoder_outputs.hidden_states,\r\n decoder_attentions=decoder_outputs.attentions,\r\n cross_attentions=decoder_outputs.cross_attentions,\r\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\r\n encoder_hidden_states=encoder_outputs.hidden_states,\r\n encoder_attentions=encoder_outputs.attentions,\r\n )\r" } ]
import warnings from collections import OrderedDict from transformers.utils import logging from transformers.models.albert.modeling_albert import ( AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from .modeling_plbart import ( PLBartForConditionalGeneration, PLBartModel, ) from transformers.models.bart.modeling_bart import ( BartForCausalLM, BartForQuestionAnswering, BartForSequenceClassification, ) from transformers.models.bert.modeling_bert import ( BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLMHeadModel, BertModel, ) from transformers.models.bert_generation.modeling_bert_generation import BertGenerationDecoder, BertGenerationEncoder from transformers.models.big_bird.modeling_big_bird import ( BigBirdForCausalLM, BigBirdForMaskedLM, BigBirdForMultipleChoice, BigBirdForPreTraining, BigBirdForQuestionAnswering, BigBirdForSequenceClassification, BigBirdForTokenClassification, BigBirdModel, ) from transformers.models.bigbird_pegasus.modeling_bigbird_pegasus import ( BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, ) from transformers.models.blenderbot.modeling_blenderbot import BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel from transformers.models.blenderbot_small.modeling_blenderbot_small import ( BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, ) from transformers.models.camembert.modeling_camembert import ( CamembertForCausalLM, CamembertForMaskedLM, CamembertForMultipleChoice, CamembertForQuestionAnswering, CamembertForSequenceClassification, CamembertForTokenClassification, CamembertModel, ) from transformers.models.canine.modeling_canine import ( CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineModel, ) from transformers.models.clip.modeling_clip import CLIPModel from transformers.models.convbert.modeling_convbert import ( ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertModel, ) from transformers.models.ctrl.modeling_ctrl import CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel from transformers.models.deberta.modeling_deberta import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta_v2.modeling_deberta_v2 import ( DebertaV2ForMaskedLM, DebertaV2ForQuestionAnswering, DebertaV2ForSequenceClassification, DebertaV2ForTokenClassification, DebertaV2Model, ) from transformers.models.deit.modeling_deit import DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTModel from transformers.models.detr.modeling_detr import DetrForObjectDetection, DetrModel from transformers.models.distilbert.modeling_distilbert import ( DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) from transformers.models.dpr.modeling_dpr import DPRQuestionEncoder from transformers.models.electra.modeling_electra import ( ElectraForMaskedLM, 
ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ) from transformers.models.encoder_decoder.modeling_encoder_decoder import EncoderDecoderModel from transformers.models.flaubert.modeling_flaubert import ( FlaubertForMultipleChoice, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.fsmt.modeling_fsmt import FSMTForConditionalGeneration, FSMTModel from transformers.models.funnel.modeling_funnel import ( FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, ) from transformers.models.gpt2.modeling_gpt2 import GPT2ForSequenceClassification, GPT2LMHeadModel, GPT2Model from transformers.models.gpt_neo.modeling_gpt_neo import GPTNeoForCausalLM, GPTNeoForSequenceClassification, GPTNeoModel from transformers.models.hubert.modeling_hubert import HubertModel from transformers.models.ibert.modeling_ibert import ( IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, ) from transformers.models.layoutlm.modeling_layoutlm import ( LayoutLMForMaskedLM, LayoutLMForSequenceClassification, LayoutLMForTokenClassification, LayoutLMModel, ) from transformers.models.led.modeling_led import ( LEDForConditionalGeneration, LEDForQuestionAnswering, LEDForSequenceClassification, LEDModel, ) from transformers.models.longformer.modeling_longformer import ( LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, ) from transformers.models.luke.modeling_luke import LukeModel from transformers.models.lxmert.modeling_lxmert import LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel from transformers.models.m2m_100.modeling_m2m_100 import M2M100ForConditionalGeneration, M2M100Model from transformers.models.marian.modeling_marian import MarianForCausalLM, MarianModel, MarianMTModel from transformers.models.mbart.modeling_mbart import ( MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, ) from transformers.models.megatron_bert.modeling_megatron_bert import ( MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) from transformers.models.mobilebert.modeling_mobilebert import ( MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) from transformers.models.mpnet.modeling_mpnet import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) from transformers.models.mt5.modeling_mt5 import MT5ForConditionalGeneration, MT5Model from transformers.models.openai.modeling_openai import OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel from transformers.models.pegasus.modeling_pegasus import 
PegasusForCausalLM, PegasusForConditionalGeneration, PegasusModel from transformers.models.prophetnet.modeling_prophetnet import ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel from transformers.models.rag.modeling_rag import ( # noqa: F401 - need to import all RagModels to be in globals() function RagModel, RagSequenceForGeneration, RagTokenForGeneration, ) from transformers.models.reformer.modeling_reformer import ( ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerModel, ReformerModelWithLMHead, ) from transformers.models.retribert.modeling_retribert import RetriBertModel from transformers.models.roberta.modeling_roberta import ( RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, ) from transformers.models.roformer.modeling_roformer import ( RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerModel, ) from transformers.models.speech_to_text.modeling_speech_to_text import Speech2TextForConditionalGeneration, Speech2TextModel from transformers.models.squeezebert.modeling_squeezebert import ( SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) from .modeling_t5 import T5ForConditionalGeneration, T5Model from transformers.models.tapas.modeling_tapas import ( TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, ) from transformers.models.transfo_xl.modeling_transfo_xl import TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel from transformers.models.visual_bert.modeling_visual_bert import VisualBertForPreTraining, VisualBertModel from transformers.models.vit.modeling_vit import ViTForImageClassification, ViTModel from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForMaskedLM, Wav2Vec2ForPreTraining, Wav2Vec2Model from transformers.models.xlm.modeling_xlm import ( XLMForMultipleChoice, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm_prophetnet.modeling_xlm_prophetnet import ( XLMProphetNetForCausalLM, XLMProphetNetForConditionalGeneration, XLMProphetNetModel, ) from transformers.models.xlm_roberta.modeling_xlm_roberta import ( XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, ) from transformers.models.xlnet.modeling_xlnet import ( XLNetForMultipleChoice, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, ) from transformers.models.auto.auto_factory import _BaseAutoModelClass, auto_class_update from transformers.models.auto.configuration_auto import ( AlbertConfig, PLBartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BlenderbotConfig, BlenderbotSmallConfig, CamembertConfig, CanineConfig, CLIPConfig, ConvBertConfig, CTRLConfig, DebertaConfig, DebertaV2Config, DeiTConfig, DetrConfig, DistilBertConfig, DPRConfig, ElectraConfig, EncoderDecoderConfig, FlaubertConfig, FSMTConfig, FunnelConfig, GPT2Config, GPTNeoConfig, HubertConfig, IBertConfig, 
LayoutLMConfig, LEDConfig, LongformerConfig, LukeConfig, LxmertConfig, M2M100Config, MarianConfig, MBartConfig, MegatronBertConfig, MobileBertConfig, MPNetConfig, MT5Config, OpenAIGPTConfig, PegasusConfig, ProphetNetConfig, ReformerConfig, RetriBertConfig, RobertaConfig, RoFormerConfig, Speech2TextConfig, SqueezeBertConfig, T5Config, TapasConfig, TransfoXLConfig, VisualBertConfig, ViTConfig, Wav2Vec2Config, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLNetConfig, )
10,778
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Model class. """

# Add modeling imports here
#
# Instead of loading the BART from the transformers==4.9.1, we choose to load from our own prefix-tuning version.
# Instead of loading the T5 from the transformers==4.9.1, we choose to load from our prefix-tuning version.

logger = logging.get_logger(__name__)

MODEL_MAPPING = OrderedDict(
    [
        # Base model mapping
        (VisualBertConfig, VisualBertModel),
        (CanineConfig, CanineModel),
        (RoFormerConfig, RoFormerModel),
        (CLIPConfig, CLIPModel),
        (BigBirdPegasusConfig, BigBirdPegasusModel),
        (DeiTConfig, DeiTModel),
        (LukeConfig, LukeModel),
        (DetrConfig, DetrModel),
        (GPTNeoConfig, GPTNeoModel),
        (BigBirdConfig, BigBirdModel),
        (Speech2TextConfig, Speech2TextModel),
        (ViTConfig, ViTModel),
        (Wav2Vec2Config, Wav2Vec2Model),
        (HubertConfig, HubertModel),
        (M2M100Config, M2M100Model),
        (ConvBertConfig, ConvBertModel),
        (LEDConfig, LEDModel),
        (BlenderbotSmallConfig, BlenderbotSmallModel),
        (RetriBertConfig, RetriBertModel),
        (MT5Config, MT5Model),
(T5Config, T5Model),
3
2023-10-20 09:24:44+00:00
12k
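The MODEL_MAPPING above pairs each *Config class with the model class the corresponding Auto* factory should build, and the next line of that file adds the (T5Config, T5Model) entry. A simplified sketch of how such a config-to-model registry is typically consumed is shown below; the stand-in classes and the model_from_config helper are illustrative assumptions, not the real transformers lookup machinery.

    from collections import OrderedDict

    # Toy stand-ins for the real config/model classes registered in the mapping above.
    class T5Config:
        pass

    class T5Model:
        def __init__(self, config):
            self.config = config

    MODEL_MAPPING = OrderedDict(
        [
            (T5Config, T5Model),
        ]
    )

    def model_from_config(config):
        # Walk the registry and instantiate the model class registered for this config type.
        for config_cls, model_cls in MODEL_MAPPING.items():
            if isinstance(config, config_cls):
                return model_cls(config)
        raise ValueError(f"Unrecognized configuration class: {type(config)}")

    model = model_from_config(T5Config())
    print(type(model).__name__)  # -> T5Model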
JoaoPedro9674/django-ledger
django_ledger/tests/base.py
[ { "identifier": "EntityDataGenerator", "path": "django_ledger/io/data_generator.py", "snippet": "class EntityDataGenerator(LoggingMixIn):\n\n def __init__(self,\n user_model,\n entity_model: Union[EntityModel, str],\n start_date: date,\n capital_contribution: Decimal,\n days_forward: int,\n tx_quantity: int = 25):\n\n assert isinstance(entity_model, (EntityModel, str)), 'Must pass an instance of EntityModel or str'\n assert capital_contribution > 0, 'Capital contribution must be greater than 0'\n\n if not FAKER_IMPORTED:\n raise ImproperlyConfigured('Must install Faker library to generate random data.')\n\n self.fk = Faker(['en_US'])\n self.fk.add_provider(company)\n self.fk.add_provider(address)\n self.fk.add_provider(phone_number)\n self.fk.add_provider(bank)\n\n self.start_date: date = start_date\n self.local_date = localdate()\n self.tx_quantity = tx_quantity\n self.localtime = localtime()\n self.COUNT_INVENTORY = True\n self.DAYS_FORWARD = days_forward\n\n self.entity_model: EntityModel = entity_model\n self.default_coa: Optional[ChartOfAccountModel] = None\n self.capital_contribution = capital_contribution\n self.user_model = user_model\n\n self.is_accruable_probability = 0.2\n self.is_paid_probability = 0.90\n\n self.vendor_models = None\n self.customer_models = None\n self.bank_account_models = None\n self.entity_unit_models = None\n self.uom_models = None\n self.expense_models = None\n self.product_models = None\n self.service_models = None\n self.inventory_models = None\n\n self.account_models = None\n self.accounts_by_role = None\n\n self.COUNTRY = 'US'\n self.NB_UNITS: int = 4\n\n self.PRODUCTS_MIN = 20\n self.PRODUCTS_MAX = 40\n self.MIN_DAYS_FORWARD = 1\n self.MAX_DAYS_FORWARD = 8\n\n self.logger = self.get_logger()\n\n def get_logger_name(self):\n return self.entity_model.slug\n\n def populate_entity(self):\n\n self.logger.info('Checking for existing transactions...')\n txs_qs = TransactionModel.objects.for_entity(\n entity_slug=self.entity_model,\n user_model=self.user_model\n )\n\n if txs_qs.count() > 0:\n raise ValidationError(\n f'Cannot populate random data on {self.entity_model.name} because it already has existing Transactions')\n\n self.create_coa()\n self.logger.info(f'Pulling Entity {self.entity_model} accounts...')\n self.account_models = self.entity_model.get_coa_accounts(order_by=('role', 'code'))\n self.accounts_by_role = {g: list(v) for g, v in groupby(self.account_models, key=lambda a: a.role)}\n self.create_vendors()\n self.create_customers()\n self.create_entity_units()\n self.create_bank_accounts()\n self.create_uom_models()\n\n self.create_products()\n self.create_services()\n self.create_inventories()\n self.create_expenses()\n\n self.fund_entity()\n\n for i in range(self.tx_quantity):\n start_dttm = self.start_date + timedelta(days=randint(0, self.DAYS_FORWARD))\n self.create_estimate(date_draft=start_dttm)\n self.create_po(date_draft=start_dttm)\n self.recount_inventory()\n self.update_products()\n self.create_bill(date_draft=start_dttm)\n\n for i in range(self.tx_quantity):\n start_dttm = self.start_date + timedelta(days=randint(0, self.DAYS_FORWARD))\n self.create_invoice(date_draft=start_dttm)\n\n self.create_closing_entry()\n\n def get_next_date(self, prev_date: date = None) -> date:\n if not prev_date:\n prev_date = self.start_date\n next_date = prev_date + timedelta(days=randint(\n self.MIN_DAYS_FORWARD,\n self.MAX_DAYS_FORWARD\n ))\n if next_date > self.local_date:\n next_date = self.local_date\n return next_date\n\n def create_coa(self):\n 
entity_model = self.entity_model\n coa_model = entity_model.create_chart_of_accounts(assign_as_default=True, commit=True)\n entity_model.populate_default_coa(coa_model=coa_model, activate_accounts=True)\n self.default_coa = entity_model.default_coa\n\n def create_entity_units(self, nb_units: int = None):\n self.logger.info(f'Creating entity units...')\n nb_units = self.NB_UNITS if not nb_units else nb_units\n\n if nb_units:\n assert nb_units >= 0, 'Number of unite must be greater than 0'\n\n entity_unit_models = [\n EntityUnitModel(\n name=f'Unit {u}',\n entity=self.entity_model,\n document_prefix=''.join(choices(ascii_uppercase, k=3))\n ) for u in range(nb_units)\n ]\n\n for unit in entity_unit_models:\n unit.clean()\n EntityUnitModel.add_root(instance=unit)\n\n self.entity_unit_models = self.entity_model.entityunitmodel_set.all()\n\n def create_vendors(self):\n self.logger.info('Creating vendors...')\n vendor_count = randint(10, 20)\n vendor_models = [\n self.entity_model.create_vendor(\n vendor_model_kwargs={\n 'vendor_name': self.fk.name() if random() > .7 else self.fk.company(),\n 'address_1': self.fk.street_address(),\n 'address_2': self.fk.building_number() if random() < .2 else None,\n 'city': self.fk.city(),\n 'state': self.fk.state_abbr(),\n 'zip_code': self.fk.postcode(),\n 'phone': self.fk.phone_number(),\n 'country': self.COUNTRY,\n 'email': self.fk.email(),\n 'website': self.fk.url(),\n 'active': True,\n 'hidden': False,\n 'description': 'A cool vendor description.'\n }, commit=False) for _ in range(vendor_count)\n ]\n\n for vendor in vendor_models:\n vendor.full_clean()\n\n self.vendor_models = VendorModel.objects.bulk_create(vendor_models, ignore_conflicts=True)\n\n def create_customers(self):\n self.logger.info(f'Creating entity customers...')\n customer_count = randint(10, 20)\n customer_models = [\n self.entity_model.create_customer(\n customer_model_kwargs={\n 'customer_name': self.fk.name() if random() > .2 else self.fk.company(),\n 'address_1': self.fk.street_address() + self.fk.street_suffix(),\n 'address_2': self.fk.building_number() if random() > .2 else None,\n 'city': self.fk.city(),\n 'state': self.fk.state_abbr(),\n 'zip_code': self.fk.postcode(),\n 'country': self.COUNTRY,\n 'phone': self.fk.phone_number(),\n 'email': self.fk.email(),\n 'website': self.fk.url(),\n 'active': True,\n 'hidden': False,\n 'description': f'A cool customer description. 
We love customers!'\n }) for _ in range(customer_count)\n ]\n\n for customer in customer_models:\n customer.full_clean()\n\n self.customer_models = CustomerModel.objects.bulk_create(customer_models, ignore_conflicts=True)\n\n def create_bank_accounts(self):\n self.logger.info(f'Creating entity accounts...')\n bank_account_models = [\n self.entity_model.create_bank_account(\n name=f'{self.entity_model.name} Checking Account',\n account_type=BankAccountModel.ACCOUNT_CHECKING,\n active=True,\n cash_account=choice(self.accounts_by_role[ASSET_CA_CASH]),\n bank_account_model_kwargs={\n 'aba_number': self.fk.swift(),\n 'routing_number': str(randint(0, 9999999)).zfill(9),\n 'account_number': str(randint(0, 9999999)).zfill(9)\n },\n commit=False\n ),\n self.entity_model.create_bank_account(\n name=f'{self.entity_model.name} Savings Account',\n account_type=BankAccountModel.ACCOUNT_SAVINGS,\n active=True,\n cash_account=choice(self.accounts_by_role[ASSET_CA_CASH]),\n bank_account_model_kwargs={\n 'aba_number': self.fk.swift(),\n 'routing_number': str(randint(0, 9999999)).zfill(9),\n 'account_number': str(randint(0, 9999999)).zfill(9)\n },\n commit=False\n )\n ]\n for ba in bank_account_models:\n ba.full_clean()\n\n self.bank_account_models = BankAccountModel.objects.bulk_create(bank_account_models, ignore_conflicts=True)\n\n def create_uom_models(self):\n self.logger.info(f'Creating entity Unit of Measures...')\n\n UOMs = {\n 'unit': 'Unit',\n 'ln-ft': 'Linear Feet',\n 'sq-ft': 'Square Fee t',\n 'lb': 'Pound',\n 'pallet': 'Pallet',\n 'man-hour': 'Man Hour'\n }\n\n uom_models = [\n self.entity_model.create_uom(\n unit_abbr=abbr,\n name=name,\n commit=False\n ) for abbr, name in UOMs.items()\n ]\n\n for uom in uom_models:\n uom.full_clean()\n\n self.uom_models = UnitOfMeasureModel.objects.bulk_create(uom_models)\n\n def create_products(self):\n self.logger.info(f'Creating entity product items...')\n product_count = randint(self.PRODUCTS_MIN, self.PRODUCTS_MAX)\n product_models = list()\n for i in range(product_count):\n # is Product....\n product_models.append(ItemModel(\n name=f'Product #{randint(1000, 9999)}',\n uom=choice(self.uom_models),\n item_role=ItemModel.ITEM_ROLE_PRODUCT,\n sku=generate_random_sku(),\n upc=generate_random_upc(),\n item_id=generate_random_item_id(),\n entity=self.entity_model,\n for_inventory=True,\n is_product_or_service=True,\n inventory_account=choice(self.accounts_by_role[ASSET_CA_INVENTORY]),\n earnings_account=choice(self.accounts_by_role[INCOME_OPERATIONAL]),\n cogs_account=choice(self.accounts_by_role[COGS]),\n additional_info=dict()\n ))\n\n for product in product_models:\n product.full_clean()\n\n ItemModel.objects.bulk_create(product_models)\n self.update_products()\n\n def create_services(self):\n self.logger.info(f'Creating entity service items...')\n product_count = randint(self.PRODUCTS_MIN, self.PRODUCTS_MAX)\n service_item_models = list()\n for i in range(product_count):\n service_item_models.append(ItemModel(\n name=f'Service #{randint(1000, 9999)}',\n uom=choice(self.uom_models),\n item_role=ItemModel.ITEM_ROLE_SERVICE,\n sku=generate_random_sku(),\n upc=generate_random_upc(),\n item_id=generate_random_item_id(),\n entity=self.entity_model,\n for_inventory=False,\n is_product_or_service=True,\n earnings_account=choice(self.accounts_by_role[INCOME_OPERATIONAL]),\n cogs_account=choice(self.accounts_by_role[COGS]),\n additional_info=dict()\n ))\n\n for service in service_item_models:\n service.full_clean()\n\n 
ItemModel.objects.bulk_create(service_item_models)\n self.update_services()\n\n def create_expenses(self):\n self.logger.info(f'Creating entity expense items...')\n expense_count = randint(self.PRODUCTS_MIN, self.PRODUCTS_MAX)\n expense_models = [\n ItemModel(\n name=f'Expense Item {randint(1000, 9999)}',\n uom=choice(self.uom_models),\n item_type=choice(ItemModel.ITEM_TYPE_CHOICES)[0],\n item_role=ItemModel.ITEM_ROLE_EXPENSE,\n sku=generate_random_sku(),\n upc=generate_random_upc(),\n item_id=generate_random_item_id(),\n entity=self.entity_model,\n is_product_or_service=False,\n for_inventory=False,\n expense_account=choice(self.accounts_by_role[EXPENSE_OPERATIONAL]),\n ) for _ in range(expense_count)\n ]\n\n for exp in expense_models:\n exp.full_clean()\n\n ItemModel.objects.bulk_create(expense_models)\n self.update_expenses()\n\n def create_inventories(self):\n self.logger.info(f'Creating entity inventory items...')\n inv_count = randint(self.PRODUCTS_MIN, self.PRODUCTS_MAX)\n inventory_models = [\n ItemModel(\n name=f'Inventory {randint(1000, 9999)}',\n uom=choice(self.uom_models),\n item_role=ItemModel.ITEM_ROLE_INVENTORY,\n item_type=choice(ItemModel.ITEM_TYPE_CHOICES)[0],\n item_id=generate_random_item_id(),\n entity=self.entity_model,\n for_inventory=True,\n is_product_or_service=True if random() > 0.6 else False,\n sku=generate_random_sku(),\n upc=generate_random_upc(),\n earnings_account=choice(self.accounts_by_role[INCOME_OPERATIONAL]),\n cogs_account=choice(self.accounts_by_role[COGS]),\n inventory_account=choice(self.accounts_by_role[ASSET_CA_INVENTORY]),\n ) for _ in range(inv_count)\n ]\n\n for inv in inventory_models:\n inv.full_clean()\n\n self.inventory_models = ItemModel.objects.bulk_create(inventory_models)\n\n def update_products(self):\n self.logger.info(f'Updating product catalog...')\n self.product_models = self.entity_model.get_items_products()\n\n def update_services(self):\n self.logger.info(f'Updating service catalog...')\n self.service_models = self.entity_model.get_items_services()\n\n def update_inventory(self):\n self.logger.info(f'Updating inventory...')\n self.inventory_models = self.entity_model.get_items_inventory()\n\n def update_expenses(self):\n self.logger.info(f'Updating expenses...')\n self.expense_models = self.entity_model.get_items_expenses()\n\n def create_estimate(self, date_draft: date):\n estimate_model = self.entity_model.create_estimate(\n estimate_title=f'Customer Estimate {date_draft}',\n date_draft=date_draft,\n customer_model=choice(self.customer_models),\n contract_terms=choice(EstimateModel.CONTRACT_TERMS_CHOICES_VALID),\n commit=True\n )\n self.logger.info(f'Creating entity estimate {estimate_model.estimate_number}...')\n\n estimate_items = [\n ItemTransactionModel(\n ce_model=estimate_model,\n item_model=choice(self.product_models),\n ce_quantity=round(random() * randint(5, 15), 2),\n ce_unit_cost_estimate=round(random() * randint(50, 100), 2),\n ce_unit_revenue_estimate=round(random() * randint(80, 120) * (1 + 0.2 * random()), 2),\n entity_unit=choice(self.entity_unit_models) if random() > .75 else None\n ) for _ in range(randint(1, 10))\n ]\n\n for i in estimate_items:\n i.full_clean()\n\n estimate_model.full_clean()\n estimate_model.update_state(itemtxs_qs=estimate_items)\n estimate_model.save()\n\n estimate_items = estimate_model.itemtransactionmodel_set.bulk_create(objs=estimate_items)\n\n if random() > 0.25:\n date_in_review = self.get_next_date(date_draft)\n estimate_model.mark_as_review(commit=True, 
date_in_review=date_in_review)\n if random() > 0.50:\n date_approved = self.get_next_date(date_in_review)\n estimate_model.mark_as_approved(commit=True, date_approved=date_approved)\n if random() > 0.25:\n date_completed = self.get_next_date(date_approved)\n estimate_model.mark_as_completed(commit=True, date_completed=date_completed)\n elif random() > 0.8:\n date_void = self.get_next_date(date_approved)\n estimate_model.mark_as_void(commit=True, date_void=date_void)\n elif random() > 0.8:\n date_canceled = self.get_next_date(date_in_review)\n estimate_model.mark_as_canceled(commit=True, date_canceled=date_canceled)\n\n def create_bill(self, date_draft: date):\n bill_model = self.entity_model.create_bill(\n vendor_model=choice(self.vendor_models),\n cash_account=choice(self.accounts_by_role[ASSET_CA_CASH]),\n prepaid_account=choice(self.accounts_by_role[ASSET_CA_PREPAID]),\n payable_account=choice(self.accounts_by_role[LIABILITY_CL_ACC_PAYABLE]),\n terms=choice(BillModel.TERM_CHOICES_VALID),\n date_draft=date_draft,\n additional_info=dict(),\n commit=True\n )\n\n self.logger.info(f'Creating entity bill {bill_model.bill_number}...')\n\n bill_items = [\n ItemTransactionModel(\n bill_model=bill_model,\n item_model=choice(self.expense_models),\n quantity=round(random() * randint(5, 15), 2),\n unit_cost=round(random() * randint(50, 100), 2),\n entity_unit=choice(self.entity_unit_models) if random() > .75 else None\n ) for _ in range(randint(1, 10))\n ]\n\n for bi in bill_items:\n bi.full_clean()\n\n bill_model.update_amount_due(itemtxs_qs=bill_items)\n bill_model.itemtransactionmodel_set.bulk_create(bill_items)\n bill_model.full_clean()\n bill_model.save()\n\n if random() > 0.25 and bill_model.amount_due:\n date_in_review = self.get_next_date(date_draft)\n bill_model.mark_as_review(commit=True, date_in_review=date_in_review)\n\n if random() > 0.50:\n date_approved = self.get_next_date(date_in_review)\n bill_model.mark_as_approved(commit=True,\n entity_slug=self.entity_model.slug,\n user_model=self.user_model,\n date_approved=date_approved)\n\n if random() > 0.25:\n paid_date = self.get_next_date(date_approved)\n bill_model.mark_as_paid(\n user_model=self.user_model,\n entity_slug=self.entity_model.slug,\n date_paid=paid_date,\n commit=True\n )\n elif random() > 0.8:\n void_date = self.get_next_date(date_approved)\n bill_model.mark_as_void(\n user_model=self.user_model,\n entity_slug=self.entity_model.slug,\n date_void=void_date,\n commit=True\n )\n elif random() > 0.8:\n canceled_date = self.get_next_date(date_in_review)\n bill_model.mark_as_canceled(date_canceled=canceled_date)\n\n def create_po(self, date_draft: date):\n\n po_model = self.entity_model.create_purchase_order(date_draft=date_draft)\n\n po_items = [\n ItemTransactionModel(\n po_model=po_model,\n item_model=choice(self.product_models),\n po_quantity=round(random() * randint(3, 10) + 3, 2),\n po_unit_cost=round(random() * randint(100, 800), 2),\n entity_unit=choice(self.entity_unit_models) if random() > .75 else None\n ) for _ in range(randint(1, 10))\n ]\n\n for poi in po_items:\n poi.full_clean()\n\n self.logger.info(f'Creating entity purchase order {po_model.po_number}...')\n po_items = po_model.itemtransactionmodel_set.bulk_create(po_items)\n po_model.update_state(itemtxs_qs=po_items)\n po_model.full_clean()\n po_model.save()\n\n # mark as approved...\n if random() > 0.25 and po_model.po_amount:\n date_review = self.get_next_date(date_draft)\n po_model.mark_as_review(commit=True, date_in_review=date_review)\n if random() > 
0.5:\n date_approved = self.get_next_date(date_review)\n po_model.mark_as_approved(commit=True, date_approved=date_approved)\n if random() > 0.25:\n # add a PO bill...\n date_fulfilled = self.get_next_date(date_approved)\n date_bill_draft = date_fulfilled - timedelta(days=randint(1, 3))\n\n bill_model = self.entity_model.create_bill(\n vendor_model=choice(self.vendor_models),\n terms=choice(BillModel.TERM_CHOICES_VALID),\n date_draft=date_bill_draft,\n cash_account=choice(self.accounts_by_role[ASSET_CA_CASH]),\n prepaid_account=choice(self.accounts_by_role[ASSET_CA_PREPAID]),\n payable_account=choice(self.accounts_by_role[LIABILITY_CL_ACC_PAYABLE]),\n commit=True\n )\n\n for po_i in po_items:\n po_i.po_total_amount = round(po_i.po_total_amount, 2)\n po_i.total_amount = round(po_i.po_total_amount, 2)\n po_i.quantity = round(po_i.po_quantity, 2)\n po_i.unit_cost = round(po_i.po_unit_cost, 2)\n po_i.bill_model = bill_model\n po_i.po_item_status = ItemTransactionModel.STATUS_RECEIVED\n po_i.full_clean()\n\n bill_model.update_amount_due(itemtxs_qs=po_items)\n bill_model.full_clean()\n bill_model.update_state()\n bill_model.save()\n\n po_model.itemtransactionmodel_set.bulk_update(\n po_items,\n fields=[\n 'po_total_amount',\n 'total_amount',\n 'po_quantity',\n 'quantity',\n 'po_unit_cost',\n 'unit_cost',\n 'bill_model',\n 'po_item_status'\n ])\n\n if random() > 0.25:\n date_bill_review = self.get_next_date(date_bill_draft)\n bill_model.mark_as_review(commit=True, date_in_review=date_bill_review)\n if random() > 0.50:\n bill_approve_date = self.get_next_date(date_bill_review)\n bill_model.mark_as_approved(commit=True,\n entity_slug=self.entity_model.slug,\n user_model=self.user_model,\n date_approved=bill_approve_date)\n if random() > 0.25:\n bill_paid_date = self.get_next_date(bill_approve_date)\n bill_model.mark_as_paid(\n user_model=self.user_model,\n entity_slug=self.entity_model.slug,\n commit=True,\n date_paid=bill_paid_date)\n\n if random() > 0.20:\n for po_i in po_items:\n po_i.po_item_status = ItemTransactionModel.STATUS_RECEIVED\n po_i.po_item_status = ItemTransactionModel.STATUS_RECEIVED\n po_i.full_clean()\n\n # todo: can pass po items??..\n po_model.itemtransactionmodel_set.bulk_update(po_items,\n fields=[\n 'po_item_status',\n 'updated'\n ])\n po_model.mark_as_fulfilled(\n date_fulfilled=date_fulfilled,\n commit=True)\n\n self.entity_model.update_inventory(\n # user_model=self.user_model,\n commit=True)\n\n self.update_products()\n self.update_inventory()\n\n def create_invoice(self, date_draft: date):\n invoice_model = self.entity_model.create_invoice(\n customer_model=choice(self.customer_models),\n terms=choice(InvoiceModel.TERM_CHOICES_VALID),\n cash_account=choice(self.accounts_by_role[ASSET_CA_CASH]),\n prepaid_account=choice(self.accounts_by_role[ASSET_CA_RECEIVABLES]),\n payable_account=choice(self.accounts_by_role[LIABILITY_CL_DEFERRED_REVENUE]),\n date_draft=date_draft,\n additional_info=dict(),\n commit=True\n )\n self.logger.info(f'Creating entity invoice {invoice_model.invoice_number}...')\n\n invoice_items = list()\n\n for i in range(randint(1, 10)):\n item_model: ItemModel = choice(self.product_models)\n quantity = Decimal.from_float(round(random() * randint(1, 2), 2))\n entity_unit = choice(self.entity_unit_models) if random() > .75 else None\n margin = Decimal(random() + 3.5)\n avg_cost = item_model.get_average_cost()\n if item_model.is_product():\n if item_model.inventory_received is not None and item_model.inventory_received > 0.0:\n if quantity > 
item_model.inventory_received:\n quantity = item_model.inventory_received\n\n # reducing inventory qty...\n item_model.inventory_received -= quantity\n item_model.inventory_received_value -= avg_cost * quantity\n unit_cost = avg_cost * margin\n else:\n quantity = 0.0\n unit_cost = 0.0\n\n if all([\n quantity > 0.00,\n unit_cost > 0.00\n ]):\n itm = ItemTransactionModel(\n invoice_model=invoice_model,\n item_model=item_model,\n quantity=quantity,\n unit_cost=unit_cost,\n entity_unit=entity_unit\n )\n itm.full_clean()\n invoice_items.append(itm)\n\n invoice_items = invoice_model.itemtransactionmodel_set.bulk_create(invoice_items)\n invoice_model.update_amount_due(itemtxs_qs=invoice_items)\n invoice_model.full_clean()\n invoice_model.save()\n\n if random() > 0.25 and invoice_model.amount_due:\n date_review = self.get_next_date(date_draft)\n\n try:\n invoice_model.mark_as_review(commit=True, date_in_review=date_review)\n except InvoiceModelValidationError as e:\n # invoice cannot be marked as in review...\n return\n\n if random() > 0.50:\n date_approved = self.get_next_date(date_review)\n invoice_model.mark_as_approved(entity_slug=self.entity_model.slug,\n user_model=self.user_model,\n commit=True,\n date_approved=date_approved)\n if random() > 0.25:\n date_paid = self.get_next_date(date_approved)\n invoice_model.mark_as_paid(\n entity_slug=self.entity_model.slug,\n user_model=self.user_model,\n date_paid=date_paid,\n commit=True\n )\n self.entity_model.update_inventory(\n # user_model=self.user_model,\n commit=True\n )\n self.update_inventory()\n self.update_products()\n elif random() > 0.8:\n date_void = self.get_next_date(date_approved)\n invoice_model.mark_as_void(\n entity_slug=self.entity_model.slug,\n user_model=self.user_model,\n date_void=date_void,\n commit=True\n )\n elif random() > 0.8:\n date_canceled = self.get_next_date(date_review)\n invoice_model.mark_as_canceled(commit=True, date_canceled=date_canceled)\n\n def fund_entity(self):\n\n self.logger.info(f'Funding entity...')\n capital_acc = choice(self.accounts_by_role[EQUITY_CAPITAL])\n cash_acc = choice(self.bank_account_models).cash_account\n\n self.entity_model.deposit_capital(\n cash_account=cash_acc,\n capital_account=capital_acc,\n amount=self.capital_contribution,\n je_timestamp=self.start_date,\n je_posted=True,\n ledger_posted=True,\n description='Entity Funding for Sample Data',\n )\n\n def create_closing_entry(self):\n closing_date = self.start_date + timedelta(days=int(self.DAYS_FORWARD / 2))\n ce_model, ce_txs = self.entity_model.close_books_for_month(\n year=closing_date.year,\n month=closing_date.month\n )\n ce_model.mark_as_posted(commit=True)\n\n def recount_inventory(self):\n self.logger.info(f'Recounting inventory...')\n self.entity_model.update_inventory(\n # user_model=self.user_model,\n commit=True\n )" }, { "identifier": "EntityModel", "path": "django_ledger/models/entity.py", "snippet": "class EntityModel(EntityModelAbstract):\n \"\"\"\n Entity Model Base Class From Abstract\n \"\"\"" }, { "identifier": "EntityModelQuerySet", "path": "django_ledger/models/entity.py", "snippet": "class EntityModelQuerySet(MP_NodeQuerySet):\n \"\"\"\n A custom defined EntityModel QuerySet.\n Inherits from the Materialized Path Node QuerySet Class from Django Treebeard.\n \"\"\"\n\n def hidden(self):\n \"\"\"\n A QuerySet of all hidden EntityModel.\n\n Returns\n -------\n EntityModelQuerySet\n A filtered QuerySet of hidden EntityModels only.\n \"\"\"\n return self.filter(hidden=True)\n\n def visible(self):\n \"\"\"\n A 
Queryset of all visible EntityModel.\n\n Returns\n -------\n EntityModelQuerySet\n A filtered QuerySet of visible EntityModels only.\n \"\"\"\n return self.filter(hidden=False)" } ]
from datetime import date, timedelta
from decimal import Decimal
from itertools import cycle
from logging import getLogger, DEBUG
from random import randint, choice
from typing import Optional
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.test import TestCase
from django.test.client import Client
from django.utils.timezone import get_default_timezone
from django_ledger.io.data_generator import EntityDataGenerator
from django_ledger.models.entity import EntityModel, EntityModelQuerySet
7,817
UserModel = get_user_model() class DjangoLedgerBaseTest(TestCase): FY_STARTS = None CAPITAL_CONTRIBUTION = None START_DATE = None DAYS_FORWARD = 9 * 30 TX_QUANTITY = 50 user_model = None TEST_DATA = list() CLIENT = None TZ = None N = None USER_EMAIL = None PASSWORD = None USERNAME = None logger = None accrual_cycle = cycle([True, False]) @classmethod def setUpTestData(cls): cls.logger = getLogger(__name__) cls.logger.setLevel(level=DEBUG) cls.USERNAME: str = 'testuser' cls.PASSWORD: str = 'NeverUseThisPassword12345' cls.USER_EMAIL: str = '[email protected]' cls.N: int = 2 cls.DAYS_FWD: int = randint(180, 180 * 3) cls.TZ = get_default_timezone() cls.START_DATE = cls.get_random_date() cls.CLIENT = Client(enforce_csrf_checks=False) try: cls.user_model = UserModel.objects.get(username=cls.USERNAME) except ObjectDoesNotExist: cls.user_model = UserModel.objects.create_user( username=cls.USERNAME, password=cls.PASSWORD, email=cls.USER_EMAIL, ) cls.FY_STARTS = list(str(i) for i in range(1, 13)) cls.TEST_DATA = list() cls.CAPITAL_CONTRIBUTION = Decimal('50000.00') cls.ENTITY_MODEL_QUERYSET: Optional[EntityModelQuerySet] = None cls.create_entity_models(n=cls.N) cls.populate_entity_models() @classmethod def get_random_date(cls) -> date: return date( year=choice(range(1990, 2020)), month=choice(range(1, 13)), day=choice(range(1, 28)) ) @classmethod def login_client(cls): # cls.logger.info('Logging in client...') cls.CLIENT.login( username=cls.USERNAME, password=cls.PASSWORD ) @classmethod def logout_client(cls): # cls.logger.info('Logging out client...') cls.CLIENT.logout() @classmethod def refresh_test_data(cls, n: int = None): N = n if n else cls.N cls.TEST_DATA = [cls.get_random_entity_data() for _ in range(N)] @classmethod def get_random_entity_data(cls) -> dict: return { 'slug': f'a-cool-slug-{randint(10000, 99999)}', 'name': f'Testing Inc-{randint(100000, 999999)}', 'address_1': f'{randint(100000, 999999)} Main St', 'address_2': f'Suite {randint(1000, 9999)}', 'city': 'Charlotte', 'state': 'NC', 'zip_code': '28202', 'country': 'US', 'email': '[email protected]', 'website': 'http://www.mytestingco.com', 'fy_start_month': choice(cls.FY_STARTS), 'admin': cls.user_model, 'accrual_method': next(cls.accrual_cycle) } def get_random_entity_model(self) -> EntityModel: if self.ENTITY_MODEL_QUERYSET: return choice(self.ENTITY_MODEL_QUERYSET) raise ValueError('EntityModels have not been populated.') @classmethod def create_entity_models(cls, save=True, n: int = 5): cls.refresh_test_data(n) for ent_data in cls.TEST_DATA: entity_model = EntityModel.add_root(**ent_data) entity_model.admin = cls.user_model entity_model.clean() if save: entity_model.save() @classmethod def populate_entity_models(cls): entities_qs = EntityModel.objects.all() for entity_model in entities_qs:
UserModel = get_user_model() class DjangoLedgerBaseTest(TestCase): FY_STARTS = None CAPITAL_CONTRIBUTION = None START_DATE = None DAYS_FORWARD = 9 * 30 TX_QUANTITY = 50 user_model = None TEST_DATA = list() CLIENT = None TZ = None N = None USER_EMAIL = None PASSWORD = None USERNAME = None logger = None accrual_cycle = cycle([True, False]) @classmethod def setUpTestData(cls): cls.logger = getLogger(__name__) cls.logger.setLevel(level=DEBUG) cls.USERNAME: str = 'testuser' cls.PASSWORD: str = 'NeverUseThisPassword12345' cls.USER_EMAIL: str = '[email protected]' cls.N: int = 2 cls.DAYS_FWD: int = randint(180, 180 * 3) cls.TZ = get_default_timezone() cls.START_DATE = cls.get_random_date() cls.CLIENT = Client(enforce_csrf_checks=False) try: cls.user_model = UserModel.objects.get(username=cls.USERNAME) except ObjectDoesNotExist: cls.user_model = UserModel.objects.create_user( username=cls.USERNAME, password=cls.PASSWORD, email=cls.USER_EMAIL, ) cls.FY_STARTS = list(str(i) for i in range(1, 13)) cls.TEST_DATA = list() cls.CAPITAL_CONTRIBUTION = Decimal('50000.00') cls.ENTITY_MODEL_QUERYSET: Optional[EntityModelQuerySet] = None cls.create_entity_models(n=cls.N) cls.populate_entity_models() @classmethod def get_random_date(cls) -> date: return date( year=choice(range(1990, 2020)), month=choice(range(1, 13)), day=choice(range(1, 28)) ) @classmethod def login_client(cls): # cls.logger.info('Logging in client...') cls.CLIENT.login( username=cls.USERNAME, password=cls.PASSWORD ) @classmethod def logout_client(cls): # cls.logger.info('Logging out client...') cls.CLIENT.logout() @classmethod def refresh_test_data(cls, n: int = None): N = n if n else cls.N cls.TEST_DATA = [cls.get_random_entity_data() for _ in range(N)] @classmethod def get_random_entity_data(cls) -> dict: return { 'slug': f'a-cool-slug-{randint(10000, 99999)}', 'name': f'Testing Inc-{randint(100000, 999999)}', 'address_1': f'{randint(100000, 999999)} Main St', 'address_2': f'Suite {randint(1000, 9999)}', 'city': 'Charlotte', 'state': 'NC', 'zip_code': '28202', 'country': 'US', 'email': '[email protected]', 'website': 'http://www.mytestingco.com', 'fy_start_month': choice(cls.FY_STARTS), 'admin': cls.user_model, 'accrual_method': next(cls.accrual_cycle) } def get_random_entity_model(self) -> EntityModel: if self.ENTITY_MODEL_QUERYSET: return choice(self.ENTITY_MODEL_QUERYSET) raise ValueError('EntityModels have not been populated.') @classmethod def create_entity_models(cls, save=True, n: int = 5): cls.refresh_test_data(n) for ent_data in cls.TEST_DATA: entity_model = EntityModel.add_root(**ent_data) entity_model.admin = cls.user_model entity_model.clean() if save: entity_model.save() @classmethod def populate_entity_models(cls): entities_qs = EntityModel.objects.all() for entity_model in entities_qs:
data_generator = EntityDataGenerator(
0
2023-10-20 01:07:20+00:00
12k
hitz-zentroa/This-is-not-a-Dataset
run.py
[ { "identifier": "load_model", "path": "load_model.py", "snippet": "def load_model(\n inference: bool,\n model_weights_name_or_path: str,\n quantization: Optional[int] = None,\n use_lora: bool = False,\n lora_weights_name_or_path: Optional[str] = None,\n lora_target_modules: Optional[List[str]] = [\"all\"],\n lora_r: Optional[int] = 8,\n lora_alpha: Optional[int] = 16,\n lora_dropout: Optional[float] = 0.05,\n torch_dtype: Optional[str] = None,\n force_auto_device_map: bool = False,\n use_gradient_checkpointing: bool = False,\n trust_remote_code: bool = False,\n use_flash_attention: bool = False,\n use_better_transformer: bool = False,\n fsdp_training: bool = False,\n max_memory_MB: Optional[int] = None,\n) -> Tuple[PreTrainedModel, PreTrainedTokenizerBase]:\n \"\"\"\n Load any Decoder model for training.\n\n Args:\n inference (`bool`):\n Whether to load the model for inference or training. If set to `True`, the model will be loaded\n in evaluation mode. In this case, if use_lora is set to `True`, you must provide the path to the\n LoRA weights. Defaults to `False`.\n model_weights_name_or_path (`str`):\n The path to your local model weights and tokenizer or huggingface model name.\n The list of labels to add to the tokenizer. Defaults to `None`.\n quantization (`int`, optional):\n '4' or '8' for 4 bits or 8 bits quantization or None for 16/32bits training. Defaults to `None`.\n\n Requires bitsandbytes library: https://github.com/TimDettmers/bitsandbytes\n use_lora (`bool`, optional):\n Whether to use LORA. Defaults to False.\n\n See https://arxiv.org/pdf/2106.09685.pdf for more details.\n\n Requires huggingface PEFT library: https://github.com/huggingface/peft\n lora_weights_name_or_path (`Optional[str]`, optional):\n The name or path to the pre-trained LORA model weights. You can also provide\n a huggingface hub model name to load the weights from there. If not provided,\n the weights will be initialized randomly, this requires training the model.\n Defaults to `None`.\n lora_target_modules (`Optional[List[str]]`, optional):\n The list of modules to apply LORA to. If not provided, we will use PEFT\n default modules. Defaults to `None`.\n lora_r (`Optional[int]`, optional):\n Lora attention dimension. Defaults to `8`.\n lora_alpha (`Optional[int]`, optional):\n The alpha parameter for Lora scaling. Defaults to `16`.\n lora_dropout (`Optional[float]`, optional):\n The dropout probability for Lora layers. Defaults to 0.05.\n torch_dtype (`Optional[str]`, optional):\n Override the default `torch.dtype` and load the model under this dtype. If\n `auto` is passed, the dtype will be automatically derived from the model's\n weights. Defaults to `None`.\n force_auto_device_map (`bool`, optional):\n Whether to force the use of the auto device map. If set to True, the model will be split across\n GPUs and CPU to fit the model in memory. If set to False, a full copy of the model will be loaded\n into each GPU. Defaults to False.\n use_gradient_checkpointing (`bool`, optiona):\n Whether to use gradient checkpointing for training\n trust_remote_code (`bool`, optional):\n Trust the remote code from HuggingFace model hub. Defaults to False.\n use_flash_attention (`bool`, optional):\n Whether to use Flash Attention. Defaults to True. 
Flash attention must be installed, see:\n 'https://github.com/Dao-AILab/flash-attention' for more details.\n use_better_transformer (`bool`, optional):\n Whether to transform the model using Better Transformer library:\n https://huggingface.co/docs/optimum/bettertransformer/overview. Requires optimum\n 'https://huggingface.co/docs/optimum/installation'. Only supported for inference!\n Defaults to False.\n fsdp_training: (`bool`, optional):\n Whether Fully Sharded Data Parallelism is enabled for training. Defaults to False.\n Used to prevent casting layers to fp32 if the model is already in fp16, which causes\n an error: ValueError: Must flatten tensors with uniform dtype but got torch.float16 and torch.float32\n max_memory_MB (`int`):\n Free memory per gpu in MB. Used to compute the device map when force_auto_device_map is set to True.\n Raises:\n `ValueError`:\n is raised when `int8_quantization=True` but `use_lora=False`.\n\n Returns:\n `Tuple[PreTrainedModel, PreTrainedTokenizerBase]`:\n The loaded model and tokenizer.\n \"\"\"\n\n # Sanity checks\n\n if isinstance(quantization, str):\n quantization = int(quantization)\n assert (quantization is None) or (\n quantization in [4, 8]\n ), f\"Quantization must be 4 or 8, or None for FP32/FP16 training. You passed: {quantization}\"\n\n if not inference and quantization is not None and not use_lora:\n raise ValueError(\n \"'Quantization' == 4/8 is only supported with LoRA. If you want \"\n \"to train a 4/8bits quantified model, you must set `use_lora=True`. If you want to \"\n \"use a 4/8 bits optimizer, set `quantization=None` and choose a 4/8 bit optimizer using 'optim' \"\n \"argument (e.g 'adamw_bnb_8bit', 'lion_8bit', 'paged_adamw_8bit', ...).\"\n )\n\n if inference and use_lora and lora_weights_name_or_path is None:\n raise ValueError(\n \"You must provide the path to the LoRA weights when loading the model for inference.\"\n )\n\n if use_better_transformer and not inference:\n logging.warning(\n \"Better Transformer is only supported for inference. Better Transformers does not support \"\n \"attention mask for training, therefore it is not compatible with CoLLIE training. See \"\n \"https://huggingface.co/docs/optimum/bettertransformer/overview for more details. We will \"\n \"set use_better_transformer=False.\"\n )\n use_better_transformer = False\n\n if use_better_transformer and use_flash_attention:\n raise ValueError(\n \"You cannot use both Flash Attention and Better Transformer flags. Flash Attention is already part of\"\n \" Better Transformers, so you can just set use_better_transformer=True to use Flash Attention. The Flash\"\n \" Attention flag is intended for patching HuggingFace models.\"\n )\n\n if lora_weights_name_or_path is not None and not use_lora:\n logging.warning(\n \"You provided a path to LoRA weights but use_lora is set to False. 
We will set use_lora=True.\"\n )\n use_lora = True\n\n logging.info(f\"Loading model model from {model_weights_name_or_path}\")\n\n MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.update(\n {\n \"stablelm_epoch\": \"LlamaForCausalLM\",\n }\n )\n\n # Get the device map config\n\n device_map, max_memory = get_device_map(\n force_auto_device_map=force_auto_device_map,\n max_memory_MB=max_memory_MB,\n use_better_transformer=use_better_transformer,\n )\n\n # Load the model config\n\n if use_lora:\n config = AutoConfig.from_pretrained(\n model_weights_name_or_path,\n trust_remote_code=trust_remote_code,\n pretraining_tp=1, # Fix mat1 and mat2 shapes cannot be multiplied error with LLaMA-2\n # See https://github.com/huggingface/transformers/pull/24906\n )\n else:\n config = AutoConfig.from_pretrained(\n model_weights_name_or_path,\n trust_remote_code=trust_remote_code,\n )\n\n # Load the model tokenizer\n\n tokenizer: PreTrainedTokenizerBase = AutoTokenizer.from_pretrained(\n model_weights_name_or_path,\n add_eos_token=True,\n trust_remote_code=trust_remote_code,\n legacy=True, # This library was developed with the legacy tokenizer.\n # It might or might not work with the latest updates to the T5 tokenizers. So we set legacy=True to be safe.\n )\n\n if tokenizer.pad_token_id is None:\n if \"<|padding|>\" in tokenizer.get_vocab():\n # StabilityLM specific fix\n tokenizer.add_special_tokens({\"pad_token\": \"<|padding|>\"})\n elif tokenizer.unk_token is not None:\n logging.warning(\n \"Tokenizer does not have a pad token, we will use the unk token as pad token.\"\n )\n tokenizer.pad_token_id = tokenizer.unk_token_id\n else:\n logging.warning(\n \"Tokenizer does not have a pad token. We will use the eos token as pad token.\"\n )\n tokenizer.pad_token_id = tokenizer.eos_token_id\n\n # Load the model weights\n\n # Get the quantization config\n quant_args = {}\n torch_dtype = (\n torch_dtype if torch_dtype in [\"auto\", None] else getattr(torch, torch_dtype)\n )\n\n if quantization is not None:\n quant_args = (\n {\"load_in_4bit\": True} if quantization == 4 else {\"load_in_8bit\": True}\n )\n if quantization == 4:\n bnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.bfloat16\n if torch_dtype in [\"auto\", None]\n else torch_dtype,\n )\n\n else:\n bnb_config = BitsAndBytesConfig(\n load_in_8bit=True,\n )\n logging.info(\n f\"Bits and Bytes config: {json.dumps(bnb_config.to_dict(),indent=4,ensure_ascii=False)}\"\n )\n else:\n logging.info(f\"Loading model with dtype: {torch_dtype}\")\n bnb_config = None\n\n # Get the correct load function for each model_type\n if config.model_type in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES:\n logging.warning(\n f\"Model {model_weights_name_or_path} is a encoder-decoder model. We will load it as a Seq2SeqLM model.\"\n )\n\n load_fn = AutoModelForSeq2SeqLM\n model_type = \"seq2seq\"\n\n elif config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES:\n logging.warning(\n f\"Model {model_weights_name_or_path} is an decoder-only model. 
We will load it as a CausalLM model.\"\n )\n\n load_fn = AutoModelForCausalLM\n tokenizer.padding_side = \"left\"\n model_type = \"causal\"\n\n else:\n raise ValueError(\n f\"Model {model_weights_name_or_path} of type {config.model_type} is not supported by CoLLIE.\"\n \"Supported models are:\\n\"\n f\"Seq2SeqLM: {MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES}\\n\"\n f\"CausalLM: {MODEL_FOR_CAUSAL_LM_MAPPING_NAMES}\\n\"\n )\n\n # Load the model weights\n # Flash attention 2 was added to HuggingFace transformers very recently. Let's add it as kwargs to the load function\n # so if it is set to False, we can load the model in older versions of transformers.\n if use_flash_attention:\n kwargs = {\"use_flash_attention_2\": True}\n else:\n kwargs = {}\n\n model: PreTrainedModel = load_fn.from_pretrained(\n pretrained_model_name_or_path=model_weights_name_or_path,\n device_map=device_map,\n max_memory=max_memory,\n quantization_config=bnb_config,\n torch_dtype=torch_dtype,\n config=config,\n trust_remote_code=trust_remote_code,\n **quant_args,\n **kwargs,\n )\n\n logging.info(f\"Model dtype: {model.dtype}\")\n logging.info(\n \"Total model memory footprint: \"\n + str(model.get_memory_footprint() / 1e6)\n + \" MB\"\n )\n\n # Prepare the model for k-bit training and enable gradient checkpointing\n if quantization is not None and not inference:\n from peft import prepare_model_for_kbit_training\n\n model = prepare_model_for_kbit_training(\n model, use_gradient_checkpointing=use_gradient_checkpointing\n )\n else:\n if use_gradient_checkpointing and not inference:\n model.gradient_checkpointing_enable()\n\n # Load LoRA weights\n if use_lora:\n from peft import LoraConfig, PeftModel, TaskType, get_peft_model\n\n if not inference:\n model.enable_input_require_grads() # Enables the gradients for the input embeddings\n\n if lora_weights_name_or_path is None:\n logging.info(\n \"No pretrained LORA weights provided, we will initialize the weights randomly.\"\n )\n\n if lora_target_modules is None or (\n lora_target_modules is not None and len(lora_target_modules) == 0\n ):\n logging.warning(\n \"No target modules provided, will use the default modules for the\"\n \" model in huggingface PEFT library. \"\n )\n lora_target_modules = None\n\n if lora_target_modules == [\"all\"]:\n logging.warning(\n \"You provided 'all' as target modules, we will use all the model to which LoRA can be applied.\"\n )\n lora_target_modules = find_all_linear_names(\n model, quantization=quantization\n )\n\n lora_config = LoraConfig(\n r=lora_r,\n lora_alpha=lora_alpha,\n lora_dropout=lora_dropout,\n bias=\"none\",\n task_type=TaskType.CAUSAL_LM\n if model_type == \"causal\"\n else TaskType.SEQ_2_SEQ_LM,\n target_modules=lora_target_modules,\n )\n\n model = get_peft_model(model, lora_config)\n\n else:\n logging.info(\n f\"Loading pretrained LORA weights from {lora_weights_name_or_path}\"\n )\n\n model = PeftModel.from_pretrained(model, lora_weights_name_or_path)\n\n logging.info(f\"\\nLoRA config:\\n{model.peft_config}\\n\")\n\n if inference:\n if use_lora:\n if quantization is None:\n # If we are not using quantization, we merge the LoRA layers into the model for faster inference.\n # This is not possible if we are using 4/8 bit quantization.\n logging.info(\"Merging LoRA layers into the model for faster inference.\")\n model = model.merge_and_unload()\n else:\n logging.info(\n \"Quantization is enabled, we will not merge LoRA layers into the model. 
Inference will be slower.\"\n )\n else:\n trainable_params, total_params, trainable_percentage = get_trainable_parameters(\n model\n )\n logging.info(\n f\"---> Trainable params: {trainable_params} || all params: {total_params} ||\"\n f\" trainable%: {round(trainable_percentage,6)}\\n\"\n )\n\n return model, tokenizer" }, { "identifier": "get_dataloader", "path": "dataset.py", "snippet": "def get_dataloader(\n tokenizer: PreTrainedTokenizerBase,\n split: str,\n is_encoder_decoder: bool = False,\n max_length: int = 512,\n conv_template: str = None,\n batch_size: int = 1,\n prompt_loss_weight: float = 0.05,\n add_bos_token: bool = False,\n num_workers: int = min(8, os.cpu_count()),\n pattern: str = None,\n only_affirmative: bool = False,\n only_negative: bool = False,\n only_non_distractor: bool = False,\n only_distractor: bool = False,\n) -> DataLoader:\n \"\"\"\n Get a dataloader for a dataset.\n\n Args:\n tokenizer (`PreTrainedTokenizerBase`):\n The tokenizer to use.\n split ('list'):\n The split to load (train, dev, test, all).\n is_encoder_decoder (`bool`, optional):\n Whether the model is an encoder-decoder model. Defaults to `False`.\n max_length (`int`, optional):\n The maximum length of the input. Defaults to `2048`.\n conv_template (`str`, optional):\n The conversation template to use. Defaults to `None`. If `None` we will return the prompt.\n batch_size (`int`, optional):\n The batch size. Defaults to `1`.\n prompt_loss_weight (`float`, optional):\n The weight of the prompt tokens in the loss. If set to '0.05' the prompt tokens will have a total weight\n of 5% in the loss while the result tokens will have a total weight of 95%. Defaults to `0.05`.\n add_bos_token (`bool`, optional):\n Whether to add the beginning of sentence token to the input. Defaults to `False`.\n num_workers (`int`, optional):\n The number of workers to use for the dataloader. Defaults to `0`.\n pattern (`str`, optional):\n The pattern to use for training. Defaults to `None`.\n only_affirmative (`bool`, optional):\n Whether to only load affirmative examples for training. Defaults to `False`.\n only_negative (`bool`, optional):\n Whether to only load negative examples for training. Defaults to `False`.\n only_non_distractor (`bool`, optional):\n Whether to only load non-distractor examples for training. Defaults to `False`.\n only_distractor (`bool`, optional):\n Whether to only load distractor examples for training. Defaults to `False`.\n\n\n Returns:\n `DataLoader`: The dataloader.\n \"\"\"\n\n data_collator = DataCollatorForSeq2Seq(\n tokenizer,\n padding=True,\n label_pad_token_id=-100, # tokenizer.pad_token_id,\n # pad_to_multiple_of=8, # May be faster on some hardware\n )\n\n dataset = ThisIsNotADataset(\n tokenizer=tokenizer,\n split=split,\n is_encoder_decoder=is_encoder_decoder,\n max_length=max_length,\n conv_template=conv_template,\n prompt_loss_weight=prompt_loss_weight,\n add_bos_token=add_bos_token,\n pattern=pattern,\n only_affirmative=only_affirmative,\n only_negative=only_negative,\n only_non_distractor=only_non_distractor,\n only_distractor=only_distractor,\n )\n\n return DataLoader(\n dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=split == \"train\",\n collate_fn=data_collator,\n pin_memory=True,\n )" }, { "identifier": "evaluate", "path": "evaluate.py", "snippet": "def evaluate(predictions_path: str, output_path: Optional[str] = None) -> dict:\n \"\"\"\n Evaluate the predictions of a model\n Args:\n predictions_path: Path to the predictions file. 
It should be a jsonl with the fields: 'pattern_id',\n 'pattern', 'test_id', 'negation_type', 'semantic_type', 'syntactic_scope', 'isDistractor',\n 'label', 'sentence', 'prediction'\n output_path: Path to the output file. If None, the output will be printed to stdout\n Returns:\n A dictionary with the scores\n The scorer will output the following metrics:\n - **all_affirmations**: Accuracy of the model in affirmative sentences\n - **all_negations**: Accuracy of the model in negative sentences\n - **all**: (Overall) Accuracy of the model in all sentences\n - **input_affirmation**: Accuracy of the model in affirmative sentences without distractors\n - **input_negation**: Accuracy of the model in negative sentences without distractors\n - **distractor_affirmation**: Accuracy of the model in affirmative sentences with distractors\n - **distractor_negation**: Accuracy of the model in negative sentences with distractors\n - **Negation_analysis**: Fine-grained analysis of the model in negative sentences (verbal, analytic,\n clausal, non_verbal, synthetic, subclausal negation types)\n - **Synonymy1, Hypernymy, Part...**: Fine-grained analysis of the model in each pattern\n \"\"\"\n\n print(\n \"\"\"\n*************************************** Running evaluation ***************************************\nThe scorer will output the following metrics:\n - **all_affirmations**: Accuracy of the model in affirmative sentences\n - **all_negations**: Accuracy of the model in negative sentences\n - **all**: (Overall) Accuracy of the model in all sentences\n - **input_affirmation**: Accuracy of the model in affirmative sentences without distractors\n - **input_negation**: Accuracy of the model in negative sentences without distractors\n - **distractor_affirmation**: Accuracy of the model in affirmative sentences with distractors\n - **distractor_negation**: Accuracy of the model in negative sentences with distractors\n - **Negation_analysis**: Fine-grained analysis of the model in negative sentences (verbal, analytic,\n clausal, non_verbal, synthetic, subclausal negation types)\n - **Synonymy1, Hypernymy, Part...**: Fine-grained analysis of the model in each pattern\n**************************************************************************************************\n \"\"\"\n )\n dataset_pattern = {\n \"Synonymy1\": [],\n \"Antonymy1\": [],\n \"Synonymy2\": [],\n \"Antonymy2\": [],\n \"Hypernymy\": [],\n \"Part\": [],\n \"Substance\": [],\n \"Member\": [],\n \"Agent\": [],\n \"Instrument\": [],\n \"Result\": [],\n }\n\n scorer = Scorer()\n coherence_scorer = Coherence_Scorer()\n\n coherence_scorer.from_file(predictions_path)\n with open(predictions_path, \"r\", encoding=\"utf8\") as file:\n for line in file:\n example = json.loads(line.strip())\n pattern = example[\"pattern\"]\n dataset_pattern[pattern].append(example)\n scorer.add_example(\n negation_type=example[\"negation_type\"],\n semantic_type=example[\"semantic_type\"],\n syntactic_scope=example[\"syntactic_scope\"],\n isDistractor=example[\"isDistractor\"],\n gold_label=example[\"label\"],\n predicted_label=example[\"prediction\"],\n )\n\n scores = scorer.compute_scores()\n coherence_scorer = Coherence_Scorer.from_file(predictions_path)\n scores[\"coherence_scores\"] = coherence_scorer.compute_scores()\n\n for pattern in dataset_pattern:\n scorer = Scorer()\n coherence_scorer = Coherence_Scorer()\n coherence_scorer.add_pattern(dataset_pattern[pattern])\n for example in dataset_pattern[pattern]:\n scorer.add_example(\n 
negation_type=example[\"negation_type\"],\n semantic_type=example[\"semantic_type\"],\n syntactic_scope=example[\"syntactic_scope\"],\n isDistractor=example[\"isDistractor\"],\n gold_label=example[\"label\"],\n predicted_label=example[\"prediction\"],\n )\n scores[pattern] = scorer.compute_scores()\n scores[pattern][\"coherence_scores\"] = coherence_scorer.compute_scores()\n\n if output_path is not None:\n print(f\"Saving scores to {output_path}\")\n with open(output_path, \"w\", encoding=\"utf8\") as file:\n print(json.dumps(scores, ensure_ascii=False, indent=4), file=file)\n else:\n print(json.dumps(scores, ensure_ascii=False, indent=4))\n\n print(\"*** Evaluation finished ***\")\n return scores" }, { "identifier": "DataTrainingArguments", "path": "config.py", "snippet": "class DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n do_predict_full_dataset: bool = field(\n default=False,\n metadata={\n \"help\": \"Whether to run predictions on the full dataset. If True, the model will be evaluated on the \"\n \"full dataset. If False, the model will be evaluated on the test set. Defaults to False.\"\n },\n )\n max_seq_length: int = field(\n default=512,\n metadata={\n \"help\": (\n \"The maximum total input sequence length after tokenization. Sequences\"\n \" longer than this will be truncated, sequences shorter will be padded.\"\n )\n },\n )\n\n prompt_loss_weight: float = field(\n default=0.05,\n metadata={\n \"help\": (\n \"The weight of the prompt tokens in the loss. If set to '0.05' the prompt tokens will have a total\"\n \" weight of 5% in the loss while the result tokens will have a total weight of 95%. Only used for\"\n \" computing the loss in the training data. Defaults to `0.05`.\"\n )\n },\n )\n\n force_auto_device_map: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to force the use of the auto device map. If set to True, the model will be split across \"\n \"GPUs and CPU to fit the model in memory. If set to False, a full copy of the model will be loaded \"\n \"into each GPU. Defaults to False.\"\n )\n },\n )\n\n pattern: Optional[str] = field(\n default=None,\n metadata={\n \"help\": (\n \"The pattern to use for training. If not specified, all patterns will be used.\"\n ),\n \"choices\": [\n \"Synonymy1\",\n \"Antonymy1\",\n \"Synonymy2\",\n \"Antonymy2\",\n \"Hypernymy\",\n \"Part\",\n \"Substance\",\n \"Member\",\n \"Agent\",\n \"Instrument\",\n \"Result\",\n ],\n },\n )\n\n only_affirmative: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to only load affirmative examples for training. Defaults to `False`.\"\n )\n },\n )\n\n only_negative: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to only load negative examples for training. Defaults to `False`.\"\n )\n },\n )\n\n only_non_distractor: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to only load non-distractor examples for training. Defaults to `False`.\"\n )\n },\n )\n\n only_distractor: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to only load distractor examples for training. 
Defaults to `False`.\"\n )\n },\n )" }, { "identifier": "ModelArguments", "path": "config.py", "snippet": "class ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.\n \"\"\"\n\n model_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The local path or huggingface hub name of the model and tokenizer to use.\"\n },\n )\n\n torch_dtype: Optional[str] = field(\n default=None,\n metadata={\n \"help\": (\n \"Override the default `torch.dtype` and load the model under this\"\n \" dtype. If `auto` is passed, the dtype will be automatically derived\"\n \" from the model's weights.\"\n ),\n \"choices\": [\"auto\", \"bfloat16\", \"float16\", \"float32\"],\n },\n )\n\n use_lora: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to use LoRA. If True, the model will be trained with LoRA: https://arxiv.org/abs/2106.09685\"\n )\n },\n )\n\n quantization: Optional[int] = field(\n default=None,\n metadata={\n \"help\": (\n \"Whether to use '4' or '8' bit quantization. Requires bitsandbytes library:\"\n \" https://github.com/TimDettmers/bitsandbytes\"\n )\n },\n )\n lora_weights_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": (\n \"If the model has been trained with LoRA, \"\n \"path or huggingface hub name or local path to the pretrained weights.\"\n )\n },\n )\n\n lora_r: Optional[int] = field(\n default=8,\n metadata={\"help\": \"Lora attention dimension.\"},\n )\n\n lora_alpha: Optional[float] = field(\n default=16,\n metadata={\"help\": \"The alpha parameter for Lora scaling.\"},\n )\n lora_dropout: Optional[float] = field(\n default=0.05,\n metadata={\"help\": \"The dropout probability for Lora layers.\"},\n )\n\n lora_target_modules: Optional[List[str]] = field(\n default_factory=list,\n metadata={\n \"help\": (\n \"The target modules to which LoRA will be applied. If not specified, We\"\n \" will use the default modules for the model in huggingface PEFT library.\"\n )\n },\n )\n\n conversation_template: str = field(\n default=None,\n metadata={\n \"help\": (\n \"The config template to use to generate conversations. See \"\n \"https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py for more details\"\n )\n },\n )\n\n add_bos_token: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to add the BOS token to the beginning of the prompt (Encoder-only models). Defaults to False.\"\n )\n },\n )\n\n use_flash_attention: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to use the FlashAttention. If True, we will use FlashAttention. Be careful, not all models \"\n \"support FlashAttention. See https://github.com/huggingface/transformers/issues/26350. 
\"\n \"Defaults to False.\"\n )\n },\n )" }, { "identifier": "get_optimizer", "path": "optimizer.py", "snippet": "def get_optimizer(training_args: Seq2SeqTrainingArguments, model: PreTrainedModel):\n decay_parameters = get_parameter_names(model, ALL_LAYERNORM_LAYERS)\n decay_parameters = [name for name in decay_parameters if \"bias\" not in name]\n optimizer_grouped_parameters = [\n {\n \"params\": [\n p\n for n, p in model.named_parameters()\n if (n in decay_parameters and p.requires_grad)\n ],\n \"weight_decay\": training_args.weight_decay,\n },\n {\n \"params\": [\n p\n for n, p in model.named_parameters()\n if (n not in decay_parameters and p.requires_grad)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n\n optimizer_kwargs = {\"lr\": training_args.learning_rate}\n\n adam_kwargs = {\n \"betas\": (training_args.adam_beta1, training_args.adam_beta2),\n \"eps\": training_args.adam_epsilon,\n }\n if training_args.optim == OptimizerNames.ADAFACTOR:\n from transformers.optimization import Adafactor\n\n optimizer_cls = Adafactor\n optimizer_kwargs.update({\"scale_parameter\": False, \"relative_step\": False})\n elif training_args.optim == OptimizerNames.ADAMW_HF:\n from transformers.optimization import AdamW\n\n optimizer_cls = AdamW\n optimizer_kwargs.update(adam_kwargs)\n elif training_args.optim in [\n OptimizerNames.ADAMW_TORCH,\n OptimizerNames.ADAMW_TORCH_FUSED,\n ]:\n from torch.optim import AdamW\n\n optimizer_cls = AdamW\n optimizer_kwargs.update(adam_kwargs)\n if training_args.optim == OptimizerNames.ADAMW_TORCH_FUSED:\n optimizer_kwargs.update({\"fused\": True})\n elif training_args.optim == OptimizerNames.ADAMW_TORCH_XLA:\n try:\n from torch_xla.amp.syncfree import AdamW\n\n optimizer_cls = AdamW\n optimizer_kwargs.update(adam_kwargs)\n except ImportError:\n raise ValueError(\"Trainer failed to import syncfree AdamW from torch_xla.\")\n elif training_args.optim == OptimizerNames.ADAMW_APEX_FUSED:\n try:\n from apex.optimizers import FusedAdam\n\n optimizer_cls = FusedAdam\n optimizer_kwargs.update(adam_kwargs)\n except ImportError:\n raise ValueError(\n \"Trainer tried to instantiate apex FusedAdam but apex is not installed!\"\n )\n elif training_args.optim in [\n OptimizerNames.ADAMW_BNB,\n OptimizerNames.ADAMW_8BIT,\n OptimizerNames.PAGED_ADAMW,\n OptimizerNames.PAGED_ADAMW_8BIT,\n OptimizerNames.LION,\n OptimizerNames.LION_8BIT,\n OptimizerNames.PAGED_LION,\n OptimizerNames.PAGED_LION_8BIT,\n ]:\n try:\n from bitsandbytes.optim import AdamW, Lion\n\n is_paged = False\n optim_bits = 32\n optimizer_cls = None\n additional_optim_kwargs = adam_kwargs\n if \"paged\" in training_args.optim:\n is_paged = True\n if \"8bit\" in training_args.optim:\n optim_bits = 8\n if \"adam\" in training_args.optim:\n optimizer_cls = AdamW\n elif \"lion\" in training_args.optim:\n optimizer_cls = Lion\n additional_optim_kwargs = {\n \"betas\": (training_args.adam_beta1, training_args.adam_beta2)\n }\n\n bnb_kwargs = {\"is_paged\": is_paged, \"optim_bits\": optim_bits}\n optimizer_kwargs.update(additional_optim_kwargs)\n optimizer_kwargs.update(bnb_kwargs)\n except ImportError:\n raise ValueError(\n \"Trainer tried to instantiate bnb optimizer but bnb is not installed!\"\n )\n elif training_args.optim == OptimizerNames.ADAMW_BNB:\n try:\n from bitsandbytes.optim import Adam8bit\n\n optimizer_cls = Adam8bit\n optimizer_kwargs.update(adam_kwargs)\n except ImportError:\n raise ValueError(\n \"Trainer tried to instantiate bnb Adam8bit but bnb is not installed!\"\n )\n elif training_args.optim == 
OptimizerNames.ADAMW_ANYPRECISION:\n raise NotImplementedError(\"AdamWAnyprecision is not supported\")\n elif training_args.optim == OptimizerNames.SGD:\n optimizer_cls = torch.optim.SGD\n elif training_args.optim == OptimizerNames.ADAGRAD:\n optimizer_cls = torch.optim.Adagrad\n else:\n raise ValueError(\n f\"Trainer cannot instantiate unsupported optimizer: {training_args.optim}\"\n )\n\n optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)\n if optimizer_cls.__name__ == \"Adam8bit\":\n import bitsandbytes\n\n manager = bitsandbytes.optim.GlobalOptimManager.get_instance()\n\n skipped = 0\n for module in model.modules():\n if isinstance(module, nn.Embedding):\n skipped += sum(\n {p.data_ptr(): p.numel() for p in module.parameters()}.values()\n )\n print(f\"skipped {module}: {skipped / 2 ** 20}M params\")\n manager.register_module_override(module, \"weight\", {\"optim_bits\": 32})\n print(f\"bitsandbytes: will optimize {module} in fp32\")\n print(f\"skipped: {skipped / 2 ** 20}M params\")\n\n return optimizer" } ]
from load_model import load_model
from dataset import get_dataloader
from evaluate import evaluate
from config import DataTrainingArguments, ModelArguments
from transformers import (
    HfArgumentParser,
    Seq2SeqTrainingArguments,
    set_seed,
    get_scheduler,
)
from tqdm import tqdm
from accelerate import Accelerator, find_executable_batch_size
from typing import List
from optimizer import get_optimizer
from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from transformers.modeling_utils import unwrap_model
import torch
import os
import wandb
import gc
import json
import math
import sys
import logging
10,589
**gen_inputs, ).logits logits = logits[:, -1, :] logits = torch.nn.functional.softmax(logits, dim=-1) logits = logits[:, [yes_id, no_id]] logits = logits[:, 0] / (logits[:, 0] + logits[:, 1]) preds = logits > 0.5 preds = accelerator.gather(preds).cpu().tolist() logits = accelerator.gather(logits).cpu().tolist() if accelerator.is_local_main_process: if accelerator.num_processes > 1: # Remove duplicated in last batch if we are in a distributed setting if step == len(dataloader) - 1: preds = preds[: (len(dataloader.dataset) - samples_seen)] logits = logits[: (len(dataloader.dataset) - samples_seen)] else: samples_seen += len(batch) all_preds.extend(preds) all_scores.extend(logits) else: preds = model.generate( input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], max_new_tokens=6, ) preds = accelerator.gather( accelerator.pad_across_processes( preds, dim=1, pad_index=tokenizer.pad_token_id, ) ).cpu() inputs_ids = accelerator.gather( accelerator.pad_across_processes( batch["input_ids"], dim=1, pad_index=tokenizer.pad_token_id, ) ).cpu() preds = preds[:, len(inputs_ids[0]) :] if accelerator.is_local_main_process: if accelerator.num_processes > 1: # Remove duplicated in last batch if we are in a distributed setting if step == len(dataloader) - 1: preds = preds[: (len(dataloader.dataset) - samples_seen)] else: samples_seen += len(batch) preds = tokenizer.batch_decode(preds, skip_special_tokens=True) # print(preds) for pred in preds: pred = pred.lower() if "true" in pred: all_preds.append(True) else: all_preds.append(False) if accelerator.is_local_main_process: with open(output_path, "w", encoding="utf8") as f: for pred in all_preds if not return_scores else all_scores: print(pred, file=f) if not return_scores: json_dataset = dataloader.dataset.get_jsonl() assert len(json_dataset) == len(all_preds) with open( os.path.splitext(output_path)[0] + ".jsonl", "w", encoding="utf8" ) as f: for json_line, pred in zip(json_dataset, all_preds): json_line["prediction"] = bool(pred) print(json.dumps(json_line, ensure_ascii=False), file=f) model.train() def main( model_args: ModelArguments, data_args: DataTrainingArguments, training_args: Seq2SeqTrainingArguments, ): assert ( training_args.do_train or training_args.do_predict ), "You must specify do_train or do_predict" assert not (training_args.do_train and data_args.do_predict_full_dataset), ( "You cannot do both training and predict_full_dataset, " "as the model will be evaluated on the full dataset, which" " includes the training set." ) logging.basicConfig(level=logging.INFO) accelerator = Accelerator() print(f"Accelerator State: {accelerator.state}") set_seed(training_args.seed) if training_args.do_train: model, tokenizer = load_model( inference=False, model_weights_name_or_path=model_args.model_name_or_path, lora_weights_name_or_path=model_args.lora_weights_name_or_path, quantization=model_args.quantization, use_lora=model_args.use_lora, lora_target_modules=model_args.lora_target_modules, torch_dtype=model_args.torch_dtype, force_auto_device_map=data_args.force_auto_device_map, use_flash_attention=model_args.use_flash_attention, use_gradient_checkpointing=model_args.use_lora, ) true_tokens_ids = tokenizer.encode("True", add_special_tokens=False) false_tokens_ids = tokenizer.encode("False", add_special_tokens=False)
def clean_cache(): """Clean cache to avoid memory leak. This fixes this issue: https://github.com/huggingface/transformers/issues/22801""" print(f"Cleaning GPU memory. Current memory usage: {torch.cuda.memory_allocated()}") torch.cuda.empty_cache() gc.collect() torch.cuda.empty_cache() print(f"GPU memory usage after cleaning: {torch.cuda.memory_allocated()}") def compute_loss(model, inputs, return_outputs=False): """ How the loss is computed by Trainer. By default, all models return the loss in the first element. Subclass and override for custom behavior. """ if "labels" in inputs: labels = inputs.pop("labels") else: raise ValueError("You should supply a labels key to compute the loss") if "loss_weight_mask" in inputs: loss_weight_mask = inputs.pop("loss_weight_mask") else: raise ValueError("You should supply a loss_weight_mask key to compute the loss") if unwrap_model(model).config.is_encoder_decoder: outputs = model(labels=labels, **inputs) else: outputs = model(**inputs) logits = outputs["logits"] if isinstance(outputs, dict) else outputs[0] model_name = unwrap_model(model)._get_name() if ( model_name in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values() or model_name == "PeftModelForCausalLM" ): logits = logits[..., :-1, :].contiguous() labels = labels[..., 1:].contiguous() loss_weight_mask = loss_weight_mask[..., 1:].contiguous() logits = logits.view(-1, logits.size(-1)) labels = labels.view(-1) loss_weight_mask = loss_weight_mask.view(-1) loss_fct = torch.nn.CrossEntropyLoss(reduction="none", ignore_index=-100) loss = loss_fct(logits, labels) loss = torch.sum(loss * loss_weight_mask) / torch.sum(loss_weight_mask) return (loss, outputs) if return_outputs else loss def gen_predictions( model, tokenizer, true_tokens_ids: List[int], false_tokens_ids: List[int], dataloader, output_path, accelerator, print_first=False, predict_with_generate=False, return_scores=False, ): if predict_with_generate and return_scores: raise ValueError( "return_scores is not supported when predict_with_generate is True" ) model.eval() with torch.no_grad(): samples_seen: int = 0 yes_id = true_tokens_ids[0] no_id = false_tokens_ids[0] all_preds = [] all_scores = [] first = True for step, batch in enumerate( tqdm(dataloader, f"Inference on {os.path.basename(output_path)}") ): if print_first and accelerator.is_local_main_process: ### DEBUG ### if print_first and first and accelerator.is_main_process: decodeable_inputs = batch.input_ids.clone() decodeable_inputs[ decodeable_inputs == -100 ] = tokenizer.pad_token_id model_inputs = "\n".join( tokenizer.batch_decode( decodeable_inputs, skip_special_tokens=False, clean_up_tokenization_spaces=False, ) ) print(f"*** Sample of batch 0 ***") print(f"-- Model inputs --\n{model_inputs}") print(f"*** End of sample ***\n") first = False if not predict_with_generate: if not model.config.is_encoder_decoder: logits = model( input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], ).logits else: encoder_output = model.get_encoder()( input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], ) decoder_args = { "attention_mask": batch["attention_mask"], "use_cache": False, "encoder_outputs": encoder_output, } gen_inputs = model.prepare_inputs_for_generation( input_ids=torch.tensor( [[tokenizer.pad_token_id]] * len(batch["input_ids"]) ).to(batch["input_ids"].device), **decoder_args, ) logits = model( **gen_inputs, ).logits logits = logits[:, -1, :] logits = torch.nn.functional.softmax(logits, dim=-1) logits = logits[:, [yes_id, no_id]] logits = logits[:, 0] / 
(logits[:, 0] + logits[:, 1]) preds = logits > 0.5 preds = accelerator.gather(preds).cpu().tolist() logits = accelerator.gather(logits).cpu().tolist() if accelerator.is_local_main_process: if accelerator.num_processes > 1: # Remove duplicated in last batch if we are in a distributed setting if step == len(dataloader) - 1: preds = preds[: (len(dataloader.dataset) - samples_seen)] logits = logits[: (len(dataloader.dataset) - samples_seen)] else: samples_seen += len(batch) all_preds.extend(preds) all_scores.extend(logits) else: preds = model.generate( input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], max_new_tokens=6, ) preds = accelerator.gather( accelerator.pad_across_processes( preds, dim=1, pad_index=tokenizer.pad_token_id, ) ).cpu() inputs_ids = accelerator.gather( accelerator.pad_across_processes( batch["input_ids"], dim=1, pad_index=tokenizer.pad_token_id, ) ).cpu() preds = preds[:, len(inputs_ids[0]) :] if accelerator.is_local_main_process: if accelerator.num_processes > 1: # Remove duplicated in last batch if we are in a distributed setting if step == len(dataloader) - 1: preds = preds[: (len(dataloader.dataset) - samples_seen)] else: samples_seen += len(batch) preds = tokenizer.batch_decode(preds, skip_special_tokens=True) # print(preds) for pred in preds: pred = pred.lower() if "true" in pred: all_preds.append(True) else: all_preds.append(False) if accelerator.is_local_main_process: with open(output_path, "w", encoding="utf8") as f: for pred in all_preds if not return_scores else all_scores: print(pred, file=f) if not return_scores: json_dataset = dataloader.dataset.get_jsonl() assert len(json_dataset) == len(all_preds) with open( os.path.splitext(output_path)[0] + ".jsonl", "w", encoding="utf8" ) as f: for json_line, pred in zip(json_dataset, all_preds): json_line["prediction"] = bool(pred) print(json.dumps(json_line, ensure_ascii=False), file=f) model.train() def main( model_args: ModelArguments, data_args: DataTrainingArguments, training_args: Seq2SeqTrainingArguments, ): assert ( training_args.do_train or training_args.do_predict ), "You must specify do_train or do_predict" assert not (training_args.do_train and data_args.do_predict_full_dataset), ( "You cannot do both training and predict_full_dataset, " "as the model will be evaluated on the full dataset, which" " includes the training set." ) logging.basicConfig(level=logging.INFO) accelerator = Accelerator() print(f"Accelerator State: {accelerator.state}") set_seed(training_args.seed) if training_args.do_train: model, tokenizer = load_model( inference=False, model_weights_name_or_path=model_args.model_name_or_path, lora_weights_name_or_path=model_args.lora_weights_name_or_path, quantization=model_args.quantization, use_lora=model_args.use_lora, lora_target_modules=model_args.lora_target_modules, torch_dtype=model_args.torch_dtype, force_auto_device_map=data_args.force_auto_device_map, use_flash_attention=model_args.use_flash_attention, use_gradient_checkpointing=model_args.use_lora, ) true_tokens_ids = tokenizer.encode("True", add_special_tokens=False) false_tokens_ids = tokenizer.encode("False", add_special_tokens=False)
train_dataloader = get_dataloader(
1
2023-10-18 10:24:48+00:00
12k
SKYeve/Transcript-Combiner
pull_notes.py
[ { "identifier": "YoudaoNoteConvert", "path": "convert.py", "snippet": "class YoudaoNoteConvert(object):\n \"\"\"\n 有道云笔记 xml或者json 内容转换为 markdown 内容\n \"\"\"\n\n @staticmethod\n def covert_html_to_markdown(file_path) -> str:\n \"\"\"\n 转换 HTML 为 MarkDown\n :param file_path:\n :return:\n \"\"\"\n with open(file_path, 'rb') as f:\n content_str = f.read().decode('utf-8')\n from markdownify import markdownify as md\n # 如果换行符丢失,使用 md(content_str.replace('<br>', '<br><br>').replace('</div>', '</div><br><br>')).rstrip()\n new_content = md(content_str)\n base = os.path.splitext(file_path)[0]\n new_file_path = ''.join([base, MARKDOWN_SUFFIX])\n os.rename(file_path, new_file_path)\n with open(new_file_path, 'wb') as f:\n f.write(new_content.encode())\n return new_file_path\n\n @staticmethod\n def covert_xml_to_markdown_content(file_path):\n # 使用 xml.etree.ElementTree 将 xml 文件转换为对象\n element_tree = ET.parse(file_path)\n note_element = element_tree.getroot() # note Element\n\n # list_item 的 id 与 type 的对应\n list_item = {}\n for child in note_element[0]:\n if 'list' in child.tag:\n list_item[child.attrib['id']] = child.attrib['type']\n\n body_element = note_element[1] # Element\n new_content_list = []\n for element in list(body_element):\n text = XmlElementConvert.get_text_by_key(list(element))\n name = element.tag.replace('{http://note.youdao.com}', '').replace('-', '_')\n convert_func = getattr(XmlElementConvert, 'convert_{}_func'.format(name), None)\n # 如果没有转换,只保留文字\n if not convert_func:\n new_content_list.append(text)\n continue\n line_content = convert_func(text=text, element=element, list_item=list_item)\n new_content_list.append(line_content)\n return f'\\r\\n\\r\\n'.join(new_content_list) # 换行 1 行\n\n @staticmethod\n def covert_xml_to_markdown(file_path) -> str:\n \"\"\"\n 转换 XML 为 MarkDown\n :param file_path:\n :return:\n \"\"\"\n base = os.path.splitext(file_path)[0]\n new_file_path = ''.join([base, MARKDOWN_SUFFIX])\n # 如果文件为空,结束\n if os.path.getsize(file_path) == 0:\n os.rename(file_path, new_file_path)\n return False\n\n new_content = YoudaoNoteConvert.covert_xml_to_markdown_content(file_path)\n with open(new_file_path, 'wb') as f:\n f.write(new_content.encode('utf-8'))\n # 删除旧文件\n if os.path.exists(file_path):\n os.remove(file_path)\n return new_file_path\n \n \n @staticmethod\n def covert_json_to_markdown_content(file_path):\n new_content_list = []\n # 加载json文件\n with open(file_path, 'r', encoding='utf-8') as f:\n json_data = json.load(f)\n \n json_contents = json_data['5']\n for content in json_contents:\n type = content.get('6')\n # 根据类型处理,无类型的为普通文本\n if type:\n convert_func = getattr(jsonConvert(), 'convert_{}_func'.format(type), None)\n # 如果没有转换,显示错误\n if not convert_func:\n # line_content = f\"该类型{type},不支持转换!!!\"\n line_content = jsonConvert().convert_text_func(content)\n else:\n line_content = convert_func(content)\n else:\n line_content = jsonConvert().convert_text_func(content)\n \n # 判断是否有内容\n if line_content:\n new_content_list.append(line_content)\n return f'\\r\\n\\r\\n'.join(new_content_list) # 换行 1 行\n\n @staticmethod\n def covert_json_to_markdown(file_path) -> str:\n \"\"\"\n 转换 Json 为 MarkDown\n :param file_path:\n :return:\n \"\"\"\n base = os.path.splitext(file_path)[0]\n new_file_path = ''.join([base, MARKDOWN_SUFFIX])\n # 如果文件为空,结束\n if os.path.getsize(file_path) == 0:\n os.rename(file_path, new_file_path)\n return False\n new_content = YoudaoNoteConvert.covert_json_to_markdown_content(file_path)\n with open(new_file_path, 'wb') as f:\n 
f.write(new_content.encode('utf-8'))\n # 删除旧文件\n if os.path.exists(file_path):\n os.remove(file_path)\n return new_file_path\n \n \n @staticmethod\n def markdown_filter(file_path):\n filter_list = ['&#x20;']\n with open(file_path, 'r', encoding='utf-8') as f:\n content = f.read()\n \n for filter_text in filter_list:\n new_content = content.replace(filter_text,'')\n \n with open(file_path, 'wb') as f:\n f.write(new_content.encode('utf-8')) " }, { "identifier": "YoudaoNoteApi", "path": "youDaoNoteApi.py", "snippet": "class YoudaoNoteApi(object):\r\n \"\"\"\r\n 有道云笔记 API 封装\r\n 原理:https://depp.wang/2020/06/11/how-to-find-the-api-of-a-website-eg-note-youdao-com/\r\n \"\"\"\r\n\r\n ROOT_ID_URL = 'https://note.youdao.com/yws/api/personal/file?method=getByPath&keyfrom=web&cstk={cstk}'\r\n DIR_MES_URL = 'https://note.youdao.com/yws/api/personal/file/{dir_id}?all=true&f=true&len=1000&sort=1' \\\r\n '&isReverse=false&method=listPageByParentId&keyfrom=web&cstk={cstk}'\r\n FILE_URL = 'https://note.youdao.com/yws/api/personal/sync?method=download&_system=macos&_systemVersion=&' \\\r\n '_screenWidth=1280&_screenHeight=800&_appName=ynote&_appuser=0123456789abcdeffedcba9876543210&' \\\r\n '_vendor=official-website&_launch=16&_firstTime=&_deviceId=0123456789abcdef&_platform=web&' \\\r\n '_cityCode=110000&_cityName=&sev=j1&keyfrom=web&cstk={cstk}'\r\n\r\n def __init__(self, cookies_path=None):\r\n \"\"\"\r\n 初始化\r\n :param cookies_path:\r\n \"\"\"\r\n self.session = requests.session() # 使用 session 维持有道云笔记的登陆状态\r\n self.session.headers = {\r\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) '\r\n 'Chrome/100.0.4896.88 Safari/537.36',\r\n 'Accept': '*/*',\r\n 'Accept-Encoding': 'gzip, deflate',\r\n 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',\r\n 'sec-ch-ua': '\" Not A;Brand\";v=\"99\", \"Chromium\";v=\"100\", \"Google Chrome\";v=\"100\"',\r\n 'sec-ch-ua-mobile': '?0',\r\n 'sec-ch-ua-platform': '\"macOS\"',\r\n }\r\n\r\n self.cookies_path = cookies_path if cookies_path else 'cookies.json'\r\n self.cstk = None\r\n\r\n def login_by_cookies(self) -> str:\r\n \"\"\"\r\n 使用 Cookies 登录,其实就是设置 Session 的 Cookies\r\n :return: error_msg\r\n \"\"\"\r\n try:\r\n cookies = self._covert_cookies()\r\n except Exception as err:\r\n return format(err)\r\n for cookie in cookies:\r\n self.session.cookies.set(name=cookie[0], value=cookie[1], domain=cookie[2], path=cookie[3])\r\n self.cstk = cookies[0][1] if cookies[0][0] == 'YNOTE_CSTK' else None # cstk 用于请求时接口验证\r\n if not self.cstk:\r\n return 'YNOTE_CSTK 字段为空'\r\n print('本次使用 Cookies 登录')\r\n\r\n def _covert_cookies(self) -> list:\r\n \"\"\"\r\n 读取 cookies 文件的 cookies,并转换为字典\r\n :return: cookies\r\n \"\"\"\r\n with open(self.cookies_path, 'rb') as f:\r\n json_str = f.read().decode('utf-8')\r\n\r\n try:\r\n cookies_dict = json.loads(json_str) # 将字符串转换为字典\r\n cookies = cookies_dict['cookies']\r\n except Exception:\r\n raise Exception('转换「{}」为字典时出现错误'.format(self.cookies_path))\r\n return cookies\r\n\r\n def http_post(self, url, data=None, files=None):\r\n \"\"\"\r\n 封装 post 请求\r\n :param url:\r\n :param data:\r\n :param files:\r\n :return: response\r\n \"\"\"\r\n return self.session.post(url, data=data, files=files)\r\n\r\n def http_get(self, url):\r\n \"\"\"\r\n 封装 get 请求\r\n :param url:\r\n :return: response\r\n \"\"\"\r\n return self.session.get(url)\r\n\r\n def get_root_dir_info_id(self) -> dict:\r\n \"\"\"\r\n 获取有道云笔记根目录信息\r\n :return: {\r\n 'fileEntry': {'id': 'test_root_id', 'name': 'ROOT', ...},\r\n ...\r\n }\r\n 
\"\"\"\r\n data = {'path': '/', 'entire': 'true', 'purge': 'false', 'cstk': self.cstk}\r\n return self.http_post(self.ROOT_ID_URL.format(cstk=self.cstk), data=data).json()\r\n\r\n def get_dir_info_by_id(self, dir_id) -> dict:\r\n \"\"\"\r\n 根据目录 ID 获取目录下所有文件信息\r\n :return: {\r\n 'count': 3,\r\n 'entries': [\r\n {'fileEntry': {'id': 'test_dir_id', 'name': 'test_dir', 'dir': true, ...}},\r\n {'fileEntry': {'id': 'test_note_id', 'name': 'test_note', 'dir': false, ...}}\r\n ...\r\n ]\r\n }\r\n \"\"\"\r\n url = self.DIR_MES_URL.format(dir_id=dir_id, cstk=self.cstk)\r\n return self.http_get(url).json()\r\n\r\n def get_file_by_id(self, file_id):\r\n \"\"\"\r\n 根据文件 ID 获取文件内容\r\n :param file_id:\r\n :return: response,内容为笔记字节码\r\n \"\"\"\r\n data = {'fileId': file_id, 'version': -1, 'convert': 'true', 'editorType': 1, 'cstk': self.cstk}\r\n url = self.FILE_URL.format(cstk=self.cstk)\r\n return self.http_post(url, data=data)\r\n \r\n def checkin(self):\r\n \"\"\" 签到领空间\r\n return: {\r\n \"multiple\": 1,\r\n \"originSpace\": 2097152,\r\n \"total\": 424673280,\r\n \"time\": 1692543594831,\r\n \"success\": 1,\r\n \"space\": 2097152\r\n } \r\n \"\"\"\r\n checkin_url = 'https://note.youdao.com/yws/mapi/user?method=checkin'\r\n return self.http_post(checkin_url,data={})\r\n \r\n def note_rename(self,note_name,file_id):\r\n url = f'https://note.youdao.com/yws/api/personal/sync?method=push&name={note_name}fileId={file_id}&domain=0&rootVersion=-1&sessionId=&modifyTime=1692786849&transactionId={file_id}&transactionTime=1692786849&editorVersion=1692267502000&tags=&_system=windows&_systemVersion=&_screenWidth=1920&_screenHeight=1080&_appName=ynote&_appuser=019623eb3bfaff1f5ddc278090f8420d&_vendor=official-website&_launch=22279&_firstTime=2023/08/19 11:24:10&_deviceId=8cf8855c4105f937&_platform=web&_cityCode=440300&_cityName=深圳&sev=j1&sec=v1&keyfrom=web&cstk={self.cstk}'" }, { "identifier": "PullImages", "path": "pull_images.py", "snippet": "class PullImages():\r\n def __init__(self, youdaonote_api=None, smms_secret_token: str=None, is_relative_path: bool=None):\r\n self.youdaonote_api = youdaonote_api\r\n self.smms_secret_token = smms_secret_token\r\n self.is_relative_path = is_relative_path # 是否使用相对路径\r\n if not self.smms_secret_token and not self.is_relative_path:\r\n self.load_config()\r\n if not self.youdaonote_api:\r\n self.login()\r\n \r\n def migration_ydnote_url(self, file_path):\r\n \"\"\"\r\n 迁移有道云笔记文件 URL\r\n :param file_path:\r\n :return:\r\n \"\"\"\r\n with open(file_path, 'rb') as f:\r\n content = f.read().decode('utf-8')\r\n\r\n # 图片\r\n image_urls = REGEX_IMAGE_URL.findall(content)\r\n if len(image_urls) > 0:\r\n print('正在转换有道云笔记「{}」中的有道云图片链接...'.format(file_path))\r\n for index,image_url in enumerate(image_urls):\r\n image_path = self._get_new_image_path(file_path, image_url,index)\r\n if image_url == image_path:\r\n continue\r\n #将绝对路径替换为相对路径,实现满足 Obsidian 格式要求\r\n #将 image_path 路径中 images 之前的路径去掉,只保留以 images 开头的之后的路径\r\n if self.is_relative_path:\r\n image_path = image_path[image_path.find(IMAGES):]\r\n \r\n image_path = self.url_encode(image_path)\r\n content = content.replace(image_url, image_path)\r\n\r\n # 附件\r\n attach_name_and_url_list = REGEX_ATTACH.findall(content)\r\n if len(attach_name_and_url_list) > 0:\r\n print('正在转换有道云笔记「{}」中的有道云附件链接...'.format(file_path))\r\n for attach_name_and_url in attach_name_and_url_list:\r\n attach_url = attach_name_and_url[1]\r\n attach_path = self._download_attach_url(file_path, attach_url, attach_name_and_url[0])\r\n if not attach_path:\r\n 
continue\r\n # 将 attach_path 路径中 attachments 之前的路径去掉,只保留以 attachments 开头的之后的路径\r\n if self.is_relative_path:\r\n attach_path = attach_path[attach_path.find(ATTACH):]\r\n content = content.replace(attach_url, attach_path)\r\n\r\n with open(file_path, 'wb') as f:\r\n f.write(content.encode())\r\n return\r\n\r\n def _get_new_image_path(self, file_path, image_url,index) -> str:\r\n \"\"\"\r\n 将图片链接转换为新的链接\r\n :param file_path:\r\n :param image_url:\r\n :return: new_image_path\r\n \"\"\"\r\n # 当 smms_secret_token 为空(不上传到 SM.MS),下载到图片到本地\r\n if not self.smms_secret_token:\r\n image_path = self._download_image_url(file_path, image_url,index)\r\n return image_path or image_url\r\n\r\n # smms_secret_token 不为空,上传到 SM.MS\r\n new_file_url, error_msg = ImageUpload.upload_to_smms(youdaonote_api=self.youdaonote_api, image_url=image_url,\r\n smms_secret_token=self.smms_secret_token)\r\n # 如果上传失败,仍下载到本地\r\n if not error_msg:\r\n return new_file_url\r\n print(error_msg)\r\n image_path = self._download_image_url(file_path, image_url,index)\r\n return image_path or image_url\r\n \r\n def _download_image_url(self, file_path, url,index) -> str:\r\n \"\"\"\r\n 下载文件到本地,返回本地路径\r\n :param file_path:\r\n :param url:\r\n :param attach_name:\r\n :return: path\r\n \"\"\"\r\n try:\r\n response = self.youdaonote_api.http_get(url)\r\n except requests.exceptions.ProxyError as err:\r\n error_msg = '网络错误,「{}」下载失败。错误提示:{}'.format(url, format(err))\r\n print(error_msg)\r\n return ''\r\n\r\n content_type = response.headers.get('Content-Type')\r\n file_type = '图片'\r\n if response.status_code != 200 or not content_type:\r\n error_msg = '下载「{}」失败!{}可能已失效,可浏览器登录有道云笔记后,查看{}是否能正常加载'.format(url, file_type,\r\n file_type)\r\n print(error_msg)\r\n return ''\r\n\r\n # 默认下载图片到 images 文件夹\r\n file_dirname = IMAGES\r\n # 后缀 png 和 jpeg 后可能出现 ; `**.png;`, 原因未知\r\n content_type_arr = content_type.split('/')\r\n file_suffix = '.' + content_type_arr[1].replace(';', '') if len(content_type_arr) == 2 else \"jpg\"\r\n local_file_dir = os.path.join(os.path.dirname(file_path),file_dirname)\r\n\r\n if not os.path.exists(local_file_dir):\r\n os.mkdir(local_file_dir)\r\n \r\n file_name = os.path.basename(os.path.splitext(file_path)[0])\r\n file_name = self._optimize_file_name(file_name)\r\n #请求后的真实的URL中才有东西\r\n realUrl = parse.parse_qs(urlparse(response.url).query)\r\n real_filename = realUrl.get('filename')\r\n if real_filename:\r\n # dict 不为空时,去获取真实文件名称\r\n read_file_name = real_filename[0]\r\n file_suffix = '.' 
+ read_file_name.split('.')[-1]\r\n file_name = os.path.basename(os.path.splitext(file_path)[0]) + '_image_' + str(index) + file_suffix \r\n else:\r\n file_name = os.path.basename(os.path.splitext(file_path)[0]) + '_image_' + str(index) + file_suffix\r\n \r\n local_file_path = os.path.join(local_file_dir, file_name)\r\n # 使md附件或者图片的路径分隔符为\"/\"\r\n local_file_path = local_file_path.replace('\\\\', '/')\r\n \r\n try:\r\n with open(local_file_path, 'wb') as f:\r\n f.write(response.content) # response.content 本身就为字节类型\r\n print('已将{}「{}」转换为「{}」'.format(file_type, url, local_file_path))\r\n except:\r\n error_msg = '{} {}有误!'.format(url, file_type)\r\n print(error_msg)\r\n return ''\r\n \r\n return local_file_path\r\n \r\n \r\n\r\n def _download_attach_url(self, file_path, url,attach_name=None) -> str:\r\n \"\"\"\r\n 下载文件到本地,返回本地路径\r\n :param file_path:\r\n :param url:\r\n :param attach_name:\r\n :return: path\r\n \"\"\"\r\n try:\r\n response = self.youdaonote_api.http_get(url)\r\n except requests.exceptions.ProxyError as err:\r\n error_msg = '网络错误,「{}」下载失败。错误提示:{}'.format(url, format(err))\r\n print(error_msg)\r\n return ''\r\n\r\n content_type = response.headers.get('Content-Type')\r\n file_type = '附件'\r\n if response.status_code != 200 or not content_type:\r\n error_msg = '下载「{}」失败!{}可能已失效,可浏览器登录有道云笔记后,查看{}是否能正常加载'.format(url, file_type,file_type)\r\n print(error_msg)\r\n return ''\r\n\r\n file_dirname = ATTACH\r\n attach_name = self._optimize_file_name(attach_name)\r\n file_suffix = attach_name\r\n local_file_dir = os.path.join(os.path.dirname(file_path),file_dirname)\r\n\r\n if not os.path.exists(local_file_dir):\r\n os.mkdir(local_file_dir)\r\n\r\n local_file_path: str = os.path.join(local_file_dir,file_suffix)\r\n # 使md附件或者图片的路径分隔符为\"/\"\r\n local_file_path = local_file_path.replace('\\\\', '/')\r\n \r\n try:\r\n with open(local_file_path, 'wb') as f:\r\n f.write(response.content) # response.content 本身就为字节类型\r\n print('已将{}「{}」转换为「{}」'.format(file_type, url, local_file_path))\r\n except:\r\n error_msg = '{} {}有误!'.format(url, file_type)\r\n print(error_msg)\r\n return ''\r\n\r\n return local_file_path\r\n \r\n def _optimize_file_name(self, name) -> str:\r\n \"\"\"\r\n 优化文件名,替换下划线\r\n :param name:\r\n :return:\r\n \"\"\"\r\n # 去除换行符,首尾的空格,文件名有空格识别不出图片\r\n name = name.strip()\r\n regex_symbol = re.compile(r'[\\\\/:\\*\\?\"<>\\|、]') # 符号:\\ / : * ? 
\" < > | ( )\r\n name = regex_symbol.sub('_', name)\r\n return name\r\n\r\n \r\n def login(self):\r\n self.youdaonote_api = YoudaoNoteApi()\r\n error_msg = self.youdaonote_api.login_by_cookies()\r\n if error_msg:\r\n return '', error_msg\r\n \r\n def load_config(self):\r\n config_dict, error_msg = covert_config(CONFIG_PATH)\r\n self.smms_secret_token = config_dict['smms_secret_token']\r\n self.is_relative_path = config_dict['is_relative_path']\r\n \r\n def more_pull_images(self,md_dir: str):\r\n \"\"\"遍历文件夹的md文件,拉取md文件有道云的图片和附件\r\n\r\n Args:\r\n md_dir (str): md文件的目录\r\n \"\"\"\r\n file_path = md_dir + \"/**/*.md\"\r\n # 匹配当前目录下所有的txt文件\r\n file_list = glob.glob(file_path,recursive=True)\r\n print(file_list)\r\n for md_file in file_list:\r\n self.migration_ydnote_url(md_file)\r\n \r\n @classmethod\r\n def url_encode(cls,path: str):\r\n \"\"\"对一些特殊字符url编码\r\n Args:\r\n path (str): \r\n \"\"\"\r\n path = path.replace(' ','%20')\r\n return path\r" }, { "identifier": "FileActionEnum", "path": "public.py", "snippet": "class FileActionEnum(Enum):\n CONTINUE = \"跳过\"\n ADD = \"新增\"\n UPDATE = \"更新\"" }, { "identifier": "covert_config", "path": "public.py", "snippet": "def covert_config(config_path=None) -> Tuple[dict, str]:\n \"\"\"\n 转换配置文件为 dict\n :param config_path: config 文件路径\n :return: (config_dict, error_msg)\n \"\"\"\n config_path = config_path if config_path else CONFIG_PATH\n with open(config_path, 'rb') as f:\n config_str = f.read().decode('utf-8')\n\n try:\n config_dict = json.loads(config_str)\n except:\n return {}, '请检查「config.json」格式是否为 utf-8 格式的 json!建议使用 Sublime 编辑「config.json」'\n\n key_list = ['local_dir', 'ydnote_dir', 'smms_secret_token', 'is_relative_path']\n if key_list != list(config_dict.keys()):\n return {}, '请检查「config.json」的 key 是否分别为 local_dir, ydnote_dir, smms_secret_token, is_relative_path'\n return config_dict, ''" } ]
import json
import logging
import os
import re
import sys
import time
import traceback
import xml.etree.ElementTree as ET
import requests
from enum import Enum
from typing import Tuple
from convert import YoudaoNoteConvert
from youDaoNoteApi import YoudaoNoteApi
from pull_images import PullImages
from public import FileActionEnum
from public import covert_config
8,146
dir_info = self.youdaonote_api.get_dir_info_by_id(root_dir_id) for entry in dir_info['entries']: file_entry = entry['fileEntry'] if file_entry['name'] == ydnote_dir: return file_entry['id'], '' return '', '有道云笔记指定顶层目录不存在' def _add_or_update_file(self, file_id, file_name, local_dir, modify_time): """ 新增或更新文件 :param file_id: :param file_name: :param local_dir: :param modify_time: :return: """ youdao_file_suffix = os.path.splitext(file_name)[1] # 笔记后缀 note_type = self.judge_type(file_id,youdao_file_suffix) # print(f"{file_name}:{note_type}") is_note = True if note_type == 1 or note_type == 2 else False original_file_path = os.path.join(local_dir, file_name).replace('\\', '/') # 原后缀路径 # 生成.md后缀的文件的绝对路径 local_file_path = os.path.join(local_dir, ''.join([os.path.splitext(file_name)[0], MARKDOWN_SUFFIX])).replace( '\\', '/') if is_note else original_file_path # 如果有有道云笔记是「note」类型,则提示类型 tip = f'| 原文件: {file_name} | 类型:{note_type}' file_action = self._get_file_action(local_file_path, modify_time) if file_action == FileActionEnum.CONTINUE: return if file_action == FileActionEnum.UPDATE: # 考虑到使用 f.write() 直接覆盖原文件,在 Windows 下报错(WinError 183),先将其删除 os.remove(local_file_path) try: self._pull_file(file_id, original_file_path, note_type) print('{}「{}」{}'.format(file_action.value, local_file_path, tip)) except Exception as error: print('{}「{}」失败!请检查文件!错误提示:{}'.format(file_action.value, original_file_path, format(error))) def _judge_is_note(self, file_id, youdao_file_suffix): """ 判断是否是 note 类型 :param file_id: :param youdao_file_suffix: :return: """ is_note = False # 1、如果文件是 .note 类型 if youdao_file_suffix == NOTE_SUFFIX: is_note = True # 2、如果文件没有类型后缀,但以 `<?xml` 开头 if not youdao_file_suffix: response = self.youdaonote_api.get_file_by_id(file_id) content = response.content[:5] is_note = True if content == b"<?xml" else False return is_note # def judge_type(self, noteType: int, orgEditorType: int) -> int: # """ # 判断返回内容 # :param entryType: int # :param orgEditorType: int # :return: note_type: int # """ # note_type = 0 # # 返回xml格式的note笔记内容,noteType == 0 and orgEditorType == 1 # if noteType == 0 and orgEditorType == 1: # note_type = 1 # # 返回json格式的note笔记内容 # elif (noteType == 7 or noteType == 5) and orgEditorType == 1: # note_type = 2 # # 返回md文件内容 # elif noteType == 0 and orgEditorType == 0: # note_type = 3 # return note_type def judge_type(self,file_id: str ,youdao_file_suffix: str) -> int: """ 判断返回内容 :param entryType: int :param orgEditorType: int :return: note_type: int """ note_type = 0 is_xml = False if youdao_file_suffix == ".note": response = self.youdaonote_api.get_file_by_id(file_id) content = response.content[:5] is_xml = True if content == b"<?xml" else False if is_xml: # xml类型 note_type = 1 else: # json类型 note_type = 2 elif youdao_file_suffix == ".md": note_type = 3 else: print(f"文件后缀「{youdao_file_suffix}」不识别,请检查!") return note_type def _pull_file(self, file_id, file_path, note_type): """ 下载文件 :param file_id: :param file_path: :param itype: :return: """ # 1、所有的都先下载 response = self.youdaonote_api.get_file_by_id(file_id) with open(file_path, 'wb') as f: f.write(response.content) # response.content 本身就是字节类型 new_file_path = "" # 2、如果文件是 note 类型,将其转换为 MarkDown 类型 if note_type == 1: try:
#!/usr/bin/env python3 # -*- coding: utf-8 -*- MARKDOWN_SUFFIX = '.md' NOTE_SUFFIX = '.note' CONFIG_PATH = 'config.json' class YoudaoNotePull(object): """ 有道云笔记 Pull 封装 """ CONFIG_PATH = 'config.json' def __init__(self): self.root_local_dir = None # 本地文件根目录 self.youdaonote_api = None self.smms_secret_token = None self.is_relative_path = None # 是否使用相对路径 def get_ydnote_dir_id(self): """ 获取有道云笔记根目录或指定目录 ID :return: """ config_dict, error_msg = covert_config(CONFIG_PATH) if error_msg: return '', error_msg local_dir, error_msg = self._check_local_dir(local_dir=config_dict['local_dir']) if error_msg: return '', error_msg self.root_local_dir = local_dir self.youdaonote_api = YoudaoNoteApi() error_msg = self.youdaonote_api.login_by_cookies() if error_msg: return '', error_msg self.smms_secret_token = config_dict['smms_secret_token'] self.is_relative_path = config_dict['is_relative_path'] return self._get_ydnote_dir_id(ydnote_dir=config_dict['ydnote_dir']) def pull_dir_by_id_recursively(self, dir_id, local_dir): """ 根据目录 ID 循环遍历下载目录下所有文件 :param dir_id: :param local_dir: 本地目录 :return: error_msg """ dir_info = self.youdaonote_api.get_dir_info_by_id(dir_id) try: entries = dir_info['entries'] except KeyError: raise KeyError('有道云笔记修改了接口地址,此脚本暂时不能使用!请提 issue') for entry in entries: file_entry = entry['fileEntry'] id = file_entry['id'] file_name = file_entry['name'] file_name = self._optimize_file_name(file_name) # noteType = file_entry['noteType'] # orgEditorType = file_entry['orgEditorType'] if file_entry['dir']: sub_dir = os.path.join(local_dir, file_name).replace('\\', '/') # 判断本地文件夹是否存在 if not os.path.exists(sub_dir): os.mkdir(sub_dir) self.pull_dir_by_id_recursively(id, sub_dir) else: modify_time = file_entry['modifyTimeForSort'] self._add_or_update_file(id, file_name, local_dir, modify_time) def _check_local_dir(self, local_dir, test_default_dir=None) -> Tuple[str, str]: """ 检查本地文件夹 :param local_dir: 本地文件夹名(绝对路径) :return: local_dir, error_msg """ # 如果没有指定本地文件夹,当前目录新增 youdaonote 目录 if not local_dir: add_dir = test_default_dir if test_default_dir else 'youdaonote' # 兼容 Windows 系统,将路径分隔符(\\)替换为 / local_dir = os.path.join(os.getcwd(), add_dir).replace('\\', '/') # 如果指定的本地文件夹不存在,创建文件夹 if not os.path.exists(local_dir): try: os.mkdir(local_dir) except: return '', '请检查「{}」上层文件夹是否存在,并使用绝对路径!'.format(local_dir) return local_dir, '' def _get_ydnote_dir_id(self, ydnote_dir) -> Tuple[str, str]: """ 获取指定有道云笔记指定目录 ID :param ydnote_dir: 指定有道云笔记指定目录 :return: dir_id, error_msg """ root_dir_info = self.youdaonote_api.get_root_dir_info_id() root_dir_id = root_dir_info['fileEntry']['id'] # 如果不指定文件夹,取根目录 ID if not ydnote_dir: return root_dir_id, '' dir_info = self.youdaonote_api.get_dir_info_by_id(root_dir_id) for entry in dir_info['entries']: file_entry = entry['fileEntry'] if file_entry['name'] == ydnote_dir: return file_entry['id'], '' return '', '有道云笔记指定顶层目录不存在' def _add_or_update_file(self, file_id, file_name, local_dir, modify_time): """ 新增或更新文件 :param file_id: :param file_name: :param local_dir: :param modify_time: :return: """ youdao_file_suffix = os.path.splitext(file_name)[1] # 笔记后缀 note_type = self.judge_type(file_id,youdao_file_suffix) # print(f"{file_name}:{note_type}") is_note = True if note_type == 1 or note_type == 2 else False original_file_path = os.path.join(local_dir, file_name).replace('\\', '/') # 原后缀路径 # 生成.md后缀的文件的绝对路径 local_file_path = os.path.join(local_dir, ''.join([os.path.splitext(file_name)[0], MARKDOWN_SUFFIX])).replace( '\\', '/') if is_note else original_file_path # 
如果有有道云笔记是「note」类型,则提示类型 tip = f'| 原文件: {file_name} | 类型:{note_type}' file_action = self._get_file_action(local_file_path, modify_time) if file_action == FileActionEnum.CONTINUE: return if file_action == FileActionEnum.UPDATE: # 考虑到使用 f.write() 直接覆盖原文件,在 Windows 下报错(WinError 183),先将其删除 os.remove(local_file_path) try: self._pull_file(file_id, original_file_path, note_type) print('{}「{}」{}'.format(file_action.value, local_file_path, tip)) except Exception as error: print('{}「{}」失败!请检查文件!错误提示:{}'.format(file_action.value, original_file_path, format(error))) def _judge_is_note(self, file_id, youdao_file_suffix): """ 判断是否是 note 类型 :param file_id: :param youdao_file_suffix: :return: """ is_note = False # 1、如果文件是 .note 类型 if youdao_file_suffix == NOTE_SUFFIX: is_note = True # 2、如果文件没有类型后缀,但以 `<?xml` 开头 if not youdao_file_suffix: response = self.youdaonote_api.get_file_by_id(file_id) content = response.content[:5] is_note = True if content == b"<?xml" else False return is_note # def judge_type(self, noteType: int, orgEditorType: int) -> int: # """ # 判断返回内容 # :param entryType: int # :param orgEditorType: int # :return: note_type: int # """ # note_type = 0 # # 返回xml格式的note笔记内容,noteType == 0 and orgEditorType == 1 # if noteType == 0 and orgEditorType == 1: # note_type = 1 # # 返回json格式的note笔记内容 # elif (noteType == 7 or noteType == 5) and orgEditorType == 1: # note_type = 2 # # 返回md文件内容 # elif noteType == 0 and orgEditorType == 0: # note_type = 3 # return note_type def judge_type(self,file_id: str ,youdao_file_suffix: str) -> int: """ 判断返回内容 :param entryType: int :param orgEditorType: int :return: note_type: int """ note_type = 0 is_xml = False if youdao_file_suffix == ".note": response = self.youdaonote_api.get_file_by_id(file_id) content = response.content[:5] is_xml = True if content == b"<?xml" else False if is_xml: # xml类型 note_type = 1 else: # json类型 note_type = 2 elif youdao_file_suffix == ".md": note_type = 3 else: print(f"文件后缀「{youdao_file_suffix}」不识别,请检查!") return note_type def _pull_file(self, file_id, file_path, note_type): """ 下载文件 :param file_id: :param file_path: :param itype: :return: """ # 1、所有的都先下载 response = self.youdaonote_api.get_file_by_id(file_id) with open(file_path, 'wb') as f: f.write(response.content) # response.content 本身就是字节类型 new_file_path = "" # 2、如果文件是 note 类型,将其转换为 MarkDown 类型 if note_type == 1: try:
new_file_path = YoudaoNoteConvert.covert_xml_to_markdown(file_path)
0
2023-10-17 11:21:50+00:00
12k
S-LoRA/S-LoRA
slora/models/llama/layer_infer/transformer_layer_infer.py
[ { "identifier": "LlamaTransformerLayerWeight", "path": "slora/models/llama/layer_weights/transformer_layer_weight.py", "snippet": "class LlamaTransformerLayerWeight(TransformerLayerWeight):\n def __init__(self, layer_num, tp_rank, world_size, data_type, network_config, mode=[]):\n super().__init__(layer_num, tp_rank, world_size, data_type, network_config, mode)\n\n\n def load_hf_weights(self, weights, dummy=False):\n if dummy:\n self._load_qkvo_dummy_weights()\n self._load_ffn_dummy_weights()\n else:\n self._load_qkvo_weights(weights)\n self._load_ffn_weights(weights)\n\n \n def verify_load(self):\n errors = \"weights load not ok\"\n weights = [self.att_norm_weight_,\n self.q_weight_,\n self.k_weight_,\n self.v_weight_,\n self.o_weight_,\n self.ffn_norm_weight_,\n self.up_proj,\n self.gate_proj,\n self.down_proj\n ]\n for i in range(len(weights)):\n assert weights[i] is not None, \"index:\" + str(i) + \" \" + errors\n\n\n def _load_qkvo_dummy_weights(self):\n n_embed = self.network_config_[\"hidden_size\"]\n split_n_embed = n_embed // self.world_size_\n # input layernorm params\n self.att_norm_weight_ = (torch.rand((n_embed), dtype=self.data_type_, device=\"cuda\") * 2 - 1) * 1e-3\n # attention params\n self.q_weight_ = (torch.rand((split_n_embed, n_embed), \n dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2 - 1) * 1e-3\n self.k_weight_ = (torch.rand((split_n_embed, n_embed), \n dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2 - 1) * 1e-3\n self.v_weight_ = (torch.rand((split_n_embed, n_embed), \n dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2 - 1) * 1e-3\n # attention output dense params\n self.o_weight_ = (torch.rand((n_embed, split_n_embed),\n dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2 - 1) * 1e-3\n \n\n def _load_ffn_dummy_weights(self):\n n_embed = self.network_config_[\"hidden_size\"]\n inter_size = self.network_config_['intermediate_size']\n split_inter_size = inter_size // self.world_size_\n\n self.ffn_norm_weight_ = (torch.rand((n_embed), dtype=self.data_type_, device=\"cuda\") * 2 - 1) * 1e-3\n\n self.up_proj = (torch.rand((split_inter_size, n_embed),\n dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2 - 1) * 1e-3\n self.gate_proj = (torch.rand((split_inter_size, n_embed),\n dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2 - 1) * 1e-3\n self.down_proj = (torch.rand((n_embed, split_inter_size),\n dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2 - 1) * 1e-3\n\n\n def _load_qkvo_weights(self, weights):\n # input layernorm params\n if f\"model.layers.{self.layer_num_}.input_layernorm.weight\" in weights:\n self.att_norm_weight_ = self._cuda(weights[f\"model.layers.{self.layer_num_}.input_layernorm.weight\"])\n\n n_embed = self.network_config_[\"hidden_size\"]\n split_n_embed = n_embed // self.world_size_\n # q k v weights for llama\n if f\"model.layers.{self.layer_num_}.self_attn.q_proj.weight\" in weights:\n self.q_weight_ = weights[f\"model.layers.{self.layer_num_}.self_attn.q_proj.weight\"][split_n_embed *\n self.tp_rank_: split_n_embed * (self.tp_rank_ + 1), :]\n self.q_weight_ = self._cuda(self.q_weight_.transpose(0, 1))\n if f\"model.layers.{self.layer_num_}.self_attn.k_proj.weight\" in weights:\n self.k_weight_ = weights[f\"model.layers.{self.layer_num_}.self_attn.k_proj.weight\"][split_n_embed *\n self.tp_rank_: split_n_embed * (self.tp_rank_ + 1), :]\n self.k_weight_ = 
self._cuda(self.k_weight_.transpose(0, 1))\n\n if f\"model.layers.{self.layer_num_}.self_attn.v_proj.weight\" in weights:\n self.v_weight_ = weights[f\"model.layers.{self.layer_num_}.self_attn.v_proj.weight\"][split_n_embed *\n self.tp_rank_: split_n_embed * (self.tp_rank_ + 1), :]\n self.v_weight_ = self._cuda(self.v_weight_.transpose(0, 1))\n \n # attention output dense params\n if f\"model.layers.{self.layer_num_}.self_attn.o_proj.weight\" in weights:\n self.o_weight_ = weights[f\"model.layers.{self.layer_num_}.self_attn.o_proj.weight\"][:,\n split_n_embed * self.tp_rank_: split_n_embed * (self.tp_rank_ + 1)]\n self.o_weight_ = self._cuda(self.o_weight_.transpose(0, 1))\n \n\n def _load_ffn_weights(self, weights):\n if f\"model.layers.{self.layer_num_}.post_attention_layernorm.weight\" in weights:\n self.ffn_norm_weight_ = self._cuda(weights[f\"model.layers.{self.layer_num_}.post_attention_layernorm.weight\"])\n \n inter_size = self.network_config_['intermediate_size']\n split_inter_size = inter_size // self.world_size_\n\n if f\"model.layers.{self.layer_num_}.mlp.up_proj.weight\" in weights:\n self.up_proj = weights[f\"model.layers.{self.layer_num_}.mlp.up_proj.weight\"][split_inter_size *\n self.tp_rank_: split_inter_size * (self.tp_rank_ + 1), :]\n self.up_proj = self._cuda(self.up_proj.transpose(0, 1))\n\n if f\"model.layers.{self.layer_num_}.mlp.gate_proj.weight\" in weights:\n self.gate_proj = weights[f\"model.layers.{self.layer_num_}.mlp.gate_proj.weight\"][split_inter_size *\n self.tp_rank_: split_inter_size * (self.tp_rank_ + 1), :]\n self.gate_proj = self._cuda(self.gate_proj.transpose(0, 1))\n\n if f\"model.layers.{self.layer_num_}.mlp.down_proj.weight\" in weights:\n self.down_proj = weights[f\"model.layers.{self.layer_num_}.mlp.down_proj.weight\"][:,\n split_inter_size * self.tp_rank_: split_inter_size * (self.tp_rank_ + 1)]\n self.down_proj = self._cuda(self.down_proj.transpose(0, 1))" }, { "identifier": "context_attention_fwd", "path": "slora/models/llama/triton_kernel/context_flashattention_nopad.py", "snippet": "@torch.no_grad()\ndef context_attention_fwd(q, k, v, o, b_start_loc, b_seq_len, max_input_len):\n BLOCK = 128\n # shape constraints\n Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]\n assert Lq == Lk and Lk == Lv\n assert Lk in {16, 32, 64, 128}\n\n sm_scale = 1.0 / (Lq**0.5) # 计算scale系数\n batch, head = b_seq_len.shape[0], q.shape[1]\n\n grid = (batch, head, triton.cdiv(max_input_len, BLOCK)) # batch, head,\n\n num_warps = 4 if Lk <= 64 else 8\n _fwd_kernel[grid](\n q, k, v, sm_scale, b_start_loc, b_seq_len,\n o,\n q.stride(0), q.stride(1), q.stride(2),\n k.stride(0), k.stride(1), k.stride(2),\n v.stride(0), v.stride(1), v.stride(2),\n o.stride(0), o.stride(1), o.stride(2),\n BLOCK_M=BLOCK,\n BLOCK_DMODEL=Lk,\n BLOCK_N=BLOCK,\n num_warps=num_warps,\n num_stages=1,\n )\n return" }, { "identifier": "token_att_fwd", "path": "slora/models/llama/triton_kernel/token_attention_nopad_att1.py", "snippet": "@torch.no_grad()\ndef token_att_fwd(q, k, att_out, B_Loc, B_Start_Loc, B_Seqlen, max_input_len):\n BLOCK = 32\n # shape constraints\n Lq, Lk = q.shape[-1], k.shape[-1]\n assert Lq == Lk\n assert Lk in {16, 32, 64, 128}\n sm_scale = 1.0 / (Lk ** 0.5)\n\n batch, head_num = B_Loc.shape[0], q.shape[1]\n\n grid = (batch, head_num, triton.cdiv(max_input_len, BLOCK))\n\n num_warps = 4\n \n _fwd_kernel_token_att1[grid](\n q, k, sm_scale, B_Loc, B_Start_Loc, B_Seqlen, max_input_len,\n att_out,\n B_Loc.stride(0), B_Loc.stride(1),\n q.stride(0), q.stride(1), q.stride(2),\n 
k.stride(0), k.stride(1), k.stride(2),\n att_out.stride(0), att_out.stride(1),\n BLOCK_DMODEL=Lk,\n BLOCK_N=BLOCK,\n num_warps=num_warps,\n num_stages=1,\n )\n return" }, { "identifier": "token_att_fwd_int8k", "path": "slora/models/llama/triton_kernel/token_attention_nopad_att1.py", "snippet": "@torch.no_grad()\ndef token_att_fwd_int8k(q, k, k_scale, att_out, B_Loc, B_Start_Loc, B_Seqlen, max_input_len):\n BLOCK = 32\n # shape constraints\n Lq, Lk = q.shape[-1], k.shape[-1]\n assert Lq == Lk\n assert Lk in {16, 32, 64, 128}\n sm_scale = 1.0 / (Lk ** 0.5)\n\n batch, head_num = B_Loc.shape[0], q.shape[1]\n\n grid = (batch, head_num, triton.cdiv(max_input_len, BLOCK))\n\n num_warps = 4 if Lk <= 64 else 8\n num_warps = 2\n\n _fwd_kernel_token_att1_int8[grid](\n q, k, k_scale, sm_scale, B_Loc, B_Start_Loc, B_Seqlen, max_input_len,\n att_out,\n B_Loc.stride(0), B_Loc.stride(1),\n q.stride(0), q.stride(1), q.stride(2),\n k.stride(0), k.stride(1), k.stride(2),\n k_scale.stride(0), k_scale.stride(1), k_scale.stride(2),\n att_out.stride(0), att_out.stride(1),\n BLOCK_DMODEL=Lk,\n BLOCK_N=BLOCK,\n num_warps=num_warps,\n num_stages=1,\n )\n return" }, { "identifier": "token_softmax_fwd", "path": "slora/models/llama/triton_kernel/token_attention_nopad_softmax.py", "snippet": "@torch.no_grad()\ndef token_softmax_fwd(Logics, B_Start_Loc, B_Seqlen, Prob_Out, max_input_len):\n BLOCK_SIZE = triton.next_power_of_2(max_input_len)\n batch, head_num = B_Start_Loc.shape[0], Logics.shape[0]\n\n num_warps = 4\n if BLOCK_SIZE >= 2048:\n num_warps = 8\n if BLOCK_SIZE >= 4096:\n num_warps = 16\n\n _fwd_kernel_token_softmax[(batch, head_num)](\n Logics, B_Start_Loc, B_Seqlen,\n Prob_Out,\n Logics.stride(0), Logics.stride(1),\n Prob_Out.stride(0), Prob_Out.stride(1),\n num_warps=num_warps,\n BLOCK_SIZE=BLOCK_SIZE,\n )\n return" }, { "identifier": "token_att_fwd2", "path": "slora/models/llama/triton_kernel/token_attention_nopad_reduceV.py", "snippet": "@torch.no_grad()\ndef token_att_fwd2(prob, v, out, B_Loc, B_Start_Loc, B_Seqlen, max_input_len):\n if triton.__version__ >= \"2.1.0\":\n BLOCK = 128\n else:\n BLOCK = 64\n batch, head = B_Loc.shape[0], v.shape[1]\n grid = (batch, head)\n num_warps = 4\n dim = v.shape[-1]\n\n _fwd_kernel_token_att2[grid](\n prob, v, out, B_Loc, B_Start_Loc, B_Seqlen, max_input_len,\n B_Loc.stride(0), B_Loc.stride(1),\n prob.stride(0), prob.stride(1),\n v.stride(0), v.stride(1), v.stride(2),\n out.stride(0), out.stride(1), out.stride(2),\n BLOCK_DMODEL=dim,\n BLOCK_N=BLOCK,\n num_warps=num_warps,\n num_stages=1,\n )\n return" }, { "identifier": "token_att_fwd2_int8v", "path": "slora/models/llama/triton_kernel/token_attention_nopad_reduceV.py", "snippet": "@torch.no_grad()\ndef token_att_fwd2_int8v(prob, v, v_scale, out, B_Loc, B_Start_Loc, B_Seqlen, max_input_len):\n if max_input_len < 512:\n BLOCK = triton.next_power_of_2(max_input_len)\n else:\n BLOCK = 512\n batch, head = B_Loc.shape[0], v.shape[1]\n grid = (batch, head)\n num_warps = 4\n dim = v.shape[-1]\n\n _fwd_kernel_token_att2_int8v[grid](\n prob, v, v_scale, out, B_Loc, B_Start_Loc, B_Seqlen, max_input_len,\n B_Loc.stride(0), B_Loc.stride(1),\n prob.stride(0), prob.stride(1),\n v.stride(0), v.stride(1), v.stride(2),\n v_scale.stride(0), v_scale.stride(1), v_scale.stride(2),\n out.stride(0), out.stride(1), out.stride(2),\n BLOCK_DMODEL=dim,\n BLOCK_N=BLOCK,\n num_warps=num_warps,\n num_stages=1,\n )\n return" }, { "identifier": "rmsnorm_forward", "path": "slora/models/llama/triton_kernel/rmsnorm.py", "snippet": "def 
rmsnorm_forward(x, weight, eps):\n # allocate output\n y = torch.empty_like(x)\n # reshape input data into 2D tensor\n x_arg = x.view(-1, x.shape[-1])\n M, N = x_arg.shape\n # Less than 64KB per feature: enqueue fused kernel\n MAX_FUSED_SIZE = 65536 // x.element_size()\n BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))\n # print(\"BLOCK_SIZE:\", BLOCK_SIZE)\n if N > BLOCK_SIZE:\n raise RuntimeError(\"This layer norm doesn't support feature dim >= 64KB.\")\n # heuristics for number of warps\n num_warps = min(max(BLOCK_SIZE // 256, 1), 8)\n # print(BLOCK_SIZE, num_warps, \"block_size, numwarps\")\n BLOCK_SIZE = 128 * 2 * 2 * 2 * 2 * 2 * 2 * 2\n num_warps = 8\n # enqueue kernel\n _rms_norm_fwd_fused[(M,)](x_arg, y, weight,\n x_arg.stride(0), N, eps,\n BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps)\n return y" }, { "identifier": "rotary_emb_fwd", "path": "slora/models/llama/triton_kernel/rotary_emb.py", "snippet": "@torch.no_grad()\ndef rotary_emb_fwd(q, cos, sin):\n total_len = q.shape[0]\n head_num = q.shape[1]\n head_dim = q.shape[2]\n assert q.shape[0] == cos.shape[0] and q.shape[0] == sin.shape[0], f\"q shape {q.shape} cos shape {cos.shape}\"\n BLOCK_HEAD = 4\n BLOCK_SEQ = 32\n grid = (triton.cdiv(head_num, BLOCK_HEAD), triton.cdiv(total_len, BLOCK_SEQ))\n if head_dim >= 128:\n num_warps = 8\n else:\n num_warps = 4\n\n _rotary_kernel[grid](\n q, cos, sin,\n q.stride(0), q.stride(1), q.stride(2),\n cos.stride(0), cos.stride(1),\n sin.stride(0), sin.stride(1),\n total_len, head_num,\n BLOCK_HEAD=BLOCK_HEAD,\n BLOCK_SEQ=BLOCK_SEQ,\n BLOCK_DMODEL=head_dim,\n num_warps=num_warps,\n num_stages=1,\n )\n return" }, { "identifier": "LlamaInferStateInfo", "path": "slora/models/llama/infer_struct.py", "snippet": "class LlamaInferStateInfo(InferStateInfo):\n def __init__(self):\n super().__init__()\n self.position_cos = None\n self.position_sin = None\n self.other_kv_index = None\n \n def init_some_extra_state(self, \n model, \n batch_size, \n total_token_num,\n max_len_in_batch,\n input_ids : torch.Tensor,\n b_loc : torch.Tensor,\n b_start_loc : torch.Tensor,\n b_seq_len : torch.Tensor,\n is_prefill):\n if is_prefill:\n b_seq_len_numpy = b_seq_len.cpu().numpy()\n position_ids = torch.from_numpy(np.concatenate([np.arange(0, b_seq_len_numpy[i])\n for i in range(len(b_seq_len_numpy))], axis=0)).cuda()\n self.position_cos = torch.index_select(model._cos_cached, 0, position_ids).view(position_ids.shape[0], -1)\n self.position_sin = torch.index_select(model._sin_cached, 0, position_ids).view(position_ids.shape[0], -1)\n position_ids = None\n else:\n self.position_cos = torch.index_select(model._cos_cached, 0, b_seq_len - 1).view(b_seq_len.shape[0], -1)\n self.position_sin = torch.index_select(model._sin_cached, 0, b_seq_len - 1).view(b_seq_len.shape[0], -1)\n self.other_kv_index = b_loc[0, max_len_in_batch - 1].item()\n return" }, { "identifier": "destindex_copy_kv", "path": "slora/common/basemodel/triton_kernel/destindex_copy_kv.py", "snippet": "@torch.no_grad()\ndef destindex_copy_kv(K, DestLoc, Out):\n seq_len = DestLoc.shape[0]\n head_num = K.shape[1]\n head_dim = K.shape[2]\n assert K.shape[1] == Out.shape[1] and K.shape[2] == Out.shape[2]\n BLOCK_HEAD = triton.next_power_of_2(head_num)\n grid = (seq_len,)\n num_warps = 1\n\n _fwd_kernel_destindex_copy_kv[grid](\n K, DestLoc, Out,\n K.stride(0), K.stride(1), K.stride(2),\n Out.stride(0), Out.stride(1), Out.stride(2),\n head_num,\n BLOCK_DMODEL=head_dim,\n BLOCK_HEAD=BLOCK_HEAD,\n num_warps=num_warps,\n num_stages=1,\n )\n return" }, { 
"identifier": "destindex_copy_quantize_kv", "path": "slora/common/basemodel/triton_kernel/destindex_copy_kv.py", "snippet": "@torch.no_grad()\ndef destindex_copy_quantize_kv(K, DestLoc, Out, Out_scale):\n seq_len = DestLoc.shape[0]\n head_num = K.shape[1]\n head_dim = K.shape[2]\n assert K.shape[1] == Out.shape[1] and K.shape[2] == Out.shape[2]\n BLOCK_HEAD = triton.next_power_of_2(head_num)\n grid = (seq_len,)\n num_warps = 1\n\n _fwd_kernel_destindex_copy_quantize_kv[grid](\n K, DestLoc, Out, Out_scale,\n K.stride(0), K.stride(1), K.stride(2),\n Out.stride(0), Out.stride(1), Out.stride(2),\n Out_scale.stride(0), Out_scale.stride(1), Out_scale.stride(2),\n head_num,\n BLOCK_DMODEL=head_dim,\n BLOCK_HEAD=BLOCK_HEAD,\n num_warps=num_warps,\n num_stages=1,\n )\n return" }, { "identifier": "TransformerLayerInferTpl", "path": "slora/common/basemodel/layer_infer/template/transformer_layer_infer_template.py", "snippet": "class TransformerLayerInferTpl(TransformerLayerInfer):\n \"\"\"\n \"\"\"\n def __init__(self, layer_num, tp_rank, world_size, network_config, mode):\n super().__init__(layer_num, tp_rank, world_size, network_config, mode)\n # need to set by subclass\n self.eps_ = 1e-5 \n self.tp_q_head_num_ = -1\n self.tp_k_head_num_ = -1\n self.tp_v_head_num_ = -1\n self.tp_o_head_num_ = -1\n self.head_dim_ = -1\n self.embed_dim_ = -1\n return\n \n def _att_norm(self, input, infer_state:InferStateInfo, layer_weight)->torch.Tensor:\n raise Exception(\"need to impl\")\n \n def _ffn_norm(self, input, infer_state:InferStateInfo, layer_weight)->torch.Tensor:\n raise Exception(\"need to impl\")\n \n def _pre_cache_kv(self, infer_state:InferStateInfo, layer_weight)->Tuple[torch.Tensor, torch.Tensor]:\n # prefill cache_k cache_v\n if infer_state.is_prefill:\n cache_k = infer_state.prefill_key_buffer\n cache_v = infer_state.prefill_value_buffer\n return cache_k, cache_v\n # decode cache_k cache_v\n else:\n if infer_state.decode_is_contiguous:\n cache_k = infer_state.mem_manager.key_buffer[self.layer_num_][infer_state.decode_mem_start:infer_state.decode_mem_end, :, :]\n cache_v = infer_state.mem_manager.value_buffer[self.layer_num_][infer_state.decode_mem_start:infer_state.decode_mem_end, :, :]\n else:\n cache_k = infer_state.decode_key_buffer\n cache_v = infer_state.decode_value_buffer\n return cache_k, cache_v\n return\n\n def _get_qkv(self, input, cache_k, cache_v, infer_state:InferStateInfo, layer_weight)->torch.Tensor:\n raise Exception(\"need to impl\")\n \n def _post_cache_kv(self, cache_k, cache_v, infer_state:InferStateInfo, layer_weight):\n mem_manager = infer_state.mem_manager\n if infer_state.is_prefill:\n destindex_copy_kv(cache_k, infer_state.prefill_mem_index, mem_manager.key_buffer[self.layer_num_])\n destindex_copy_kv(cache_v, infer_state.prefill_mem_index, mem_manager.value_buffer[self.layer_num_])\n return\n else:\n if not infer_state.decode_is_contiguous:\n destindex_copy_kv(cache_k, infer_state.decode_mem_index, mem_manager.key_buffer[self.layer_num_])\n destindex_copy_kv(cache_v, infer_state.decode_mem_index, mem_manager.value_buffer[self.layer_num_])\n return\n return\n \n def _context_attention_kernel(self, q, k, v, infer_state:InferStateInfo, layer_weight)->torch.Tensor:\n raise Exception(\"need to impl\")\n \n def _token_attention_kernel(self, q, infer_state:InferStateInfo, layer_weight)->torch.Tensor:\n raise Exception(\"need to impl\")\n\n def _get_o(self, input, infer_state:InferStateInfo, layer_weight)->torch.Tensor:\n raise Exception(\"need to impl\")\n\n def _ffn(self, 
input, infer_state:InferStateInfo, layer_weight)->torch.Tensor:\n raise Exception(\"need to impl\")\n\n\n @mark_cost_time(\"trans context flash forward time cost\") # dont to remove this, will make performence down, did not know why\n def _context_attention(self, input_embding, infer_state: InferStateInfo, layer_weight):\n input1 = self._att_norm(input_embding, infer_state, layer_weight)\n cache_k, cache_v = self._pre_cache_kv(infer_state, layer_weight)\n q = self._get_qkv(input1, cache_k, cache_v, infer_state, layer_weight)\n input1 = None\n self._post_cache_kv(cache_k, cache_v, infer_state, layer_weight)\n o = self._context_attention_kernel(q, cache_k, cache_v, infer_state, layer_weight)\n q = None\n o = self._get_o(o, infer_state, layer_weight)\n if self.world_size_ > 1:\n dist.all_reduce(o, op=dist.ReduceOp.SUM, async_op=False)\n input_embding.add_(o.view(-1, self.embed_dim_))\n return\n\n @mark_cost_time(\"trans context ffn forward time cost\") # dont to remove this, will make performence down, did not know why\n def _context_ffn(self, input_embdings, infer_state: InferStateInfo, layer_weight):\n input1 = self._ffn_norm(input_embdings, infer_state, layer_weight)\n ffn_out = self._ffn(input1, infer_state, layer_weight)\n input1 = None\n if self.world_size_ > 1:\n dist.all_reduce(ffn_out, op=dist.ReduceOp.SUM, async_op=False)\n input_embdings.add_(ffn_out.view(-1, self.embed_dim_))\n return\n\n # this impl dont to use @mark_cost_time\n def _token_attention(self, input_embding, infer_state: InferStateInfo, layer_weight):\n input1 = self._att_norm(input_embding, infer_state, layer_weight)\n cache_k, cache_v = self._pre_cache_kv(infer_state, layer_weight)\n q = self._get_qkv(input1, cache_k, cache_v, infer_state, layer_weight)\n input1 = None\n self._post_cache_kv(cache_k, cache_v, infer_state, layer_weight)\n o = self._token_attention_kernel(q, infer_state, layer_weight)\n q = None\n o = self._get_o(o, infer_state, layer_weight)\n if self.world_size_ > 1:\n dist.all_reduce(o, op=dist.ReduceOp.SUM, async_op=False)\n input_embding.add_(o.view(-1, self.embed_dim_))\n return\n\n # this impl dont to use @mark_cost_time\n def _token_ffn(self, input_embdings, infer_state: InferStateInfo, layer_weight):\n input1 = self._ffn_norm(input_embdings, infer_state, layer_weight)\n ffn_out = self._ffn(input1, infer_state, layer_weight)\n input1 = None\n if self.world_size_ > 1:\n dist.all_reduce(ffn_out, op=dist.ReduceOp.SUM, async_op=False)\n input_embdings.add_(ffn_out.view(-1, self.embed_dim_))\n return\n \n\n def context_forward(self, input_embdings, infer_state: InferStateInfo, layer_weight):\n self._context_attention(input_embdings,\n infer_state,\n layer_weight=layer_weight)\n self._context_ffn(input_embdings, infer_state, layer_weight)\n return input_embdings\n\n def token_forward(self, input_embdings, infer_state: InferStateInfo, layer_weight):\n self._token_attention(input_embdings,\n infer_state,\n layer_weight=layer_weight)\n self._token_ffn(input_embdings, infer_state, layer_weight)\n return input_embdings" } ]
import torch
import torch.functional as F
import torch.distributed as dist
import numpy as np
import triton
from typing import Tuple
from slora.models.llama.layer_weights.transformer_layer_weight import LlamaTransformerLayerWeight
from slora.models.llama.triton_kernel.context_flashattention_nopad import context_attention_fwd
from slora.models.llama.triton_kernel.token_attention_nopad_att1 import token_att_fwd, token_att_fwd_int8k
from slora.models.llama.triton_kernel.token_attention_nopad_softmax import token_softmax_fwd
from slora.models.llama.triton_kernel.token_attention_nopad_reduceV import token_att_fwd2, token_att_fwd2_int8v
from slora.models.llama.triton_kernel.rmsnorm import rmsnorm_forward
from slora.models.llama.triton_kernel.rotary_emb import rotary_emb_fwd
from slora.models.llama.infer_struct import LlamaInferStateInfo
from slora.common.basemodel.triton_kernel.destindex_copy_kv import destindex_copy_kv, destindex_copy_quantize_kv
from slora.common.basemodel import TransformerLayerInferTpl
from slora.models.llama.triton_kernel.token_attention_softmax_and_reducev import token_softmax_reducev_fwd
8,352
class LlamaTransformerLayerInfer(TransformerLayerInferTpl): """ """ def __init__(self, layer_num, tp_rank, world_size, network_config, mode=[]): super().__init__(layer_num, tp_rank, world_size, network_config, mode) self.eps_ = network_config["rms_norm_eps"] self.tp_q_head_num_ = network_config["num_attention_heads"] // self.world_size_ self.tp_k_head_num_ = self.tp_q_head_num_ self.tp_v_head_num_ = self.tp_q_head_num_ self.tp_o_head_num_ = self.tp_q_head_num_ self.head_dim_ = network_config["hidden_size"] // network_config["num_attention_heads"] self.embed_dim_ = network_config["hidden_size"] return def _att_norm(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor: return rmsnorm_forward(input, weight=layer_weight.att_norm_weight_, eps=self.eps_) def _ffn_norm(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor: return rmsnorm_forward(input, weight=layer_weight.ffn_norm_weight_, eps=self.eps_) def _get_qkv(self, input, cache_k, cache_v, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor: q = torch.mm(input.view(-1, self.embed_dim_), layer_weight.q_weight_) rotary_emb_fwd(q.view(-1, self.tp_q_head_num_, self.head_dim_), infer_state.position_cos, infer_state.position_sin) torch.mm(input.view(-1, self.embed_dim_), layer_weight.k_weight_, out=cache_k.view(-1, self.tp_k_head_num_ * self.head_dim_)) rotary_emb_fwd(cache_k, infer_state.position_cos, infer_state.position_sin) torch.mm(input.view(-1, self.embed_dim_), layer_weight.v_weight_, out=cache_v.view(-1, self.tp_v_head_num_ * self.head_dim_)) return q def _post_cache_kv(self, cache_k, cache_v, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight): mem_manager = infer_state.mem_manager if infer_state.is_prefill: self._copy_kv_to_mem_cache(cache_k, cache_v, infer_state.prefill_mem_index, mem_manager) return else: if not infer_state.decode_is_contiguous: self._copy_kv_to_mem_cache(cache_k, cache_v, infer_state.decode_mem_index, mem_manager) return return def _context_attention_kernel(self, q, k, v, infer_state:LlamaInferStateInfo, layer_weight)->torch.Tensor: o_tensor = torch.empty_like(q) context_attention_fwd(q.view(-1, self.tp_q_head_num_, self.head_dim_), k.view(-1, self.tp_k_head_num_, self.head_dim_), v.view(-1, self.tp_v_head_num_, self.head_dim_), o_tensor.view(-1, self.tp_q_head_num_, self.head_dim_), infer_state.b_start_loc, infer_state.b_seq_len, infer_state.max_len_in_batch) return o_tensor def _token_attention_kernel(self, q, infer_state:LlamaInferStateInfo, layer_weight)->torch.Tensor: return self._token_decode_attention_mode(q, infer_state) def _get_o(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor: o_tensor = torch.mm(input.view(-1, self.tp_o_head_num_ * self.head_dim_), layer_weight.o_weight_) return o_tensor def _ffn(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor: gate_out = torch.mm(input.view(-1, self.embed_dim_), layer_weight.gate_proj) torch.nn.functional.silu(gate_out, inplace=True) up_out = torch.mm(input.view(-1, self.embed_dim_), layer_weight.up_proj) input = None ffn1_out = gate_out * up_out gate_out, up_out = None, None ffn2_out = torch.mm(ffn1_out, layer_weight.down_proj) ffn1_out = None return ffn2_out def _copy_kv_to_mem_cache(self, key_buffer, value_buffer, mem_index, mem_manager): if "int8kv" in self.mode: destindex_copy_quantize_kv(key_buffer, 
mem_index, mem_manager.key_buffer[self.layer_num_], mem_manager.key_scale_buffer[self.layer_num_]) destindex_copy_quantize_kv(value_buffer, mem_index, mem_manager.value_buffer[self.layer_num_], mem_manager.value_scale_buffer[self.layer_num_]) else: destindex_copy_kv(key_buffer, mem_index, mem_manager.key_buffer[self.layer_num_]) destindex_copy_kv(value_buffer, mem_index, mem_manager.value_buffer[self.layer_num_]) def _token_decode_attention_normal(self, q, infer_state: LlamaInferStateInfo): total_token_num = infer_state.total_token_num batch_size = infer_state.batch_size calcu_shape1 = (batch_size, self.tp_q_head_num_, self.head_dim_) att_m_tensor = torch.empty((self.tp_q_head_num_, total_token_num), dtype=q.dtype, device="cuda") token_att_fwd(q.view(calcu_shape1), infer_state.mem_manager.key_buffer[self.layer_num_], att_m_tensor, infer_state.b_loc, infer_state.b_start_loc, infer_state.b_seq_len, infer_state.max_len_in_batch) if triton.__version__ == "2.0.0": prob = torch.empty_like(att_m_tensor)
class LlamaTransformerLayerInfer(TransformerLayerInferTpl): """ """ def __init__(self, layer_num, tp_rank, world_size, network_config, mode=[]): super().__init__(layer_num, tp_rank, world_size, network_config, mode) self.eps_ = network_config["rms_norm_eps"] self.tp_q_head_num_ = network_config["num_attention_heads"] // self.world_size_ self.tp_k_head_num_ = self.tp_q_head_num_ self.tp_v_head_num_ = self.tp_q_head_num_ self.tp_o_head_num_ = self.tp_q_head_num_ self.head_dim_ = network_config["hidden_size"] // network_config["num_attention_heads"] self.embed_dim_ = network_config["hidden_size"] return def _att_norm(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor: return rmsnorm_forward(input, weight=layer_weight.att_norm_weight_, eps=self.eps_) def _ffn_norm(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor: return rmsnorm_forward(input, weight=layer_weight.ffn_norm_weight_, eps=self.eps_) def _get_qkv(self, input, cache_k, cache_v, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor: q = torch.mm(input.view(-1, self.embed_dim_), layer_weight.q_weight_) rotary_emb_fwd(q.view(-1, self.tp_q_head_num_, self.head_dim_), infer_state.position_cos, infer_state.position_sin) torch.mm(input.view(-1, self.embed_dim_), layer_weight.k_weight_, out=cache_k.view(-1, self.tp_k_head_num_ * self.head_dim_)) rotary_emb_fwd(cache_k, infer_state.position_cos, infer_state.position_sin) torch.mm(input.view(-1, self.embed_dim_), layer_weight.v_weight_, out=cache_v.view(-1, self.tp_v_head_num_ * self.head_dim_)) return q def _post_cache_kv(self, cache_k, cache_v, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight): mem_manager = infer_state.mem_manager if infer_state.is_prefill: self._copy_kv_to_mem_cache(cache_k, cache_v, infer_state.prefill_mem_index, mem_manager) return else: if not infer_state.decode_is_contiguous: self._copy_kv_to_mem_cache(cache_k, cache_v, infer_state.decode_mem_index, mem_manager) return return def _context_attention_kernel(self, q, k, v, infer_state:LlamaInferStateInfo, layer_weight)->torch.Tensor: o_tensor = torch.empty_like(q) context_attention_fwd(q.view(-1, self.tp_q_head_num_, self.head_dim_), k.view(-1, self.tp_k_head_num_, self.head_dim_), v.view(-1, self.tp_v_head_num_, self.head_dim_), o_tensor.view(-1, self.tp_q_head_num_, self.head_dim_), infer_state.b_start_loc, infer_state.b_seq_len, infer_state.max_len_in_batch) return o_tensor def _token_attention_kernel(self, q, infer_state:LlamaInferStateInfo, layer_weight)->torch.Tensor: return self._token_decode_attention_mode(q, infer_state) def _get_o(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor: o_tensor = torch.mm(input.view(-1, self.tp_o_head_num_ * self.head_dim_), layer_weight.o_weight_) return o_tensor def _ffn(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor: gate_out = torch.mm(input.view(-1, self.embed_dim_), layer_weight.gate_proj) torch.nn.functional.silu(gate_out, inplace=True) up_out = torch.mm(input.view(-1, self.embed_dim_), layer_weight.up_proj) input = None ffn1_out = gate_out * up_out gate_out, up_out = None, None ffn2_out = torch.mm(ffn1_out, layer_weight.down_proj) ffn1_out = None return ffn2_out def _copy_kv_to_mem_cache(self, key_buffer, value_buffer, mem_index, mem_manager): if "int8kv" in self.mode: destindex_copy_quantize_kv(key_buffer, 
mem_index, mem_manager.key_buffer[self.layer_num_], mem_manager.key_scale_buffer[self.layer_num_]) destindex_copy_quantize_kv(value_buffer, mem_index, mem_manager.value_buffer[self.layer_num_], mem_manager.value_scale_buffer[self.layer_num_]) else: destindex_copy_kv(key_buffer, mem_index, mem_manager.key_buffer[self.layer_num_]) destindex_copy_kv(value_buffer, mem_index, mem_manager.value_buffer[self.layer_num_]) def _token_decode_attention_normal(self, q, infer_state: LlamaInferStateInfo): total_token_num = infer_state.total_token_num batch_size = infer_state.batch_size calcu_shape1 = (batch_size, self.tp_q_head_num_, self.head_dim_) att_m_tensor = torch.empty((self.tp_q_head_num_, total_token_num), dtype=q.dtype, device="cuda") token_att_fwd(q.view(calcu_shape1), infer_state.mem_manager.key_buffer[self.layer_num_], att_m_tensor, infer_state.b_loc, infer_state.b_start_loc, infer_state.b_seq_len, infer_state.max_len_in_batch) if triton.__version__ == "2.0.0": prob = torch.empty_like(att_m_tensor)
token_softmax_fwd(att_m_tensor, infer_state.b_start_loc, infer_state.b_seq_len, prob, infer_state.max_len_in_batch)
4
2023-11-05 04:08:36+00:00
12k
Yuliang-Liu/Monkey
finetune_multitask.py
[ { "identifier": "MonkeyLMHeadModel", "path": "monkey_model/modeling_monkey.py", "snippet": "class MonkeyLMHeadModel(QWenLMHeadModel):\n _keys_to_ignore_on_load_missing = [r\"h\\.\\d+\\.attn\\.rotary_emb\\.inv_freq\"]\n _keys_to_ignore_on_load_unexpected = [r\"h\\.\\d+\\.attn\\.masked_bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n assert (\n config.bf16 + config.fp16 + config.fp32 <= 1\n ), \"Only one of \\\"bf16\\\", \\\"fp16\\\", \\\"fp32\\\" can be true\"\n\n autoset_precision = config.bf16 + config.fp16 + config.fp32 == 0\n\n if autoset_precision:\n if SUPPORT_BF16:\n logger.warn(\n \"The model is automatically converting to bf16 for faster inference. \"\n \"If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \\\"AutoModelForCausalLM.from_pretrained\\\".\"\n )\n config.bf16 = True\n elif SUPPORT_FP16:\n logger.warn(\n \"The model is automatically converting to fp16 for faster inference. \"\n \"If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \\\"AutoModelForCausalLM.from_pretrained\\\".\"\n )\n config.fp16 = True\n else:\n config.fp32 = True\n\n if config.bf16 and SUPPORT_CUDA and not SUPPORT_BF16:\n logger.warn(\"Your device does NOT seem to support bf16, you can switch to fp16 or fp32 by by passing fp16/fp32=True in \\\"AutoModelForCausalLM.from_pretrained\\\".\")\n if config.fp16 and SUPPORT_CUDA and not SUPPORT_FP16:\n logger.warn(\"Your device does NOT support faster inference with fp16, please switch to fp32 which is likely to be faster\")\n if config.fp32:\n if SUPPORT_BF16:\n logger.warn(\"Your device support faster inference by passing bf16=True in \\\"AutoModelForCausalLM.from_pretrained\\\".\")\n elif SUPPORT_FP16:\n logger.warn(\"Your device support faster inference by passing fp16=True in \\\"AutoModelForCausalLM.from_pretrained\\\".\")\n\n self.transformer = MonkeyModel(config)\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n if config.bf16:\n self.transformer.bfloat16()\n self.lm_head.bfloat16()\n if config.fp16:\n self.transformer.half()\n self.lm_head.half()\n self.post_init()" }, { "identifier": "QWenTokenizer", "path": "monkey_model/tokenization_qwen.py", "snippet": "class QWenTokenizer(PreTrainedTokenizer):\n \"\"\"QWen tokenizer.\"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n\n def __init__(\n self,\n vocab_file,\n errors=\"replace\",\n image_start_tag='<img>',\n image_end_tag='</img>',\n image_pad_tag='<imgpad>',\n ref_start_tag='<ref>',\n ref_end_tag='</ref>',\n box_start_tag='<box>',\n box_end_tag='</box>',\n quad_start_tag='<quad>',\n quad_end_tag='</quad>',\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.image_start_tag = image_start_tag\n self.image_end_tag = image_end_tag\n self.image_pad_tag = image_pad_tag\n self.ref_start_tag = ref_start_tag\n self.ref_end_tag = ref_end_tag\n self.box_start_tag = box_start_tag\n self.box_end_tag = box_end_tag\n self.quad_start_tag = quad_start_tag\n self.quad_end_tag = quad_end_tag\n self.IMAGE_ST = (\n ref_start_tag, ref_end_tag,\n box_start_tag, box_end_tag,\n quad_start_tag, quad_end_tag,\n image_start_tag, image_end_tag,\n image_pad_tag\n )\n\n self.errors = errors # how to handle errors in decoding\n\n self.mergeable_ranks = _load_tiktoken_bpe(vocab_file) # type: dict[bytes, int]\n self.special_tokens = {\n token: index\n for index, token in enumerate(\n SPECIAL_TOKENS + self.IMAGE_ST, start=len(self.mergeable_ranks)\n )\n }\n self.img_start_id = 
self.special_tokens[self.image_start_tag]\n self.img_end_id = self.special_tokens[self.image_end_tag]\n self.img_pad_id = self.special_tokens[self.image_pad_tag]\n self.ref_start_id = self.special_tokens[self.ref_start_tag]\n self.ref_end_id = self.special_tokens[self.ref_end_tag]\n self.box_start_id = self.special_tokens[self.box_start_tag]\n self.box_end_id = self.special_tokens[self.box_end_tag]\n self.quad_start_id = self.special_tokens[self.quad_start_tag]\n self.quad_end_id = self.special_tokens[self.quad_end_tag]\n\n enc = tiktoken.Encoding(\n \"Qwen\",\n pat_str=PAT_STR,\n mergeable_ranks=self.mergeable_ranks,\n special_tokens=self.special_tokens,\n )\n assert (\n len(self.mergeable_ranks) + len(self.special_tokens) == enc.n_vocab\n ), f\"{len(self.mergeable_ranks) + len(self.special_tokens)} != {enc.n_vocab} in encoding\"\n\n self.decoder = {\n v: k for k, v in self.mergeable_ranks.items()\n } # type: dict[int, bytes|str]\n self.decoder.update({v: k for k, v in self.special_tokens.items()})\n\n self.tokenizer = enc # type: tiktoken.Encoding\n\n self.eod_id = self.tokenizer.eot_token\n self.im_start_id = self.special_tokens[IMSTART]\n self.im_end_id = self.special_tokens[IMEND]\n\n def __getstate__(self):\n # for pickle lovers\n state = self.__dict__.copy()\n del state['tokenizer']\n return state\n\n def __setstate__(self, state):\n # tokenizer is not python native; don't pass it; rebuild it\n self.__dict__.update(state)\n enc = tiktoken.Encoding(\n \"Qwen\",\n pat_str=PAT_STR,\n mergeable_ranks=self.mergeable_ranks,\n special_tokens=self.special_tokens,\n )\n self.tokenizer = enc\n\n\n def __len__(self) -> int:\n return self.tokenizer.n_vocab\n\n def get_vocab(self) -> Dict[bytes, int]:\n return self.mergeable_ranks\n\n def convert_tokens_to_ids(\n self, tokens: Union[bytes, str, List[Union[bytes, str]]]\n ) -> List[int]:\n ids = []\n if isinstance(tokens, (str, bytes)):\n if tokens in self.special_tokens:\n return self.special_tokens[tokens]\n else:\n return self.mergeable_ranks.get(tokens)\n for token in tokens:\n if token in self.special_tokens:\n ids.append(self.special_tokens[token])\n else:\n ids.append(self.mergeable_ranks.get(token))\n return ids\n\n def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:\n if not special_tokens and new_tokens:\n raise ValueError('Adding regular tokens is not supported')\n for token in new_tokens:\n surface_form = token.content if isinstance(token, AddedToken) else token\n if surface_form not in SPECIAL_TOKENS + self.IMAGE_ST:\n raise ValueError('Adding unknown special tokens is not supported')\n return 0\n\n def save_vocabulary(self, save_directory: str, **kwargs) -> Tuple[str]:\n \"\"\"\n Save only the vocabulary of the tokenizer (vocabulary).\n\n Returns:\n `Tuple(str)`: Paths to the files saved.\n \"\"\"\n file_path = os.path.join(save_directory, \"qwen.tiktoken\")\n with open(file_path, \"w\", encoding=\"utf8\") as w:\n for k, v in self.mergeable_ranks.items():\n line = base64.b64encode(k).decode(\"utf8\") + \" \" + str(v) + \"\\n\"\n w.write(line)\n return (file_path,)\n\n def tokenize(\n self,\n text: str,\n allowed_special: Union[Set, str] = \"all\",\n disallowed_special: Union[Collection, str] = (),\n **kwargs,\n ) -> List[Union[bytes, str]]:\n \"\"\"\n Converts a string in a sequence of tokens.\n\n Args:\n text (`str`):\n The sequence to be encoded.\n allowed_special (`Literal[\"all\"]` or `set`):\n The surface forms of the tokens to be encoded as special tokens in regular 
texts.\n Default to \"all\".\n disallowed_special (`Literal[\"all\"]` or `Collection`):\n The surface forms of the tokens that should not be in regular texts and trigger errors.\n Default to an empty tuple.\n\n kwargs (additional keyword arguments, *optional*):\n Will be passed to the underlying model specific encode method.\n\n Returns:\n `List[bytes|str]`: The list of tokens.\n \"\"\"\n tokens = []\n text = unicodedata.normalize(\"NFC\", text)\n\n # this implementation takes a detour: text -> token id -> token surface forms\n for t in self.tokenizer.encode(\n text, allowed_special=allowed_special, disallowed_special=disallowed_special\n ):\n tokens.append(self.decoder[t])\n\n def _encode_imgurl(img_tokens):\n assert img_tokens[0] == self.image_start_tag and img_tokens[-1] == self.image_end_tag\n img_tokens = img_tokens[1:-1]\n img_url = b''.join(img_tokens)\n out_img_tokens = list(map(self.decoder.get, img_url))\n if len(out_img_tokens) > IMG_TOKEN_SPAN:\n raise ValueError(\"The content in {}..{} is too long\".format(\n self.image_start_tag, self.image_end_tag))\n out_img_tokens.extend([self.image_pad_tag] * (IMG_TOKEN_SPAN - len(out_img_tokens)))\n out_img_tokens = [self.image_start_tag] + out_img_tokens + [self.image_end_tag]\n return out_img_tokens\n\n return _replace_closed_tag(tokens, self.image_start_tag, self.image_end_tag, _encode_imgurl)\n\n def convert_tokens_to_string(self, tokens: List[Union[bytes, str]]) -> str:\n \"\"\"\n Converts a sequence of tokens in a single string.\n \"\"\"\n text = \"\"\n temp = b\"\"\n for t in tokens:\n if isinstance(t, str):\n if temp:\n text += temp.decode(\"utf-8\", errors=self.errors)\n temp = b\"\"\n text += t\n elif isinstance(t, bytes):\n temp += t\n else:\n raise TypeError(\"token should only be of type types or str\")\n if temp:\n text += temp.decode(\"utf-8\", errors=self.errors)\n return text\n\n @property\n def vocab_size(self):\n return self.tokenizer.n_vocab\n\n def _convert_id_to_token(self, index: int) -> Union[bytes, str]:\n \"\"\"Converts an id to a token, special tokens included\"\"\"\n if index in self.decoder:\n return self.decoder[index]\n raise ValueError(\"unknown ids\")\n\n def _convert_token_to_id(self, token: Union[bytes, str]) -> int:\n \"\"\"Converts a token to an id using the vocab, special tokens included\"\"\"\n if token in self.special_tokens:\n return self.special_tokens[token]\n if token in self.mergeable_ranks:\n return self.mergeable_ranks[token]\n raise ValueError(\"unknown token\")\n\n def _tokenize(self, text: str, **kwargs):\n \"\"\"\n Converts a string in a sequence of tokens (string), using the tokenizer. 
Split in words for word-based\n vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).\n\n Do NOT take care of added tokens.\n \"\"\"\n raise NotImplementedError\n\n def _decode(\n self,\n token_ids: Union[int, List[int]],\n skip_special_tokens: bool = False,\n errors: str = None,\n **kwargs,\n ) -> str:\n if isinstance(token_ids, int):\n token_ids = [token_ids]\n\n def _decode_imgurl(img_token_ids):\n assert img_token_ids[0] == self.img_start_id and img_token_ids[-1] == self.img_end_id\n img_token_ids = img_token_ids[1:-1]\n img_token_ids = img_token_ids[ : img_token_ids.index(self.img_pad_id)]\n img_url = bytes(img_token_ids).decode('utf-8')\n return [self.img_start_id] + self.tokenizer.encode(img_url) + [self.img_end_id]\n\n token_ids = _replace_closed_tag(token_ids, self.img_start_id, self.img_end_id, _decode_imgurl)\n\n if skip_special_tokens:\n token_ids = [i for i in token_ids if i < self.eod_id]\n return self.tokenizer.decode(token_ids, errors=errors or self.errors)\n\n def to_list_format(self, text: str):\n text = unicodedata.normalize(\"NFC\", text)\n token_ids = self.tokenizer.encode(\n text, allowed_special=set(self.IMAGE_ST + (ENDOFTEXT,)))\n\n def _encode_vl_info(tokens):\n if len(tokens) == 0:\n return []\n if tokens[0] == self.img_start_id and tokens[-1] == self.img_end_id:\n key = 'image'\n elif tokens[0] == self.ref_start_id and tokens[-1] == self.ref_end_id:\n key = 'ref'\n elif tokens[0] == self.box_start_id and tokens[-1] == self.box_end_id:\n key = 'box'\n elif tokens[0] == self.quad_start_id and tokens[-1] == self.quad_end_id:\n key = 'quad'\n else:\n _tobytes = lambda x: x.encode('utf-8') if isinstance(x, str) else x\n return [{'text': b''.join(map(_tobytes, map(self.decoder.get, tokens))).decode('utf-8')}]\n _tobytes = lambda x: x.encode('utf-8') if isinstance(x, str) else x\n val = b''.join(map(_tobytes, map(self.decoder.get, tokens[1:-1]))).decode('utf-8')\n return [{key: val}]\n\n return _replace_closed_tag(\n token_ids,\n (self.img_start_id, self.ref_start_id, self.box_start_id, self.quad_start_id),\n (self.img_end_id, self.ref_end_id, self.box_end_id, self.quad_end_id),\n _encode_vl_info,\n _encode_vl_info,\n )\n\n def from_list_format(self, list_format: List[Dict]):\n text = ''\n num_images = 0\n for ele in list_format:\n if 'image' in ele:\n num_images += 1\n text += f'Picture {num_images}:'\n text += self.image_start_tag + ele['image'] + self.image_end_tag\n text += '\\n'\n elif 'text' in ele:\n text += ele['text']\n elif 'box' in ele:\n if 'ref' in ele:\n text += self.ref_start_tag + ele['ref'] + self.ref_end_tag\n for box in ele['box']:\n text += self.box_start_tag + '(%d,%d),(%d,%d)' % (box[0], box[1], box[2], box[3]) + self.box_end_tag\n else:\n raise ValueError(\"Unsupport element: \" + str(ele))\n return text\n\n def _fetch_latest_picture(self, response, history):\n if history is None:\n history = []\n _history = history + [(response, None)]\n for q, r in _history[::-1]:\n for ele in self.to_list_format(q)[::-1]:\n if 'image' in ele:\n return ele['image']\n return None\n\n def _fetch_all_box_with_ref(self, text):\n list_format = self.to_list_format(text)\n output = []\n for i, ele in enumerate(list_format):\n if 'box' in ele:\n bbox = tuple(map(int, ele['box'].replace('(', '').replace(')', '').split(',')))\n assert len(bbox) == 4\n output.append({'box': bbox})\n if i > 0 and 'ref' in list_format[i-1]:\n output[-1]['ref'] = list_format[i-1]['ref'].strip()\n return output\n\n def draw_bbox_on_latest_picture(\n self,\n 
response,\n history=None,\n ) -> Optional[Image.Image]:\n image = self._fetch_latest_picture(response, history)\n if image is None:\n return None\n if image.startswith(\"http://\") or image.startswith(\"https://\"):\n image = Image.open(requests.get(image, stream=True).raw).convert(\"RGB\")\n h, w = image.height, image.width\n else:\n image = np.asarray(Image.open(image).convert(\"RGB\"))\n h, w = image.shape[0], image.shape[1]\n visualizer = Visualizer(image)\n\n boxes = self._fetch_all_box_with_ref(response)\n if not boxes:\n return None\n color = random.choice([_ for _ in mcolors.TABLEAU_COLORS.keys()]) # init color\n for box in boxes:\n if 'ref' in box: # random new color for new refexps\n color = random.choice([_ for _ in mcolors.TABLEAU_COLORS.keys()])\n x1, y1, x2, y2 = box['box']\n x1, y1, x2, y2 = (int(x1 / 1000 * w), int(y1 / 1000 * h), int(x2 / 1000 * w), int(y2 / 1000 * h))\n visualizer.draw_box((x1, y1, x2, y2), alpha=1, edge_color=color)\n if 'ref' in box:\n visualizer.draw_text(box['ref'], (x1, y1), color=color, horizontal_alignment=\"left\")\n return visualizer.output" }, { "identifier": "MonkeyConfig", "path": "monkey_model/configuration_monkey.py", "snippet": "class MonkeyConfig(PretrainedConfig):\n model_type = \"monkey\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n\n def __init__(\n self,\n vocab_size=151936,\n hidden_size=4096,\n num_hidden_layers=32,\n num_attention_heads=32,\n emb_dropout_prob=0.0,\n attn_dropout_prob=0.0,\n layer_norm_epsilon=1e-6,\n initializer_range=0.02,\n max_position_embeddings=8192,\n scale_attn_weights=True,\n use_cache=True,\n bf16=False,\n fp16=False,\n fp32=False,\n kv_channels=128,\n rotary_pct=1.0,\n rotary_emb_base=10000,\n use_dynamic_ntk=True,\n use_logn_attn=True,\n use_flash_attn=\"auto\",\n intermediate_size=22016,\n no_bias=True,\n tie_word_embeddings=False,\n **kwargs,\n ):\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.emb_dropout_prob = emb_dropout_prob\n self.attn_dropout_prob = attn_dropout_prob\n self.layer_norm_epsilon = layer_norm_epsilon\n self.initializer_range = initializer_range\n self.scale_attn_weights = scale_attn_weights\n self.use_cache = use_cache\n self.max_position_embeddings = max_position_embeddings\n self.bf16 = bf16\n self.fp16 = fp16\n self.fp32 = fp32\n self.kv_channels = kv_channels\n self.rotary_pct = rotary_pct\n self.rotary_emb_base = rotary_emb_base\n self.use_dynamic_ntk = use_dynamic_ntk\n self.use_logn_attn = use_logn_attn\n self.use_flash_attn = use_flash_attn\n self.no_bias = no_bias\n super().__init__(\n tie_word_embeddings=tie_word_embeddings,\n **kwargs\n )" } ]
from dataclasses import dataclass, field
from typing import Dict, Optional, List
from torch.utils.data import Dataset
from deepspeed import zero
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
from transformers import Trainer, GPTQConfig, deepspeed
from transformers.trainer_pt_utils import LabelSmoother
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from accelerate.utils import DistributedType
from monkey_model.modeling_monkey import MonkeyLMHeadModel
from monkey_model.tokenization_qwen import QWenTokenizer
from monkey_model.configuration_monkey import MonkeyConfig
import json
import math
import logging
import os
import torch
import transformers
import numpy as np
import random
7345
def __len__(self): return len(self.input_ids) def __getitem__(self, i) -> Dict[str, torch.Tensor]: return dict( input_ids=self.input_ids[i], labels=self.labels[i], attention_mask=self.attention_mask[i], ) class LazySupervisedDataset(Dataset): """Dataset for supervised fine-tuning.""" def __init__(self, raw_data, tokenizer: transformers.PreTrainedTokenizer, max_len: int): super(LazySupervisedDataset, self).__init__() self.tokenizer = tokenizer self.max_len = max_len rank0_print("Formatting inputs...Skip in lazy mode") self.tokenizer = tokenizer self.raw_data = raw_data self.cached_data_dict = {} def __len__(self): return len(self.raw_data) def __getitem__(self, i) -> Dict[str, torch.Tensor]: if i in self.cached_data_dict: return self.cached_data_dict[i] ret = preprocess(self.raw_data[i]["conversations"], self.tokenizer, self.max_len) ret = dict( input_ids=ret["input_ids"], labels=ret["labels"], attention_mask=ret["attention_mask"], ) self.cached_data_dict[i] = ret return ret def make_supervised_data_module( tokenizer: transformers.PreTrainedTokenizer, data_args, max_len, ) -> Dict: """Make dataset and collator for supervised fine-tuning.""" dataset_cls = ( LazySupervisedDataset if data_args.lazy_preprocess else SupervisedDataset ) rank0_print("Loading data...") train_json = json.load(open(data_args.data_path, "r")) train_dataset = dataset_cls(train_json, tokenizer=tokenizer, max_len=max_len) if data_args.eval_data_path: eval_json = json.load(open(data_args.eval_data_path, "r")) eval_dataset = dataset_cls(eval_json, tokenizer=tokenizer, max_len=max_len) else: eval_dataset = None return dict(train_dataset=train_dataset, eval_dataset=eval_dataset) def print_trainable_params(model: torch.nn.Module): trainable_params, all_param = 0, 0 for param in model.parameters(): num_params = param.numel() all_param += num_params if param.requires_grad: trainable_params += num_params rank0_print("trainable params: {:d} || all params: {:d} || trainable%: {:.4f}".format( trainable_params, all_param, 100 * trainable_params / all_param)) # for name,p in model.named_parameters(): # if p.requires_grad and "transformer.h" not in name: # print(name) def train(): global local_rank parser = transformers.HfArgumentParser( (ModelArguments, DataArguments, TrainingArguments, LoraArguments) ) ( model_args, data_args, training_args, lora_args, ) = parser.parse_args_into_dataclasses() if getattr(training_args, 'deepspeed', None) and getattr(lora_args, 'q_lora', False): training_args.distributed_state.distributed_type = DistributedType.DEEPSPEED compute_dtype = ( torch.float16 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32) ) local_rank = training_args.local_rank device_map = None world_size = int(os.environ.get("WORLD_SIZE", 1)) ddp = world_size != 1 if lora_args.q_lora: device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)} if ddp else None if len(training_args.fsdp) > 0 or deepspeed.is_deepspeed_zero3_enabled(): logging.warning( "FSDP or ZeRO3 are not incompatible with QLoRA." ) # Set RoPE scaling factor config = MonkeyConfig.from_pretrained( "monkey_model", cache_dir=training_args.cache_dir, trust_remote_code=True, ) rank0_print(config) config.use_cache = False # Load model and tokenizer rank0_print("loading base model")
# This code is based on the revised code from fastchat based on tatsu-lab/stanford_alpaca. IGNORE_TOKEN_ID = LabelSmoother.ignore_index @dataclass class ModelArguments: model_name_or_path: Optional[str] = field(default="") @dataclass class DataArguments: data_path: str = field( default=None, metadata={"help": "Path to the training data."} ) eval_data_path: str = field( default=None, metadata={"help": "Path to the evaluation data."} ) lazy_preprocess: bool = False @dataclass class TrainingArguments(transformers.TrainingArguments): cache_dir: Optional[str] = field(default=None) optim: str = field(default="adamw_torch") model_max_length: int = field( default=8192, metadata={ "help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)." }, ) use_lora: bool = False fix_vit: bool = True @dataclass class LoraArguments: lora_r: int = 16 lora_alpha: int = 32 lora_dropout: float = 0.05 lora_target_modules: List[str] = field( default_factory=lambda: ["in_proj","out_proj","c_fc"] ##["in_proj","out_proj","c_fc"] ) lora_weight_path: str = "" lora_bias: str = "none" q_lora: bool = False def maybe_zero_3(param): if hasattr(param, "ds_id"): assert param.ds_status == ZeroParamStatus.NOT_AVAILABLE with zero.GatheredParameters([param]): param = param.data.detach().cpu().clone() else: param = param.detach().cpu().clone() return param # Borrowed from peft.utils.get_peft_model_state_dict def get_peft_state_maybe_zero_3(named_params, bias): if bias == "none": to_return = {k: t for k, t in named_params if "lora_" in k} elif bias == "all": to_return = {k: t for k, t in named_params if "lora_" in k or "bias" in k} elif bias == "lora_only": to_return = {} maybe_lora_bias = {} lora_bias_names = set() for k, t in named_params: if "lora_" in k: to_return[k] = t bias_name = k.split("lora_")[0] + "bias" lora_bias_names.add(bias_name) elif "bias" in k: maybe_lora_bias[k] = t for k, t in maybe_lora_bias: if bias_name in lora_bias_names: to_return[bias_name] = t else: raise NotImplementedError to_return = {k: maybe_zero_3(v) for k, v in to_return.items()} return to_return local_rank = None def rank0_print(*args): if local_rank == 0: print(*args) def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str, bias="none"): """Collects the state dict and dump to disk.""" # check if zero3 mode enabled if deepspeed.is_deepspeed_zero3_enabled(): state_dict = trainer.model_wrapped._zero3_consolidated_16bit_state_dict() else: state_dict = trainer.model.state_dict() if trainer.args.should_save and trainer.args.local_rank == 0: trainer._save(output_dir, state_dict=state_dict) def format_tokenizer(tokenizer, message, return_target=False, label=False): _input_ids = tokenizer(message).input_ids input_ids = _input_ids if return_target: if label: target = input_ids else: target = [IGNORE_TOKEN_ID] * (len(_input_ids)) return input_ids, target else: return input_ids def preprocess( source, tokenizer, max_len, system_message: str = "You are a helpful assistant.", padding=True ): # Apply prompt templates input_ids, targets = [], [] user, assistant = source[0], source[1] user_input = user['value'] assistant_input = assistant['value'] message_l = [user_input, assistant_input] for i, message in enumerate(message_l): try: _input_ids, _target = format_tokenizer(tokenizer, message, return_target=True, label=True if i == len(message_l) - 1 else False) # <img> 有些text会有img标签,所以使用<img>作为特殊id有问题,标签数量不对等会报错 except Exception as e: print(e) continue input_ids += _input_ids targets += _target assert 
len(_input_ids) == len(_input_ids) if padding: input_ids += [-1]+[tokenizer.pad_token_id] * (max_len - len(input_ids)-1) targets += [tokenizer.pad_token_id] +[IGNORE_TOKEN_ID] * (max_len - len(targets)-1) targets = targets[:max_len] input_ids = input_ids[:max_len] input_ids = torch.tensor(input_ids, dtype=torch.int) targets = torch.tensor(targets, dtype=torch.int) attention_mask=input_ids.ne(tokenizer.pad_token_id) input_ids[input_ids == -1 ] = tokenizer.pad_token_id return dict( input_ids=input_ids, labels=targets, attention_mask=attention_mask, ) class SupervisedDataset(Dataset): """Dataset for supervised fine-tuning.""" def __init__(self, raw_data, tokenizer: transformers.PreTrainedTokenizer, max_len: int): super(SupervisedDataset, self).__init__() rank0_print("Formatting inputs...") sources = [example["conversations"] for example in raw_data] data_dict = preprocess(sources, tokenizer, max_len) self.input_ids = data_dict["input_ids"] self.labels = data_dict["labels"] self.attention_mask = data_dict["attention_mask"] def __len__(self): return len(self.input_ids) def __getitem__(self, i) -> Dict[str, torch.Tensor]: return dict( input_ids=self.input_ids[i], labels=self.labels[i], attention_mask=self.attention_mask[i], ) class LazySupervisedDataset(Dataset): """Dataset for supervised fine-tuning.""" def __init__(self, raw_data, tokenizer: transformers.PreTrainedTokenizer, max_len: int): super(LazySupervisedDataset, self).__init__() self.tokenizer = tokenizer self.max_len = max_len rank0_print("Formatting inputs...Skip in lazy mode") self.tokenizer = tokenizer self.raw_data = raw_data self.cached_data_dict = {} def __len__(self): return len(self.raw_data) def __getitem__(self, i) -> Dict[str, torch.Tensor]: if i in self.cached_data_dict: return self.cached_data_dict[i] ret = preprocess(self.raw_data[i]["conversations"], self.tokenizer, self.max_len) ret = dict( input_ids=ret["input_ids"], labels=ret["labels"], attention_mask=ret["attention_mask"], ) self.cached_data_dict[i] = ret return ret def make_supervised_data_module( tokenizer: transformers.PreTrainedTokenizer, data_args, max_len, ) -> Dict: """Make dataset and collator for supervised fine-tuning.""" dataset_cls = ( LazySupervisedDataset if data_args.lazy_preprocess else SupervisedDataset ) rank0_print("Loading data...") train_json = json.load(open(data_args.data_path, "r")) train_dataset = dataset_cls(train_json, tokenizer=tokenizer, max_len=max_len) if data_args.eval_data_path: eval_json = json.load(open(data_args.eval_data_path, "r")) eval_dataset = dataset_cls(eval_json, tokenizer=tokenizer, max_len=max_len) else: eval_dataset = None return dict(train_dataset=train_dataset, eval_dataset=eval_dataset) def print_trainable_params(model: torch.nn.Module): trainable_params, all_param = 0, 0 for param in model.parameters(): num_params = param.numel() all_param += num_params if param.requires_grad: trainable_params += num_params rank0_print("trainable params: {:d} || all params: {:d} || trainable%: {:.4f}".format( trainable_params, all_param, 100 * trainable_params / all_param)) # for name,p in model.named_parameters(): # if p.requires_grad and "transformer.h" not in name: # print(name) def train(): global local_rank parser = transformers.HfArgumentParser( (ModelArguments, DataArguments, TrainingArguments, LoraArguments) ) ( model_args, data_args, training_args, lora_args, ) = parser.parse_args_into_dataclasses() if getattr(training_args, 'deepspeed', None) and getattr(lora_args, 'q_lora', False): 
training_args.distributed_state.distributed_type = DistributedType.DEEPSPEED compute_dtype = ( torch.float16 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32) ) local_rank = training_args.local_rank device_map = None world_size = int(os.environ.get("WORLD_SIZE", 1)) ddp = world_size != 1 if lora_args.q_lora: device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)} if ddp else None if len(training_args.fsdp) > 0 or deepspeed.is_deepspeed_zero3_enabled(): logging.warning( "FSDP or ZeRO3 are not incompatible with QLoRA." ) # Set RoPE scaling factor config = MonkeyConfig.from_pretrained( "monkey_model", cache_dir=training_args.cache_dir, trust_remote_code=True, ) rank0_print(config) config.use_cache = False # Load model and tokenizer rank0_print("loading base model")
model = MonkeyLMHeadModel.from_pretrained(
0
2023-11-09 14:31:48+00:00
12k
OpenBMB/ProAgent
ProAgent/n8n_parser/compiler.py
[ { "identifier": "ENVIRONMENT", "path": "ProAgent/router/utils.py", "snippet": "class ENVIRONMENT(Enum):\n '''\n 决定了 record cache 的访问形式\n - Development:不访问缓存,从头开始\n - Refine:访问缓存,但 user messages 必须一致,若不一致(例如节点返回值变化)则停止访问缓存\n - Production:无条件访问缓存,将 record 重播一遍\n '''\n # how to handle with different query?now it's loading the query defined in code instead of loading from cache.\n Development = auto() # ok\n Refine = auto() # ok\n Production = auto() # ok" }, { "identifier": "NodeType", "path": "ProAgent/utils.py", "snippet": "class NodeType(Enum):\n action = auto()\n trigger = auto()" }, { "identifier": "ToolCallStatus", "path": "ProAgent/utils.py", "snippet": "class ToolCallStatus(Enum):\n ToolCallSuccess = auto()\n ToolCallPartlySuccess = auto()\n NoSuchTool = auto()\n NoSuchFunction = auto()\n InputCannotParsed = auto()\n\n \n UndefinedParam = auto() \n ParamTypeError = auto() \n UnSupportedParam = auto() \n UnsupportedExpression = auto() \n ExpressionError = auto() \n RequiredParamUnprovided = auto() " }, { "identifier": "Action", "path": "ProAgent/utils.py", "snippet": "class Action():\n content: str = \"\"\n thought: str = \"\"\n plan: List[str] = field(default_factory=lambda: [])\n criticism: str = \"\"\n tool_name: str = \"\"\n tool_input: dict = field(default_factory=lambda: {})\n\n tool_output_status: ToolCallStatus = ToolCallStatus.ToolCallSuccess\n tool_output: str = \"\"\n\n def to_json(self):\n try:\n tool_output = json.loads(self.tool_output)\n except:\n tool_output = self.tool_output\n return {\n \"thought\": self.thought,\n \"plan\": self.plan,\n \"criticism\": self.criticism,\n \"tool_name\": self.tool_name,\n \"tool_input\": self.tool_input,\n \"tool_output_status\": self.tool_output_status.name,\n \"tool_output\": tool_output,\n }" }, { "identifier": "WorkflowType", "path": "ProAgent/utils.py", "snippet": "class WorkflowType(Enum):\n Main = auto()\n Sub = auto()" }, { "identifier": "TestResult", "path": "ProAgent/utils.py", "snippet": "class TestResult():\n \"\"\"\n Responsible for handling the data structure of [{}]\n \"\"\"\n data_type: TestDataType = TestDataType.ActionInput\n\n input_data: Optional[list] = field(default_factory=lambda: [])\n\n runtime_status: RunTimeStatus = RunTimeStatus.DidNotBeenCalled\n visit_times: int = 0\n\n error_message: str = \"\"\n output_data: Optional[list] = field(default_factory=lambda: [])\n\n\n def load_from_json(self):\n pass\n\n\n def to_json(self):\n pass\n\n def to_str(self):\n prompt = f\"\"\"\nThis function has been executed for {self.visit_times} times. 
Last execution:\n1.Status: {self.runtime_status.name}\n2.Input: \n{self.input_data}\n\n3.Output:\n{self.output_data}\"\"\"\n return prompt" }, { "identifier": "RunTimeStatus", "path": "ProAgent/utils.py", "snippet": "class RunTimeStatus(Enum):\n FunctionExecuteSuccess = auto()\n TriggerAcivatedSuccess = auto()\n ErrorRaisedHere = auto()\n ErrorRaisedInner = auto()\n DidNotImplemented = auto()\n DidNotBeenCalled = auto()" }, { "identifier": "TestDataType", "path": "ProAgent/utils.py", "snippet": "class TestDataType(Enum):\n NoInput = auto()\n TriggerInput = auto()\n ActionInput = auto()\n SubWorkflowInput = auto()" }, { "identifier": "n8nPythonNode", "path": "ProAgent/n8n_parser/node.py", "snippet": "class n8nPythonNode():\n \"\"\"将n8n node转化为一个python-function\n \"\"\"\n node_id: int = 1\n node_meta: n8nNodeMeta = field(default_factory=n8nNodeMeta())\n node_comments: str = \"\"\n note_todo: List[str] = field(default_factory=lambda: [])\n node_json: dict = field(default_factory=lambda: {})\n params: Dict[str, n8nParameter] = field(default_factory=lambda: {})\n\n implemented: bool = False\n \n last_runtime_info: TestResult = field(default_factory= lambda: TestResult())\n\n def get_name(self):\n \"\"\"\n Returns a string representing the name of the node.\n \n Parameters:\n self (Node): The Node object.\n \n Returns:\n str: The name of the node, which is a combination of the node type and the node ID.\n \"\"\"\n return f\"{self.node_meta.node_type.name}_{self.node_id}\"\n\n def get_runtime_description(self) -> str:\n \"\"\"\n Get the information about the last runtime of the Workflow.\n\n Returns:\n str: The description of the last runtime.\n\n \"\"\"\n if self.last_runtime_info == RunTimeStatus.DidNotImplemented:\n return f\"This {self.node_meta.node_type} has not been implemented\"\n\n def update_implement_info(self):\n if len(self.params) == 0:\n self.implemented = True\n return\n for key, value in self.params.items():\n if value.data_is_set:\n self.implemented = True\n return\n\n\n def print_self_clean(self):\n \"\"\"Returns a multiline text.\"\"\"\n lines = []\n input_data = \"input_data: List[Dict] = [{...}]\" if self.node_meta.node_type == NodeType.action else \"\"\n define_line = f\"def {self.get_name()}({input_data}):\"\n lines.append(define_line)\n param_json = {}\n for key, value in self.params.items():\n param = value.to_json()\n if param != None:\n param_json[key] = param\n\n\n param_str = json.dumps(param_json, indent = 2, ensure_ascii=False)\n param_str = param_str.splitlines(True)\n param_str = [line.strip(\"\\n\") for line in param_str]\n prefix = \" params = \"\n param_str[0] = prefix + param_str[0]\n if not self.implemented:\n if len(self.params) > 0:\n param_str[0] += \" # to be Implemented\"\n else:\n param_str[0] += \" # This function doesn't need spesific param\"\n for i in range(1, len(param_str)):\n param_str[i] = \" \"*len(prefix) + param_str[i]\n lines.extend(param_str)\n\n lines.append(f\" function = transparent_{self.node_meta.node_type.name}(integration=\\\"{self.node_meta.integration_name}\\\", resource=\\\"{self.node_meta.resource_name}\\\", operation=\\\"{self.node_meta.operation_name}\\\")\")\n \n if self.node_meta.node_type == NodeType.action:\n lines.append( \" output_data = function.run(input_data=input_data, params=params)\")\n else:\n lines.append( \" output_data = function.run(input_data=None, params=params)\")\n\n lines.append(\" return output_data\")\n\n return lines \n \n\n def print_self(self):\n \"\"\"Returns a multiline text.\"\"\"\n lines = []\n 
input_data = \"input_data: List[Dict] = [{...}]\" if self.node_meta.node_type == NodeType.action else \"\"\n define_line = f\"def {self.get_name()}({input_data}):\"\n lines.append(define_line)\n if self.node_comments != \"\" or self.note_todo != []:\n lines.append(f\" \\\"\\\"\\\"\")\n if self.node_comments != \"\":\n lines.append(f\" comments: {self.node_comments}\")\n \n if self.note_todo != []:\n lines.append(f\" TODOs: \")\n for todo in self.note_todo:\n lines.append(f\" - {todo}\")\n lines.append(f\" \\\"\\\"\\\"\")\n \n param_json = {}\n for key, value in self.params.items():\n param = value.to_json()\n if param != None:\n param_json[key] = param\n\n\n param_str = json.dumps(param_json, indent = 2, ensure_ascii=False)\n param_str = param_str.splitlines(True)\n param_str = [line.strip(\"\\n\") for line in param_str]\n prefix = \" params = \"\n param_str[0] = prefix + param_str[0]\n if not self.implemented:\n if len(self.params) > 0:\n param_str[0] += \" # to be Implemented\"\n else:\n param_str[0] += \" # This function doesn't need spesific param\"\n for i in range(1, len(param_str)):\n param_str[i] = \" \"*len(prefix) + param_str[i]\n lines.extend(param_str)\n\n lines.append(f\" function = transparent_{self.node_meta.node_type.name}(integration=\\\"{self.node_meta.integration_name}\\\", resource=\\\"{self.node_meta.resource_name}\\\", operation=\\\"{self.node_meta.operation_name}\\\")\")\n \n if self.node_meta.node_type == NodeType.action:\n lines.append( \" output_data = function.run(input_data=input_data, params=params)\")\n else:\n lines.append( \" output_data = function.run(input_data=None, params=params)\")\n\n lines.append(\" return output_data\")\n\n return lines \n \n def parse_parameters(self, param_json: dict) -> (ToolCallStatus, str):\n \"\"\"\n Parses the input parameters and checks if they conform to the expected format.\n Args:\n param_json (dict): The input parameters in JSON format.\n Returns:\n tuple: A tuple containing the status of the tool call and a JSON string\n representing the result.\n Raises:\n TypeError: If the input parameter is not of type dict.\n \"\"\"\n new_params = deepcopy(self.params)\n for key in new_params:\n new_params[key].refresh()\n\n tool_call_result = []\n\n if not isinstance(param_json, dict):\n tool_status = ToolCallStatus.ParamTypeError\n return tool_status, json.dumps({\"error\": f\"Parameter Type Error: The parameter is expected to be a json format string which can be parsed as dict type. However, you are giving string parsed as {type(param_json)}\", \"result\": \"Nothing happened.\", \"status\": tool_status.name})\n\n for key in param_json.keys():\n if key not in new_params.keys():\n tool_status = ToolCallStatus.UndefinedParam\n return tool_status, json.dumps({\"error\": f\"Undefined input parameter \\\"{key}\\\" for {self.get_name()}.Supported parameters: {list(new_params.keys())}\", \"result\": \"Nothing happened.\", \"status\": tool_status.name})\n if type(param_json[key]) == str and (len(param_json[key]) == 0):\n tool_status = ToolCallStatus.RequiredParamUnprovided\n return tool_status, json.dumps({\"error\": f\"input parameter is null, \\\"{key}\\\" for {self.get_name()}. 
You should put something in it.\", \"result\": \"Nothing happened.\", \"status\": tool_status.name})\n parse_status, parse_output = new_params[key].parse_value(param_json[key])\n if parse_status != ToolCallStatus.ToolCallSuccess:\n tool_status = parse_status\n return tool_status, json.dumps({\"error\": f\"{parse_output}\", \"result\": \"Nothing Happened\", \"status\": tool_status.name})\n tool_call_result.append(parse_output)\n\n self.params = new_params\n tool_status = ToolCallStatus.ToolCallSuccess\n\n self.update_implement_info()\n return tool_status, json.dumps({\"result\": tool_call_result, \"status\": tool_status.name})" }, { "identifier": "n8nNodeMeta", "path": "ProAgent/n8n_parser/node.py", "snippet": "class n8nNodeMeta():\n node_type: NodeType = NodeType.action\n integration_name: str = \"\"\n resource_name: str = \"\"\n operation_name: str = \"\"\n operation_description: str = \"\"\n\n def to_action_string(self):\n \"\"\"\n Generates a string representation of the action performed by the node.\n \n Returns:\n str: The string representation of the action.\n \"\"\"\n output = f\"{self.node_type.name}(resource={self.resource_name}, operation={self.operation_name})\"\n if self.operation_description != \"\":\n output += f\": {self.operation_description}\"\n return output" }, { "identifier": "n8nPythonWorkflow", "path": "ProAgent/n8n_parser/workflow.py", "snippet": "class n8nPythonWorkflow():\n workflow_name: str = \"\"\n workflow_type: WorkflowType = WorkflowType.Main\n implement_code: str = \"\"\n implement_code_clean: str = \"\"\n\n last_runtime_info: TestResult = field(default_factory= lambda: TestResult())\n\n def print_self_clean(self):\n \"\"\"\n Cleans the implement code by removing single-line comments and multi-line comments.\n\n Returns:\n str: The cleaned implement code.\n \"\"\"\n implement_code_clean = self.implement_code\n implement_code_clean = re.sub(r'\"\"\".*?\"\"\"', '', implement_code_clean, flags=re.DOTALL)\n return implement_code_clean\n\n def print_self(self):\n \"\"\"\n Print the value of the `implement_code` attribute of the current instance.\n\n Returns:\n The value of the `implement_code` attribute.\n \"\"\"\n return self.implement_code\n\n def print_self_old(self):\n \"\"\"\n Generates the function comment for the given function body.\n\n Args:\n self (object): The current instance of the class.\n \n Returns:\n list: The lines of the generated function comment.\n \"\"\"\n lines = []\n name = \"mainWorkflow\" if self.workflow_type == WorkflowType.Main else f\"subworkflow_{self.workflow_id}\"\n input_name = \"trigger_input\" if self.workflow_type == WorkflowType.Main else f\"father_workflow_input\"\n line1 = f\"def {name}({input_name}: [{{...}}]):\"\n lines.append(line1)\n if self.comments != \"\" or self.TODOS != []:\n lines.append(f\" \\\"\\\"\\\"\")\n if self.comments != \"\":\n lines.append(f\" comments: {self.comments}\")\n \n if self.TODOS != []:\n lines.append(f\" TODOs: \")\n for todo in self.TODOS:\n lines.append(f\" - {todo}\")\n lines.append(f\" \\\"\\\"\\\"\")\n\n if self.implement_code != \"\":\n lines.append(self.implement_code)\n else:\n lines.append(\" raise NotImplementedError\")\n\n return lines" }, { "identifier": "parse_properties", "path": "ProAgent/n8n_parser/param_parser.py", "snippet": "def parse_properties(node: n8nPythonNode):\n \"\"\"\n This function parses the properties of a given node and returns a dictionary with parameter descriptions for the model.\n Args:\n node (n8nPythonNode): The node object containing the properties to 
parse.\n Returns:\n dict: A dictionary containing the parameter descriptions.\n \"\"\"\n node_json = node.node_json\n parameter_descriptions = {}\n\n for content in node_json[\"properties\"]:\n assert type(content) == dict\n parameter_name = content[\"name\"]\n\n if parameter_name in [\"resource\", \"operation\", \"authentication\"]:\n continue\n \n if \"displayOptions\" in content.keys() and (parse_display_options(content[\"displayOptions\"], node) == False):\n continue\n\n parameter_type = content[\"type\"]\n\n new_param = visit_parameter(content)\n if new_param != None:\n parameter_descriptions[parameter_name] = new_param\n return parameter_descriptions" }, { "identifier": "n8nPythonCodeRunner", "path": "ProAgent/n8n_tester/run_code.py", "snippet": "class n8nPythonCodeRunner():\n def __init__(self):\n self.error_stack_str = []\n self.mock_interface = MockInput(\n \n )\n\n def flash(self, main_workflow: n8nPythonWorkflow,workflows: dict[str, n8nPythonWorkflow], nodes: [n8nPythonNode]):\n \"\"\"\n Pull the latest n8n-python-code.\n Initializes the `workflows` and `nodes` attributes of the class.\n\n Parameters:\n main_workflow (n8nPythonWorkflow): The main workflow object.\n workflows (dict[str, n8nPythonWorkflow]): A dictionary containing the workflows.\n nodes ([n8nPythonNode]): A list of nodes.\n\n Returns:\n None\n \"\"\"\n self.workflows = {}\n for key, value in workflows.items():\n self.workflows[key] = value\n self.workflows[\"mainWorkflow\"] = main_workflow\n\n self.nodes = nodes\n\n\n def run_code(self):\n \"\"\"\n 1. Initialize the runtime information for all functions.\n 2. Execute the current code and modify the information of all accessed nodes.\n \"\"\"\n\n for workflow_name, workflow in self.workflows.items():\n workflow.last_runtime_info = TestResult(\n data_type=TestDataType.NoInput,\n runtime_status=RunTimeStatus.DidNotBeenCalled,\n )\n for node in self.nodes:\n node.last_runtime_info = TestResult(\n data_type=TestDataType.NoInput,\n runtime_status=RunTimeStatus.DidNotBeenCalled,\n )\n\n trigger_input = None\n for node in self.nodes:\n if node.node_meta.node_type == NodeType.trigger and node.implemented:\n trigger_input = self.mock_interface.get_node_example_input(node, top_k=1)\n node.last_runtime_info = TestResult(\n data_type=TestDataType.NoInput,\n runtime_status=RunTimeStatus.TriggerAcivatedSuccess,\n visit_times=1,\n output_data=deepcopy(trigger_input),\n )\n break\n if trigger_input == None:\n pass\n\n name_space = {}\n for node in self.nodes:\n \n name_space[node.get_name()] = n8nNodeRunner(node=node, name_space={})\n for workflow_name, workflow in self.workflows.items():\n name_space[workflow_name] = n8nWorkflowRunner(workflow_name=workflow_name, workflow=workflow, name_space={})\n for key in name_space.keys():\n name_space[key].name_space = name_space\n \n\n self.error_stack_str = []\n self.std_output = \"\"\n try:\n name_space[\"mainWorkflow\"](trigger_input)\n except n8nRunningException as e:\n for code_lines in reversed(e.code_stack):\n self.error_stack_str.extend(code_lines)\n self.error_stack_str.append(\"------------------------\")\n self.error_stack_str.append(e.error_message)\n self.error_stack_str = \"\\n\".join(self.error_stack_str)\n\n\n def print_clean_code(self, indent = 0):\n '''\n 打印 clean code. 
无注释。\n '''\n lines = []\n for node in self.nodes:\n lines.extend(node.print_self())\n lines.append(\"\\n\\n\")\n \n for workflow_name, workflow in self.workflows.items():\n lines.append(workflow.print_self())\n lines.append(\"\\n\\n\")\n\n lines = [\" \"*indent + line for line in lines]\n return \"\\n\".join(lines)\n \n def print_code(self, indent = 0):\n \"\"\"目前的代码长什么样子?\n \"\"\"\n lines = []\n for node in self.nodes:\n lines.append(\"\\\"\\\"\\\"Function param descriptions: \")\n if len(node.params) > 0:\n for k, (key, value) in enumerate(node.params.items()):\n param_des_lines = value.to_description(prefix_ids=f\"{k}\", indent=0, max_depth=1)\n lines.extend(param_des_lines)\n else:\n lines.append(\"This function doesn't need params\")\n lines.append(node.last_runtime_info.to_str())\n\n # lines.append(\"Avaliable example inputs for this function\")\n # example_inout_pair = mock_interface.get_node_example_input(node, top_k=2)\n # for k, data in enumerate(example_inout_pair):\n # pass\n lines.append(\"\\\"\\\"\\\"\")\n lines.extend(node.print_self())\n lines.append(\"\\n\\n\")\n \n \n for workflow_name, workflow in self.workflows.items():\n lines.append(\"\\\"\\\"\\\"\")\n lines.append(workflow.last_runtime_info.to_str())\n lines.append(\"\\\"\\\"\\\"\")\n\n lines.append(workflow.print_self())\n lines.append(\"\\n\\n\")\n\n\n running_prompt = f\"\"\"\nThe directly running result for now codes with print results are as following:\n\n{self.std_output}\n{self.error_stack_str}\n\nYou can also see the runnning result for all functions in there comments.\"\"\"\n\n lines.append(\"\\\"\\\"\\\"\")\n lines.append(running_prompt)\n lines.append(\"\\\"\\\"\\\"\")\n\n lines = [\" \"*indent + line for line in lines]\n return \"\\n\".join(lines)" }, { "identifier": "mainWorkflow_code", "path": "ProAgent/n8n_parser/intrinsic_functions.py", "snippet": "def get_intrinsic_functions():" }, { "identifier": "print_action_base", "path": "ProAgent/loggers/logs.py", "snippet": "def print_action_base(action: Action):\n \"\"\"\n Print the different properties of an Action object.\n\n Parameters:\n action (Action): The Action object to print.\n\n Returns:\n None\n \"\"\"\n if action.content != \"\":\n logger.typewriter_log(\n f\"content:\", Fore.YELLOW, f\"{action.content}\"\n )\n logger.typewriter_log(\n f\"Thought:\", Fore.YELLOW, f\"{action.thought}\"\n )\n if len(action.plan) > 0:\n logger.typewriter_log(\n f\"Plan:\", Fore.YELLOW,\n )\n for line in action.plan:\n line = line.lstrip(\"- \")\n logger.typewriter_log(\"- \", Fore.GREEN, line.strip())\n logger.typewriter_log(\n f\"Criticism:\", Fore.YELLOW, f\"{action.criticism}\"\n )" }, { "identifier": "print_action_tool", "path": "ProAgent/loggers/logs.py", "snippet": "def print_action_tool(action: Action):\n \"\"\"\n Prints the details of an action tool.\n\n Args:\n action (Action): The action object containing the tool details.\n\n Returns:\n None\n \"\"\"\n logger.typewriter_log(\n f\"Tool:\", Fore.BLUE, f\"{action.tool_name}\"\n )\n logger.typewriter_log(\n f\"Tool Input:\", Fore.BLUE, f\"{action.tool_input}\"\n )\n\n output = action.tool_output if action.tool_output != \"\" else \"None\"\n logger.typewriter_log(\n f\"Tool Output:\", Fore.BLUE, f\"{output}\"\n )\n\n color = Fore.RED\n if action.tool_output_status == ToolCallStatus.ToolCallSuccess:\n color = Fore.GREEN\n elif action.tool_output_status == ToolCallStatus.InputCannotParsed:\n color = Fore.YELLOW\n\n logger.typewriter_log(\n f\"Tool Call Status:\", Fore.BLUE, 
f\"{color}{action.tool_output_status.name}{Style.RESET_ALL}\"\n )" }, { "identifier": "RunningRecoder", "path": "ProAgent/running_recorder.py", "snippet": "class RunningRecoder():\n def __init__(self, record_base_dir = \"./records\"):\n \"\"\"\n Initializes the object with the given record base directory.\n\n Parameters:\n record_base_dir (str): The base directory for the records. Defaults to \"./records\".\n\n Returns:\n None\n \"\"\"\n\n self.llm_record_cache = [] # Get cached records\n\n self.llm_interface_id = 0\n self.llm_server_cache = [] # Runtime records\n self.tool_call_id = 0\n self.tool_call_cache = []\n self.is_cached = True # Assume to be true at first\n self.newly_start = True\n\n now = int(round(time.time()*1000))\n strip = time.strftime('%Y_%m_%d_%H_%M_%S',time.localtime(now/1000))\n\n self.record_root_dir = os.path.join(record_base_dir,strip)\n os.makedirs(self.record_root_dir,exist_ok=True)\n\n print(colored(f\"Recorder Mode: {CONFIG.environment.name}\", color='yellow'))\n\n for subdir_name in [\"LLM_inout_pair\",\"tool_call_logs\"]:\n os.makedirs(os.path.join(self.record_root_dir,subdir_name),exist_ok=True)\n \n\n def save_meta(self):\n \"\"\"\n Saves the meta information of the record.\n\n This function writes the meta information of the record to a file in the\n record root directory. The meta information includes the tool call ID and\n the LLM inference ID.\n\n Parameters:\n None\n\n Returns:\n None\n \"\"\"\n with open(os.path.join(self.record_root_dir, \"meta.meta\"), \"w\", encoding=\"utf-8\") as writer:\n tool_call_log = {\n \"tool_call_id\": self.tool_call_id,\n \"llm_inference_id\": self.llm_interface_id,\n }\n json.dump(tool_call_log,writer,indent=2, ensure_ascii=False)\n\n def load_from_disk(self, record_dir: str, cfg):\n \"\"\"\n Load data from disk into memory cache.\n\n Args:\n record_dir (str): The directory path where the data is stored.\n cfg: The configuration object.\n\n Returns:\n None\n \"\"\"\n logger.typewriter_log(\n \"load from a disk record\",\n Fore.RED,\n record_dir,\n )\n self.newly_start = False\n for dir_name in os.listdir(record_dir):\n if dir_name == \"LLM_inout_pair\":\n inout_pair_list = os.listdir(os.path.join(record_dir,dir_name))\n inout_pair_list.sort()\n for file_name in inout_pair_list:\n with open(os.path.join(record_dir,dir_name,file_name), \"r\", encoding=\"utf-8\") as reader:\n llm_pair = json.load(reader)\n self.llm_record_cache.append(llm_pair)\n elif dir_name == \"meta.meta\":\n with open(os.path.join(record_dir, \"meta.meta\"), \"r\", encoding=\"utf-8\") as reader:\n tool_call_log = json.load(reader)\n \n \n def regist_llm_inout(self, base_kwargs, messages, functions, function_call, stop, other_args, output_data, uuid=\"\"):\n \"\"\"\n Registers the LLM input and output data for the specified function call. \n\n Args:\n base_kwargs (dict): The base keyword arguments for the function call.\n messages (list): The list of messages associated with the function call.\n functions (list): The list of functions called during the function call.\n function_call (str): The function call being registered.\n stop (bool): A flag indicating whether the function call should stop.\n other_args (list): The list of other arguments for the function call.\n output_data (Any): The output data for the function call.\n uuid (str, optional): The UUID associated with the function call. 
Defaults to \"\".\n\n Returns:\n None\n\n Raises:\n None\n \"\"\"\n with open(os.path.join(self.record_root_dir, \"LLM_inout_pair\", f\"{self.llm_interface_id:05d}.json\"), \"w\", encoding=\"utf-8\") as writer:\n llm_inout_record = {\n \"input\": {\n \"base_kwargs\": dump_common_things(base_kwargs),\n \"messages\":dump_common_things(messages),\n \"functions\":dump_common_things(functions),\n \"function_call\":dump_common_things(function_call),\n \"stop\":dump_common_things(stop),\n \"other_args\":dump_common_things(other_args),\n # 'uuid': dump_common_things(uuid)\n },\n \"output\": dump_common_things(output_data),\n \"llm_interface_id\": self.llm_interface_id,\n }\n json.dump(llm_inout_record,writer,indent=2, ensure_ascii=False)\n self.llm_server_cache.append(llm_inout_record)\n\n self.llm_interface_id += 1\n self.save_meta()\n\n\n def query_llm_inout(self, restrict_cache_query, base_kwargs, messages, functions, function_call, stop, other_args, uuid=\"\"):\n \"\"\"\n Query the LLM server for input and output data based on the given parameters.\n \n Parameters:\n - restrict_cache_query (bool): Whether to restrict the cache query.\n - base_kwargs (dict): A dictionary of base keyword arguments.\n - messages (list): A list of messages.\n - functions (list): A list of functions.\n - function_call (dict): A dictionary representing the function call.\n - stop (bool): Whether to stop the query.\n - other_args (dict): A dictionary of other arguments.\n - uuid (str): A string representing the UUID (optional).\n \n Returns:\n - object: The output data from the LLM server, or None if not found.\n \"\"\"\n\n \n if CONFIG.environment == ENVIRONMENT.Development or self.newly_start:\n self.is_cached = False\n return None\n elif CONFIG.environment == ENVIRONMENT.Refine:\n input_data = {\n \"base_kwargs\": dump_common_things(base_kwargs),\n \"messages\":dump_common_things(messages),\n \"functions\":dump_common_things(functions),\n \"function_call\":dump_common_things(function_call),\n \"stop\":dump_common_things(stop),\n \"other_args\":dump_common_things(other_args),\n }\n for cache in self.llm_record_cache:\n # compare user messages only\n input_data_user_messages = [item for item in input_data['messages'] if item['role'] == 'user']\n cache_data_user_messages = [item for item in cache[\"input\"]['messages'] if item['role'] == 'user']\n if input_data_user_messages == cache_data_user_messages:\n if restrict_cache_query and self.llm_interface_id != cache[\"llm_interface_id\"]:\n continue\n logger.typewriter_log(\n f\"get a llm_server response from Record {cache['llm_interface_id']}\",\n Fore.RED,\n )\n self.is_cached = True\n return cache[\"output\"]\n self.is_cached = False\n return None\n elif CONFIG.environment == ENVIRONMENT.Production:\n if self.llm_interface_id < len(self.llm_record_cache):\n logger.typewriter_log(\n \"get a llm_server response from Record\",\n Fore.RED,\n )\n self.is_cached = True\n return self.llm_record_cache[self.llm_interface_id]['output']\n else:\n self.is_cached = False\n return None\n else:\n self.is_cached = False\n return None\n \n\n def regist_tool_call(self, action: Action, now_code: str):\n \"\"\"\n Registers a tool call by saving the action and code to files.\n\n Args:\n action (Action): The action to be saved.\n now_code (str): The current code to be saved.\n\n Returns:\n None\n \"\"\"\n with open(os.path.join(self.record_root_dir, \"tool_call_logs\", f\"{self.tool_call_id:05d}_tool.json\"), \"w\", encoding=\"utf-8\") as writer:\n tool_call_log = action.to_json()\n 
json.dump(tool_call_log,writer,indent=2, ensure_ascii=False)\n with open(os.path.join(self.record_root_dir, \"tool_call_logs\", f\"{self.tool_call_id:05d}_code.py\"), \"w\", encoding=\"utf-8\") as writer:\n writer.write(now_code)\n\n self.tool_call_id += 1\n\n self.save_meta()\n\n def save_markdown(self, markdown):\n \"\"\"\n Save the given markdown content to a file.\n\n Parameters:\n markdown (str): The markdown content to be saved.\n\n Returns:\n None\n \"\"\"\n with open(os.path.join(self.record_root_dir, f\"README.md\"), \"w\", encoding=\"utf-8\") as writer:\n writer.write(markdown)\n \n def is_final_cache(self):\n \"\"\"\n Check if the current cache is the final cache.\n\n Returns:\n bool: True if the current cache is the final cache, False otherwise.\n \"\"\"\n return self.llm_interface_id + 1 >= len(self.llm_record_cache)" }, { "identifier": "CONFIG", "path": "ProAgent/config.py", "snippet": "CONFIG = RPAgentConfig.get_default_config()" } ]
import omegaconf
import json
from typing import List, Dict
from copy import deepcopy
from termcolor import colored
from ProAgent.router.utils import ENVIRONMENT
from ProAgent.utils import NodeType, ToolCallStatus, Action, WorkflowType, TestResult, RunTimeStatus, TestDataType
from ProAgent.n8n_parser.node import n8nPythonNode, n8nNodeMeta
from ProAgent.n8n_parser.workflow import n8nPythonWorkflow
from ProAgent.n8n_parser.param_parser import parse_properties
from ProAgent.n8n_tester.run_code import n8nPythonCodeRunner
from ProAgent.n8n_parser.intrinsic_functions import mainWorkflow_code
from ProAgent.loggers.logs import print_action_base, print_action_tool
from ProAgent.running_recorder import RunningRecoder
from ProAgent.config import CONFIG
8583
class Compiler(): """和nodes.json交互,同时存储目前所有的数据结构 """ def __init__(self, cfg: omegaconf.DictConfig, recorder: RunningRecoder): """ Initializes the class with the given configuration and recorder. Parameters: cfg (omegaconf.DictConfig): The configuration object. recorder (RunningRecoder): The recorder object. Returns: None """ self.cfg = cfg self.recorder = recorder self.nodes: List[n8nPythonNode] = [] self.trigger_id = 0 self.action_id = 0 self.workflows: Dict[n8nPythonWorkflow] = {} self.mainWorkflow: n8nPythonWorkflow = n8nPythonWorkflow( implement_code = mainWorkflow_code ) self.resolve() self.code_runner = n8nPythonCodeRunner() self.code_runner.flash( main_workflow = self.mainWorkflow, workflows=self.workflows, nodes = self.nodes ) self.update_runtime() def resolve_integration(self, integration_json): """ Generates a function comment for the given function body. Args: integration_json (dict): A dictionary containing information about the integration. Returns: dict: A dictionary containing the resolved integration data. Raises: AssertionError: If the target resource name is not found in the integration data. """ integration_name = integration_json["name"].split(".")[-1] integration_data = {} no_resource = True no_operation = True for property in integration_json["properties"]: if property["name"] == "resource": for resource in property["options"]: integration_data[resource["value"]] = {} no_resource = False break if no_resource: integration_data["default"] = {} for property in integration_json["properties"]: if property["name"] == "operation": target_resource_name = "default" if "displayOptions" in property.keys(): assert "show" in property["displayOptions"].keys() and "resource" in property["displayOptions"]["show"].keys() assert len(property["displayOptions"]["show"]["resource"]) == 1 target_resource_name = property["displayOptions"]["show"]["resource"][0] assert target_resource_name in integration_data.keys(), f"{target_resource_name} in {integration_data.keys()}" target_resource = integration_data[target_resource_name] for operation in property["options"]: operation_name = operation["value"] operation_description = "" if "description" in operation.keys(): operation_description = operation["description"] node_type = NodeType.trigger if "trigger" in integration_name.lower() or "webhook" in integration_name.lower() else NodeType.action target_resource[operation_name] = n8nNodeMeta( node_type=node_type, integration_name=integration_name, resource_name=target_resource_name, operation_name=operation_name, operation_description=operation_description ) no_operation = False if no_operation: assert no_resource node_type = NodeType.trigger if "trigger" in integration_name.lower() or "webhook" in integration_name.lower() else NodeType.action integration_data["default"]["default"] = n8nNodeMeta( node_type=node_type, integration_name=integration_name, resource_name="default", operation_name="default", operation_description="" ) return integration_data def print_flatten_tools(self): """ Generates a function comment for the given function body in a markdown code block with the correct language syntax. Returns: str: The function comment in markdown format. """ output_description_list = [] for k1, integration_name in enumerate(list(self.flattened_tools.keys())): operation_counter = 1 data = self.flattened_tools[integration_name]["data"] des = self.flattened_tools[integration_name]["meta"]["description"]
class Compiler(): """和nodes.json交互,同时存储目前所有的数据结构 """ def __init__(self, cfg: omegaconf.DictConfig, recorder: RunningRecoder): """ Initializes the class with the given configuration and recorder. Parameters: cfg (omegaconf.DictConfig): The configuration object. recorder (RunningRecoder): The recorder object. Returns: None """ self.cfg = cfg self.recorder = recorder self.nodes: List[n8nPythonNode] = [] self.trigger_id = 0 self.action_id = 0 self.workflows: Dict[n8nPythonWorkflow] = {} self.mainWorkflow: n8nPythonWorkflow = n8nPythonWorkflow( implement_code = mainWorkflow_code ) self.resolve() self.code_runner = n8nPythonCodeRunner() self.code_runner.flash( main_workflow = self.mainWorkflow, workflows=self.workflows, nodes = self.nodes ) self.update_runtime() def resolve_integration(self, integration_json): """ Generates a function comment for the given function body. Args: integration_json (dict): A dictionary containing information about the integration. Returns: dict: A dictionary containing the resolved integration data. Raises: AssertionError: If the target resource name is not found in the integration data. """ integration_name = integration_json["name"].split(".")[-1] integration_data = {} no_resource = True no_operation = True for property in integration_json["properties"]: if property["name"] == "resource": for resource in property["options"]: integration_data[resource["value"]] = {} no_resource = False break if no_resource: integration_data["default"] = {} for property in integration_json["properties"]: if property["name"] == "operation": target_resource_name = "default" if "displayOptions" in property.keys(): assert "show" in property["displayOptions"].keys() and "resource" in property["displayOptions"]["show"].keys() assert len(property["displayOptions"]["show"]["resource"]) == 1 target_resource_name = property["displayOptions"]["show"]["resource"][0] assert target_resource_name in integration_data.keys(), f"{target_resource_name} in {integration_data.keys()}" target_resource = integration_data[target_resource_name] for operation in property["options"]: operation_name = operation["value"] operation_description = "" if "description" in operation.keys(): operation_description = operation["description"] node_type = NodeType.trigger if "trigger" in integration_name.lower() or "webhook" in integration_name.lower() else NodeType.action target_resource[operation_name] = n8nNodeMeta( node_type=node_type, integration_name=integration_name, resource_name=target_resource_name, operation_name=operation_name, operation_description=operation_description ) no_operation = False if no_operation: assert no_resource node_type = NodeType.trigger if "trigger" in integration_name.lower() or "webhook" in integration_name.lower() else NodeType.action integration_data["default"]["default"] = n8nNodeMeta( node_type=node_type, integration_name=integration_name, resource_name="default", operation_name="default", operation_description="" ) return integration_data def print_flatten_tools(self): """ Generates a function comment for the given function body in a markdown code block with the correct language syntax. Returns: str: The function comment in markdown format. """ output_description_list = [] for k1, integration_name in enumerate(list(self.flattened_tools.keys())): operation_counter = 1 data = self.flattened_tools[integration_name]["data"] des = self.flattened_tools[integration_name]["meta"]["description"]
if integration_name in CONFIG.default_knowledge.keys():
17
2023-11-03 01:20:14+00:00
12k
LLaVA-VL/LLaVA-Plus-Codebase
llava/model/language_model/mpt/modeling_mpt.py
[ { "identifier": "attn_bias_shape", "path": "llava/model/language_model/mpt/attention.py", "snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n if (prefix_lm or not causal) or use_sequence_id:\n return (1, n_heads, seq_len, seq_len)\n return (1, n_heads, 1, seq_len)\n elif prefix_lm or use_sequence_id:\n return (1, 1, seq_len, seq_len)\n return None\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')" }, { "identifier": "build_attn_bias", "path": "llava/model/language_model/mpt/attention.py", "snippet": "def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n (device, dtype) = (attn_bias.device, attn_bias.dtype)\n attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))\n return attn_bias\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')" }, { "identifier": "MPTBlock", "path": "llava/model/language_model/mpt/blocks.py", "snippet": "class MPTBlock(nn.Module):\n\n def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):\n del kwargs\n super().__init__()\n norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]\n attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]\n self.norm_1 = norm_class(d_model, device=device)\n self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)\n self.norm_2 = norm_class(d_model, device=device)\n self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)\n self.resid_attn_dropout = nn.Dropout(resid_pdrop)\n self.resid_ffn_dropout = nn.Dropout(resid_pdrop)\n\n def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:\n a = self.norm_1(x)\n (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)\n x = x + self.resid_attn_dropout(b)\n m = self.norm_2(x)\n n = self.ffn(m)\n x = x + self.resid_ffn_dropout(n)\n return (x, attn_weights, past_key_value)" }, { "identifier": "SharedEmbedding", "path": "llava/model/language_model/mpt/custom_embedding.py", "snippet": "class SharedEmbedding(nn.Embedding):\n\n def forward(self, input: Tensor, unembed: bool=False) -> Tensor:\n if unembed:\n return F.linear(input, self.weight)\n return super().forward(input)" }, { "identifier": "NORM_CLASS_REGISTRY", "path": "llava/model/language_model/mpt/norm.py", "snippet": "NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': 
RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}" }, { "identifier": "MPTConfig", "path": "llava/model/language_model/mpt/configuration_mpt.py", "snippet": "class MPTConfig(PretrainedConfig):\n model_type = 'mpt'\n\n def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):\n \"\"\"The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the MLP.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 
0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n multiquery_attention (bool): Whether to use multiquery attention implementation.\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n \"\"\"\n self.d_model = d_model\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.expansion_ratio = expansion_ratio\n self.max_seq_len = max_seq_len\n self.vocab_size = vocab_size\n self.resid_pdrop = resid_pdrop\n self.emb_pdrop = emb_pdrop\n self.learned_pos_emb = learned_pos_emb\n self.attn_config = attn_config\n self.init_device = init_device\n self.logit_scale = logit_scale\n self.no_bias = no_bias\n self.verbose = verbose\n self.embedding_fraction = embedding_fraction\n self.norm_type = norm_type\n self.use_cache = use_cache\n self.init_config = init_config\n if 'name' in kwargs:\n del kwargs['name']\n if 'loss_fn' in kwargs:\n del kwargs['loss_fn']\n super().__init__(**kwargs)\n self._validate_config()\n\n def _set_config_defaults(self, config, config_defaults):\n for (k, v) in config_defaults.items():\n if k not in config:\n config[k] = v\n return config\n\n def _validate_config(self):\n self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)\n self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)\n if self.d_model % self.n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads')\n if any((prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):\n raise ValueError(\"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1\")\n if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:\n raise ValueError(f\"Unknown attn_impl={self.attn_config['attn_impl']}\")\n if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')\n if self.attn_config['alibi'] and self.attn_config['attn_impl'] 
not in ['torch', 'triton']:\n raise NotImplementedError('alibi only implemented with torch and triton attention.')\n if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')\n if self.embedding_fraction > 1 or self.embedding_fraction <= 0:\n raise ValueError('model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!')\n if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':\n raise ValueError(f\"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.\")\n if self.init_config.get('name', None) is None:\n raise ValueError(f\"self.init_config={self.init_config!r} 'name' needs to be set.\")\n if not self.learned_pos_emb and (not self.attn_config['alibi']):\n raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')" }, { "identifier": "AutoTokenizerForMOD", "path": "llava/model/language_model/mpt/adapt_tokenizer.py", "snippet": "class AutoTokenizerForMOD(AutoTokenizer):\n \"\"\"AutoTokenizer + Adaptation for MOD.\n\n A simple wrapper around AutoTokenizer to make instantiating\n an MOD-adapted tokenizer a bit easier.\n\n MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),\n a padding token, and a property to get the token ids of the\n sentinel tokens.\n \"\"\"\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n \"\"\"See `AutoTokenizer.from_pretrained` docstring.\"\"\"\n tokenizer = super().from_pretrained(*args, **kwargs)\n adapt_tokenizer_for_denoising(tokenizer)\n return tokenizer" }, { "identifier": "adapt_tokenizer_for_denoising", "path": "llava/model/language_model/mpt/adapt_tokenizer.py", "snippet": "def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):\n \"\"\"Adds sentinel tokens and padding token (if missing).\n\n Expands the tokenizer vocabulary to include sentinel tokens\n used in mixture-of-denoiser tasks as well as a padding token.\n\n All added tokens are added as special tokens. 
No tokens are\n added if sentinel tokens and padding token already exist.\n \"\"\"\n sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]\n tokenizer.add_tokens(sentinels_to_add, special_tokens=True)\n if tokenizer.pad_token is None:\n tokenizer.add_tokens('<pad>', special_tokens=True)\n tokenizer.pad_token = '<pad>'\n assert tokenizer.pad_token_id is not None\n sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])\n _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids\n tokenizer.sentinel_token_ids = _sentinel_token_ids" }, { "identifier": "add_bidirectional_mask_if_missing", "path": "llava/model/language_model/mpt/hf_prefixlm_converter.py", "snippet": "def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):\n \"\"\"Attempts to add bidirectional_mask to batch if missing.\n\n Raises:\n KeyError if bidirectional_mask is missing and can't be inferred\n \"\"\"\n if 'bidirectional_mask' not in batch:\n if batch.get('mode', None) == 'icl_task':\n batch['bidirectional_mask'] = batch['attention_mask'].clone()\n for (i, continuation_indices) in enumerate(batch['continuation_indices']):\n batch['bidirectional_mask'][i, continuation_indices] = 0\n elif 'labels' in batch and 'attention_mask' in batch:\n batch['bidirectional_mask'] = torch.logical_and(torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100)).type_as(batch['attention_mask'])\n else:\n raise KeyError('No bidirectional_mask in batch and not sure how to construct one.')" }, { "identifier": "convert_hf_causal_lm_to_prefix_lm", "path": "llava/model/language_model/mpt/hf_prefixlm_converter.py", "snippet": "def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:\n \"\"\"Converts a HuggingFace Causal LM to a Prefix LM.\n\n Supported HuggingFace model classes:\n - `GPT2LMHeadModel`\n - `GPTNeoForCausalLM`\n - `GPTNeoXForCausalLM`\n - `GPTJForCausalLM`\n - `BloomForCausalLM`\n - `OPTForCausalLM`\n\n Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the\n `generate` method and/or select underlying methods depending on the model class.\n\n These changes preserve the model API, but add a new input to `forward`: \"bidirectional_mask\".\n\n Notes on training:\n To actually train the converted model as a Prefix LM, training batches will need to indicate\n the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.\n\n **This is not a standard input and requires custom layers either within or after your dataloader.**\n\n In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`\n such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.\n That is, the prefix portion of the sequence should not generate any loss. Loss should only be\n generated by the target portion of the sequence.\n\n Notes on `GPTNeoForCausalLM`:\n To simplify the implementation, \"global\" and \"local\" attention layers are handled differently.\n For \"global\" layers, we handle conversion as described above. 
For \"local\" layers, which use a\n causal attention mask within a restricted local window, we do not alter the masking.\n\n Notes on `forward` method conversion:\n After conversion, the `forward` method will handle a new input, `bidirectional_mask`,\n which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions\n belonging to the prefix (prefix tokens can attend to one another bidirectionally), and\n 0 indicates token positions belonging to the target.\n\n The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing\n causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset\n the causal masks before returning the result.\n\n Notes on `generate` method conversion:\n After conversion, the `generate` method will have the same signature but will internally\n convert all causal masks to be purely bidirectional, call the original `generate` method, and\n (where appropriate) reset the causal masks before returning the result.\n\n This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token\n \"prompt\" passed to `generate` (which is treated as the prefix) and then sequentially generates\n each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one\n another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and\n previously-generated tokens (also as expected in a Prefix LM).\n\n To preserve the API, the original methods are renamed to `_original_forward` and\n `_original_generate`, and replaced with new `forward` and `generate` methods that wrap\n them, respectively. Although implementation details vary by model class.\n \"\"\"\n if isinstance(model, _SUPPORTED_GPT_MODELS):\n return _convert_gpt_causal_lm_to_prefix_lm(model)\n elif isinstance(model, BloomForCausalLM):\n return _convert_bloom_causal_lm_to_prefix_lm(model)\n elif isinstance(model, OPTForCausalLM):\n return _convert_opt_causal_lm_to_prefix_lm(model)\n else:\n raise TypeError(f'Cannot convert model to Prefix LM. ' + f'Model does not belong to set of supported HF models:' + f'\\n{_SUPPORTED_HF_MODELS}')" }, { "identifier": "init_empty_weights", "path": "llava/model/language_model/mpt/meta_init_context.py", "snippet": "@contextmanager\ndef init_empty_weights(include_buffers: bool=False):\n \"\"\"Meta initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the meta device, therefore creating an empty model. Useful when just\n initializing the model would blow the available RAM.\n\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n # Initialize a model with 100 billions parameters in no time and without using any RAM.\n with init_empty_weights():\n tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n ```\n\n <Tip warning={true}>\n\n Any model created under this context manager has no weights. As such you can't do something like\n `model.to(some_device)` with it. 
To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n\n </Tip>\n \"\"\"\n with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:\n yield f" }, { "identifier": "MODEL_INIT_REGISTRY", "path": "llava/model/language_model/mpt/param_init_fns.py", "snippet": "MODEL_INIT_REGISTRY = {'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_}" }, { "identifier": "generic_param_init_fn_", "path": "llava/model/language_model/mpt/param_init_fns.py", "snippet": "def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):\n del kwargs\n if verbose > 1:\n warnings.warn(f'If model has bias parameters they are initialized to 0.')\n init_div_is_residual = init_div_is_residual\n if init_div_is_residual is False:\n div_is_residual = 1.0\n elif init_div_is_residual is True:\n div_is_residual = math.sqrt(2 * n_layers)\n elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):\n div_is_residual = init_div_is_residual\n elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():\n div_is_residual = float(init_div_is_residual)\n else:\n div_is_residual = 1.0\n raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')\n if init_div_is_residual is not False:\n if verbose > 1:\n warnings.warn(f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.')\n if isinstance(module, nn.Linear):\n if hasattr(module, '_fused'):\n fused_init_helper_(module, init_fn_)\n else:\n init_fn_(module.weight)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n if init_div_is_residual is not False and getattr(module, '_is_residual', False):\n with torch.no_grad():\n module.weight.div_(div_is_residual)\n elif isinstance(module, nn.Embedding):\n if emb_init_std is not None:\n std = emb_init_std\n if std == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')\n elif emb_init_uniform_lim is not None:\n lim = emb_init_uniform_lim\n if isinstance(lim, Sequence):\n if len(lim) > 2:\n raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')\n if lim[0] == lim[1]:\n warnings.warn(f'Embedding layer initialized to {lim[0]}.')\n else:\n if lim == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n lim = [-lim, lim]\n (a, b) = lim\n emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')\n else:\n emb_init_fn_ = init_fn_\n emb_init_fn_(module.weight)\n elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):\n if verbose > 1:\n warnings.warn(f'Norm weights are set to 1. 
If norm layer has a bias it is initialized to 0.')\n if hasattr(module, 'weight') and module.weight is not None:\n torch.nn.init.ones_(module.weight)\n if hasattr(module, 'bias') and module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.MultiheadAttention):\n if module._qkv_same_embed_dim:\n assert module.in_proj_weight is not None\n assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)\n assert d_model is not None\n _d = d_model\n splits = (0, _d, 2 * _d, 3 * _d)\n for (s, e) in zip(splits[:-1], splits[1:]):\n init_fn_(module.in_proj_weight[s:e])\n else:\n assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)\n assert module.in_proj_weight is None\n init_fn_(module.q_proj_weight)\n init_fn_(module.k_proj_weight)\n init_fn_(module.v_proj_weight)\n if module.in_proj_bias is not None:\n torch.nn.init.zeros_(module.in_proj_bias)\n if module.bias_k is not None:\n torch.nn.init.zeros_(module.bias_k)\n if module.bias_v is not None:\n torch.nn.init.zeros_(module.bias_v)\n init_fn_(module.out_proj.weight)\n if init_div_is_residual is not False and getattr(module.out_proj, '_is_residual', False):\n with torch.no_grad():\n module.out_proj.weight.div_(div_is_residual)\n if module.out_proj.bias is not None:\n torch.nn.init.zeros_(module.out_proj.bias)\n else:\n for _ in module.parameters(recurse=False):\n raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')" } ]
import math import warnings import torch import torch.nn as nn import torch.nn.functional as F from typing import List, Optional, Tuple, Union from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from .attention import attn_bias_shape, build_attn_bias from .blocks import MPTBlock from .custom_embedding import SharedEmbedding from .norm import NORM_CLASS_REGISTRY from .configuration_mpt import MPTConfig from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm from .meta_init_context import init_empty_weights from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_ from .flash_attn_triton import flash_attn_func
9446
assert isinstance(attn_bias, torch.Tensor) attn_bias = self._apply_sequence_id(attn_bias, sequence_id) if attention_mask is not None: s_k = attention_mask.shape[-1] if attn_bias is None: attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype) else: _s_k = max(0, attn_bias.size(-1) - s_k) attn_bias = attn_bias[:, :, :, _s_k:] if prefix_mask is not None and attention_mask.shape != prefix_mask.shape: raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.') min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val) return (attn_bias, None) def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor): (s_k, s_q) = attn_bias.shape[-2:] if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len: raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_length} ' + f'but are {s_k} and {s_q}.') seq_len = prefix_mask.shape[-1] if seq_len > self.config.max_seq_len: raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}') attn_bias = attn_bias[..., :seq_len, :seq_len] causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len) prefix = prefix_mask.view(-1, 1, 1, seq_len) cannot_attend = ~torch.logical_or(causal, prefix.bool()) min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(cannot_attend, min_val) return attn_bias def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor): seq_len = sequence_id.shape[-1] if seq_len > self.config.max_seq_len: raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}') attn_bias = attn_bias[..., :seq_len, :seq_len] cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1) min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(cannot_attend, min_val) return attn_bias def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None): return_dict = return_dict if return_dict is not None else self.config.return_dict use_cache = use_cache if use_cache is not None else self.config.use_cache if attention_mask is not None: attention_mask = attention_mask.bool() if prefix_mask is not None: prefix_mask = prefix_mask.bool() if not return_dict: raise NotImplementedError('return_dict False is not implemented yet for MPT') if output_attentions: if self.attn_impl != 'torch': raise NotImplementedError('output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.') if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training: raise NotImplementedError('MPT does not support training with left padding.') if self.prefix_lm and prefix_mask is None: raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.') if self.training: if self.attn_uses_sequence_id and sequence_id is None: 
raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.') elif self.attn_uses_sequence_id is False and sequence_id is not None: warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.') if input_ids is not None: S = input_ids.size(1) assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}' tok_emb = self.wte(input_ids) else: assert inputs_embeds is not None assert self.alibi, 'inputs_embeds is not implemented for MPT unless for alibi.' S = inputs_embeds.size(1) tok_emb = inputs_embeds if self.alibi: x = tok_emb else: past_position = 0 if past_key_values is not None: if len(past_key_values) != self.config.n_layers: raise ValueError(f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).') past_position = past_key_values[0][0].size(1) if self.attn_impl == 'torch': past_position = past_key_values[0][0].size(3) if S + past_position > self.config.max_seq_len: raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.') pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0) if attention_mask is not None: pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0) pos_emb = self.wpe(pos) x = tok_emb + pos_emb if self.embedding_fraction == 1: x = self.emb_drop(x) else: x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction) assert isinstance(self.emb_drop, nn.Module) x = self.emb_drop(x_shrunk) (attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id) if use_cache and past_key_values is None: past_key_values = [() for _ in range(self.config.n_layers)] all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for (b_idx, block) in enumerate(self.blocks): if output_hidden_states: assert all_hidden_states is not None all_hidden_states = all_hidden_states + (x,) past_key_value = past_key_values[b_idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: (x, attn_weights, past_key_value) = torch.utils.checkpoint.checkpoint(block, x, past_key_value, attn_bias, attention_mask, self.is_causal) else: (x, attn_weights, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal) if past_key_values is not None: past_key_values[b_idx] = past_key_value if output_attentions: assert all_self_attns is not None all_self_attns = all_self_attns + (attn_weights,) x = self.norm_f(x) if output_hidden_states: assert all_hidden_states is not None all_hidden_states = all_hidden_states + (x,) return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns) def param_init_fn(self, module): init_fn_name = self.config.init_config['name']
"""A simple, flexible implementation of a GPT model. Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py """ try: except: pass Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast] class MPTPreTrainedModel(PreTrainedModel): config_class = MPTConfig base_model_prefix = 'model' _no_split_modules = ['MPTBlock'] class MPTModel(MPTPreTrainedModel): def __init__(self, config: MPTConfig): config._validate_config() super().__init__(config) self.attn_impl = config.attn_config['attn_impl'] self.prefix_lm = config.attn_config['prefix_lm'] self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id'] self.alibi = config.attn_config['alibi'] self.alibi_bias_max = config.attn_config['alibi_bias_max'] if config.init_device == 'mixed': if dist.get_local_rank() == 0: config.init_device = 'cpu' else: config.init_device = 'meta' if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys(): norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys()) raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).') norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()] self.embedding_fraction = config.embedding_fraction self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device) if not self.alibi: self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device) self.emb_drop = nn.Dropout(config.emb_pdrop) self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)]) self.norm_f = norm_class(config.d_model, device=config.init_device) if config.init_device != 'meta': print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.') self.apply(self.param_init_fn) self.is_causal = not self.prefix_lm self._attn_bias_initialized = False self.attn_bias = None self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id) if config.no_bias: for module in self.modules(): if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter): if config.verbose: warnings.warn(f'Removing bias ({module.bias}) from {module}.') module.register_parameter('bias', None) if config.verbose and config.verbose > 2: print(self) if 'verbose' not in self.config.init_config: self.config.init_config['verbose'] = self.config.verbose if self.config.init_config['verbose'] > 1: init_fn_name = self.config.init_config['name'] warnings.warn(f'Using {init_fn_name} initialization.') self.gradient_checkpointing = False def get_input_embeddings(self): return self.wte def set_input_embeddings(self, value): self.wte = value @torch.no_grad() def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None): if not self._attn_bias_initialized: if self.attn_bias_shape: self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype) self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max) self._attn_bias_initialized = True if self.attn_impl == 'flash': return (self.attn_bias, attention_mask) if self.attn_bias is not None: self.attn_bias = 
self.attn_bias.to(dtype=dtype, device=device) attn_bias = self.attn_bias if self.prefix_lm: assert isinstance(attn_bias, torch.Tensor) assert isinstance(prefix_mask, torch.Tensor) attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask) if self.attn_uses_sequence_id and sequence_id is not None: assert isinstance(attn_bias, torch.Tensor) attn_bias = self._apply_sequence_id(attn_bias, sequence_id) if attention_mask is not None: s_k = attention_mask.shape[-1] if attn_bias is None: attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype) else: _s_k = max(0, attn_bias.size(-1) - s_k) attn_bias = attn_bias[:, :, :, _s_k:] if prefix_mask is not None and attention_mask.shape != prefix_mask.shape: raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.') min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val) return (attn_bias, None) def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor): (s_k, s_q) = attn_bias.shape[-2:] if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len: raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_length} ' + f'but are {s_k} and {s_q}.') seq_len = prefix_mask.shape[-1] if seq_len > self.config.max_seq_len: raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}') attn_bias = attn_bias[..., :seq_len, :seq_len] causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len) prefix = prefix_mask.view(-1, 1, 1, seq_len) cannot_attend = ~torch.logical_or(causal, prefix.bool()) min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(cannot_attend, min_val) return attn_bias def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor): seq_len = sequence_id.shape[-1] if seq_len > self.config.max_seq_len: raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}') attn_bias = attn_bias[..., :seq_len, :seq_len] cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1) min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(cannot_attend, min_val) return attn_bias def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None): return_dict = return_dict if return_dict is not None else self.config.return_dict use_cache = use_cache if use_cache is not None else self.config.use_cache if attention_mask is not None: attention_mask = attention_mask.bool() if prefix_mask is not None: prefix_mask = prefix_mask.bool() if not return_dict: raise NotImplementedError('return_dict False is not implemented yet for MPT') if output_attentions: if self.attn_impl != 'torch': raise NotImplementedError('output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.') if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and 
self.training: raise NotImplementedError('MPT does not support training with left padding.') if self.prefix_lm and prefix_mask is None: raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.') if self.training: if self.attn_uses_sequence_id and sequence_id is None: raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.') elif self.attn_uses_sequence_id is False and sequence_id is not None: warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.') if input_ids is not None: S = input_ids.size(1) assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}' tok_emb = self.wte(input_ids) else: assert inputs_embeds is not None assert self.alibi, 'inputs_embeds is not implemented for MPT unless for alibi.' S = inputs_embeds.size(1) tok_emb = inputs_embeds if self.alibi: x = tok_emb else: past_position = 0 if past_key_values is not None: if len(past_key_values) != self.config.n_layers: raise ValueError(f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).') past_position = past_key_values[0][0].size(1) if self.attn_impl == 'torch': past_position = past_key_values[0][0].size(3) if S + past_position > self.config.max_seq_len: raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.') pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0) if attention_mask is not None: pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0) pos_emb = self.wpe(pos) x = tok_emb + pos_emb if self.embedding_fraction == 1: x = self.emb_drop(x) else: x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction) assert isinstance(self.emb_drop, nn.Module) x = self.emb_drop(x_shrunk) (attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id) if use_cache and past_key_values is None: past_key_values = [() for _ in range(self.config.n_layers)] all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for (b_idx, block) in enumerate(self.blocks): if output_hidden_states: assert all_hidden_states is not None all_hidden_states = all_hidden_states + (x,) past_key_value = past_key_values[b_idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: (x, attn_weights, past_key_value) = torch.utils.checkpoint.checkpoint(block, x, past_key_value, attn_bias, attention_mask, self.is_causal) else: (x, attn_weights, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal) if past_key_values is not None: past_key_values[b_idx] = past_key_value if output_attentions: assert all_self_attns is not None all_self_attns = all_self_attns + (attn_weights,) x = self.norm_f(x) if output_hidden_states: assert 
all_hidden_states is not None all_hidden_states = all_hidden_states + (x,) return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns) def param_init_fn(self, module): init_fn_name = self.config.init_config['name']
MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config)
11
2023-11-07 13:06:02+00:00
12k
TheFunny/ArisuAutoSweeper
module/device/method/adb.py
[ { "identifier": "Config", "path": "module/base/decorator.py", "snippet": "class Config:\n \"\"\"\n Decorator that calls different function with a same name according to config.\n\n func_list likes:\n func_list = {\n 'func1': [\n {'options': {'ENABLE': True}, 'func': 1},\n {'options': {'ENABLE': False}, 'func': 1}\n ]\n }\n \"\"\"\n func_list = {}\n\n @classmethod\n def when(cls, **kwargs):\n \"\"\"\n Args:\n **kwargs: Any option in AzurLaneConfig.\n\n Examples:\n @Config.when(USE_ONE_CLICK_RETIREMENT=True)\n def retire_ships(self, amount=None, rarity=None):\n pass\n\n @Config.when(USE_ONE_CLICK_RETIREMENT=False)\n def retire_ships(self, amount=None, rarity=None):\n pass\n \"\"\"\n from module.logger import logger\n options = kwargs\n\n def decorate(func):\n name = func.__name__\n data = {'options': options, 'func': func}\n if name not in cls.func_list:\n cls.func_list[name] = [data]\n else:\n override = False\n for record in cls.func_list[name]:\n if record['options'] == data['options']:\n record['func'] = data['func']\n override = True\n if not override:\n cls.func_list[name].append(data)\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n \"\"\"\n Args:\n self: ModuleBase instance.\n *args:\n **kwargs:\n \"\"\"\n for record in cls.func_list[name]:\n\n flag = [value is None or self.config.__getattribute__(key) == value\n for key, value in record['options'].items()]\n if not all(flag):\n continue\n\n return record['func'](self, *args, **kwargs)\n\n logger.warning(f'No option fits for {name}, using the last define func.')\n return func(self, *args, **kwargs)\n\n return wrapper\n\n return decorate" }, { "identifier": "Connection", "path": "module/device/connection.py", "snippet": "class Connection(ConnectionAttr):\n def __init__(self, config):\n \"\"\"\n Args:\n config (AzurLaneConfig, str): Name of the user config under ./config\n \"\"\"\n super().__init__(config)\n if not self.is_over_http:\n self.detect_device()\n\n # Connect\n self.adb_connect(self.serial)\n logger.attr('AdbDevice', self.adb)\n\n # Package\n if self.config.Emulator_PackageName == 'auto':\n self.detect_package()\n else:\n self.package = server_.to_package(self.config.Emulator_PackageName)\n # No set_server cause game client and UI language can be different\n # else:\n # set_server(self.package)\n logger.attr('Server', self.config.Emulator_PackageName)\n server_.server = self.config.Emulator_PackageName\n logger.attr('PackageName', self.package)\n server_.lang = self.config.Emulator_GameLanguage\n logger.attr('Lang', self.config.LANG)\n\n self.check_mumu_app_keep_alive()\n\n @Config.when(DEVICE_OVER_HTTP=False)\n def adb_command(self, cmd, timeout=10):\n \"\"\"\n Execute ADB commands in a subprocess,\n usually to be used when pulling or pushing large files.\n\n Args:\n cmd (list):\n timeout (int):\n\n Returns:\n str:\n \"\"\"\n cmd = list(map(str, cmd))\n cmd = [self.adb_binary, '-s', self.serial] + cmd\n logger.info(f'Execute: {cmd}')\n\n # Use shell=True to disable console window when using GUI.\n # Although, there's still a window when you stop running in GUI, which cause by gooey.\n # To disable it, edit gooey/gui/util/taskkill.py\n\n # No gooey anymore, just shell=False\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=False)\n try:\n stdout, stderr = process.communicate(timeout=timeout)\n except subprocess.TimeoutExpired:\n process.kill()\n stdout, stderr = process.communicate()\n logger.warning(f'TimeoutExpired when calling {cmd}, stdout={stdout}, stderr={stderr}')\n return stdout\n\n 
@Config.when(DEVICE_OVER_HTTP=True)\n def adb_command(self, cmd, timeout=10):\n logger.warning(\n f'adb_command() is not available when connecting over http: {self.serial}, '\n )\n raise RequestHumanTakeover\n\n @Config.when(DEVICE_OVER_HTTP=False)\n def adb_shell(self, cmd, stream=False, recvall=True, timeout=10, rstrip=True):\n \"\"\"\n Equivalent to `adb -s <serial> shell <*cmd>`\n\n Args:\n cmd (list, str):\n stream (bool): Return stream instead of string output (Default: False)\n recvall (bool): Receive all data when stream=True (Default: True)\n timeout (int): (Default: 10)\n rstrip (bool): Strip the last empty line (Default: True)\n\n Returns:\n str if stream=False\n bytes if stream=True and recvall=True\n socket if stream=True and recvall=False\n \"\"\"\n if not isinstance(cmd, str):\n cmd = list(map(str, cmd))\n\n if stream:\n result = self.adb.shell(cmd, stream=stream, timeout=timeout, rstrip=rstrip)\n if recvall:\n # bytes\n return recv_all(result)\n else:\n # socket\n return result\n else:\n result = self.adb.shell(cmd, stream=stream, timeout=timeout, rstrip=rstrip)\n result = remove_shell_warning(result)\n # str\n return result\n\n @Config.when(DEVICE_OVER_HTTP=True)\n def adb_shell(self, cmd, stream=False, recvall=True, timeout=10, rstrip=True):\n \"\"\"\n Equivalent to http://127.0.0.1:7912/shell?command={command}\n\n Args:\n cmd (list, str):\n stream (bool): Return stream instead of string output (Default: False)\n recvall (bool): Receive all data when stream=True (Default: True)\n timeout (int): (Default: 10)\n rstrip (bool): Strip the last empty line (Default: True)\n\n Returns:\n str if stream=False\n bytes if stream=True\n \"\"\"\n if not isinstance(cmd, str):\n cmd = list(map(str, cmd))\n\n if stream:\n result = self.u2.shell(cmd, stream=stream, timeout=timeout)\n # Already received all, so `recvall` is ignored\n result = remove_shell_warning(result.content)\n # bytes\n return result\n else:\n result = self.u2.shell(cmd, stream=stream, timeout=timeout).output\n if rstrip:\n result = result.rstrip()\n result = remove_shell_warning(result)\n # str\n return result\n\n def adb_getprop(self, name):\n \"\"\"\n Get system property in Android, same as `getprop <name>`\n\n Args:\n name (str): Property name\n\n Returns:\n str:\n \"\"\"\n return self.adb_shell(['getprop', name]).strip()\n\n @cached_property\n def cpu_abi(self) -> str:\n \"\"\"\n Returns:\n str: arm64-v8a, armeabi-v7a, x86, x86_64\n \"\"\"\n abi = self.adb_getprop('ro.product.cpu.abi')\n if not len(abi):\n logger.error(f'CPU ABI invalid: \"{abi}\"')\n return abi\n\n @cached_property\n def sdk_ver(self) -> int:\n \"\"\"\n Android SDK/API levels, see https://apilevels.com/\n \"\"\"\n sdk = self.adb_getprop('ro.build.version.sdk')\n try:\n return int(sdk)\n except ValueError:\n logger.error(f'SDK version invalid: {sdk}')\n\n return 0\n\n @cached_property\n def is_avd(self):\n if get_serial_pair(self.serial)[0] is None:\n return False\n if 'ranchu' in self.adb_getprop('ro.hardware'):\n return True\n if 'goldfish' in self.adb_getprop('ro.hardware.audio.primary'):\n return True\n return False\n\n def check_mumu_app_keep_alive(self):\n if not self.is_mumu_family:\n return False\n\n res = self.adb_getprop('nemud.app_keep_alive')\n logger.attr('nemud.app_keep_alive', res)\n if res == '':\n # Empry property, might not be a mumu emulator or might be an old mumu\n return True\n elif res == 'false':\n # Disabled\n return True\n elif res == 'true':\n # https://mumu.163.com/help/20230802/35047_1102450.html\n 
logger.critical('请在MuMu模拟器设置内关闭 \"后台挂机时保活运行\"')\n raise RequestHumanTakeover\n else:\n logger.warning(f'Invalid nemud.app_keep_alive value: {res}')\n return False\n\n @cached_property\n def _nc_server_host_port(self):\n \"\"\"\n Returns:\n str, int, str, int:\n server_listen_host, server_listen_port, client_connect_host, client_connect_port\n \"\"\"\n # For BlueStacks hyper-v, use ADB reverse\n if self.is_bluestacks_hyperv:\n host = '127.0.0.1'\n logger.info(f'Connecting to BlueStacks hyper-v, using host {host}')\n port = self.adb_reverse(f'tcp:{self.config.REVERSE_SERVER_PORT}')\n return host, port, host, self.config.REVERSE_SERVER_PORT\n # For emulators, listen on current host\n if self.is_emulator or self.is_over_http:\n try:\n host = socket.gethostbyname(socket.gethostname())\n except socket.gaierror as e:\n logger.error(e)\n logger.error(f'Unknown host name: {socket.gethostname()}')\n host = '127.0.0.1'\n if platform.system() == 'Linux' and host == '127.0.1.1':\n host = '127.0.0.1'\n logger.info(f'Connecting to local emulator, using host {host}')\n port = random_port(self.config.FORWARD_PORT_RANGE)\n\n # For AVD instance\n if self.is_avd:\n return host, port, \"10.0.2.2\", port\n\n return host, port, host, port\n # For local network devices, listen on the host under the same network as target device\n if self.is_network_device:\n hosts = socket.gethostbyname_ex(socket.gethostname())[2]\n logger.info(f'Current hosts: {hosts}')\n ip = ipaddress.ip_address(self.serial.split(':')[0])\n for host in hosts:\n if ip in ipaddress.ip_interface(f'{host}/24').network:\n logger.info(f'Connecting to local network device, using host {host}')\n port = random_port(self.config.FORWARD_PORT_RANGE)\n return host, port, host, port\n # For other devices, create an ADB reverse and listen on 127.0.0.1\n host = '127.0.0.1'\n logger.info(f'Connecting to unknown device, using host {host}')\n port = self.adb_reverse(f'tcp:{self.config.REVERSE_SERVER_PORT}')\n return host, port, host, self.config.REVERSE_SERVER_PORT\n\n @cached_property\n def reverse_server(self):\n \"\"\"\n Setup a server on Alas, access it from emulator.\n This will bypass adb shell and be faster.\n \"\"\"\n del_cached_property(self, '_nc_server_host_port')\n host_port = self._nc_server_host_port\n logger.info(f'Reverse server listening on {host_port[0]}:{host_port[1]}, '\n f'client can send data to {host_port[2]}:{host_port[3]}')\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.bind(host_port[:2])\n server.settimeout(5)\n server.listen(5)\n return server\n\n @cached_property\n def nc_command(self):\n \"\"\"\n Returns:\n list[str]: ['nc'] or ['busybox', 'nc']\n \"\"\"\n sdk = self.sdk_ver\n logger.info(f'sdk_ver: {sdk}')\n if sdk >= 28:\n # Android 9 emulators does not have `nc`, try `busybox nc`\n # BlueStacks Pie (Android 9) has `nc` but cannot send data, try `busybox nc` first\n trial = [\n ['busybox', 'nc'],\n ['nc'],\n ]\n else:\n trial = [\n ['nc'],\n ['busybox', 'nc'],\n ]\n for command in trial:\n # About 3ms\n result = self.adb_shell(command)\n # Result should be command help if success\n # `/system/bin/sh: nc: not found`\n if 'not found' in result:\n continue\n # `/system/bin/sh: busybox: inaccessible or not found\\n`\n if 'inaccessible' in result:\n continue\n logger.attr('nc command', command)\n return command\n\n logger.error('No `netcat` command available, please use screenshot methods without `_nc` suffix')\n raise RequestHumanTakeover\n\n def adb_shell_nc(self, cmd, timeout=5, chunk_size=262144):\n \"\"\"\n 
Args:\n cmd (list):\n timeout (int):\n chunk_size (int): Default to 262144\n\n Returns:\n bytes:\n \"\"\"\n # Server start listening\n server = self.reverse_server\n server.settimeout(timeout)\n # Client send data, waiting for server accept\n # <command> | nc 127.0.0.1 {port}\n cmd += [\"|\", *self.nc_command, *self._nc_server_host_port[2:]]\n stream = self.adb_shell(cmd, stream=True, recvall=False)\n try:\n # Server accept connection\n conn, conn_port = server.accept()\n except socket.timeout:\n output = recv_all(stream, chunk_size=chunk_size)\n logger.warning(str(output))\n raise AdbTimeout('reverse server accept timeout')\n\n # Server receive data\n data = recv_all(conn, chunk_size=chunk_size, recv_interval=0.001)\n\n # Server close connection\n conn.close()\n return data\n\n def adb_exec_out(self, cmd, serial=None):\n cmd.insert(0, 'exec-out')\n return self.adb_command(cmd, serial)\n\n def adb_forward(self, remote):\n \"\"\"\n Do `adb forward <local> <remote>`.\n choose a random port in FORWARD_PORT_RANGE or reuse an existing forward,\n and also remove redundant forwards.\n\n Args:\n remote (str):\n tcp:<port>\n localabstract:<unix domain socket name>\n localreserved:<unix domain socket name>\n localfilesystem:<unix domain socket name>\n dev:<character device name>\n jdwp:<process pid> (remote only)\n\n Returns:\n int: Port\n \"\"\"\n port = 0\n for forward in self.adb.forward_list():\n if forward.serial == self.serial and forward.remote == remote and forward.local.startswith('tcp:'):\n if not port:\n logger.info(f'Reuse forward: {forward}')\n port = int(forward.local[4:])\n else:\n logger.info(f'Remove redundant forward: {forward}')\n self.adb_forward_remove(forward.local)\n\n if port:\n return port\n else:\n # Create new forward\n port = random_port(self.config.FORWARD_PORT_RANGE)\n forward = ForwardItem(self.serial, f'tcp:{port}', remote)\n logger.info(f'Create forward: {forward}')\n self.adb.forward(forward.local, forward.remote)\n return port\n\n def adb_reverse(self, remote):\n port = 0\n for reverse in self.adb.reverse_list():\n if reverse.remote == remote and reverse.local.startswith('tcp:'):\n if not port:\n logger.info(f'Reuse reverse: {reverse}')\n port = int(reverse.local[4:])\n else:\n logger.info(f'Remove redundant forward: {reverse}')\n self.adb_forward_remove(reverse.local)\n\n if port:\n return port\n else:\n # Create new reverse\n port = random_port(self.config.FORWARD_PORT_RANGE)\n reverse = ReverseItem(f'tcp:{port}', remote)\n logger.info(f'Create reverse: {reverse}')\n self.adb.reverse(reverse.local, reverse.remote)\n return port\n\n def adb_forward_remove(self, local):\n \"\"\"\n Equivalent to `adb -s <serial> forward --remove <local>`\n More about the commands send to ADB server, see:\n https://cs.android.com/android/platform/superproject/+/master:packages/modules/adb/SERVICES.TXT\n\n Args:\n local (str): Such as 'tcp:2437'\n \"\"\"\n with self.adb_client._connect() as c:\n list_cmd = f\"host-serial:{self.serial}:killforward:{local}\"\n c.send_command(list_cmd)\n c.check_okay()\n\n def adb_reverse_remove(self, local):\n \"\"\"\n Equivalent to `adb -s <serial> reverse --remove <local>`\n\n Args:\n local (str): Such as 'tcp:2437'\n \"\"\"\n with self.adb_client._connect() as c:\n c.send_command(f\"host:transport:{self.serial}\")\n c.check_okay()\n list_cmd = f\"reverse:killforward:{local}\"\n c.send_command(list_cmd)\n c.check_okay()\n\n def adb_push(self, local, remote):\n \"\"\"\n Args:\n local (str):\n remote (str):\n\n Returns:\n str:\n \"\"\"\n cmd = ['push', 
local, remote]\n return self.adb_command(cmd)\n\n @Config.when(DEVICE_OVER_HTTP=False)\n def adb_connect(self, serial):\n \"\"\"\n Connect to a serial, try 3 times at max.\n If there's an old ADB server running while Alas is using a newer one, which happens on Chinese emulators,\n the first connection is used to kill the other one, and the second is the real connect.\n\n Args:\n serial (str):\n\n Returns:\n bool: If success\n \"\"\"\n # Disconnect offline device before connecting\n for device in self.list_device():\n if device.status == 'offline':\n logger.warning(f'Device {serial} is offline, disconnect it before connecting')\n self.adb_disconnect(serial)\n elif device.status == 'unauthorized':\n logger.error(f'Device {serial} is unauthorized, please accept ADB debugging on your device')\n elif device.status == 'device':\n pass\n else:\n logger.warning(f'Device {serial} is is having a unknown status: {device.status}')\n\n # Skip for emulator-5554\n if 'emulator-' in serial:\n logger.info(f'\"{serial}\" is a `emulator-*` serial, skip adb connect')\n return True\n if re.match(r'^[a-zA-Z0-9]+$', serial):\n logger.info(f'\"{serial}\" seems to be a Android serial, skip adb connect')\n return True\n\n # Try to connect\n for _ in range(3):\n msg = self.adb_client.connect(serial)\n logger.info(msg)\n if 'connected' in msg:\n # Connected to 127.0.0.1:59865\n # Already connected to 127.0.0.1:59865\n return True\n elif 'bad port' in msg:\n # bad port number '598265' in '127.0.0.1:598265'\n logger.error(msg)\n possible_reasons('Serial incorrect, might be a typo')\n raise RequestHumanTakeover\n elif '(10061)' in msg:\n # cannot connect to 127.0.0.1:55555:\n # No connection could be made because the target machine actively refused it. (10061)\n logger.info(msg)\n logger.warning('No such device exists, please restart the emulator or set a correct serial')\n raise EmulatorNotRunningError\n\n # Failed to connect\n logger.warning(f'Failed to connect {serial} after 3 trial, assume connected')\n self.detect_device()\n return False\n\n @Config.when(DEVICE_OVER_HTTP=True)\n def adb_connect(self, serial):\n # No adb connect if over http\n return True\n\n def adb_disconnect(self, serial):\n msg = self.adb_client.disconnect(serial)\n if msg:\n logger.info(msg)\n\n del_cached_property(self, 'hermit_session')\n del_cached_property(self, 'droidcast_session')\n del_cached_property(self, 'minitouch_builder')\n del_cached_property(self, 'reverse_server')\n\n def adb_restart(self):\n \"\"\"\n Reboot adb client\n \"\"\"\n logger.info('Restart adb')\n # Kill current client\n self.adb_client.server_kill()\n # Init adb client\n del_cached_property(self, 'adb_client')\n _ = self.adb_client\n\n @Config.when(DEVICE_OVER_HTTP=False)\n def adb_reconnect(self):\n \"\"\"\n Reboot adb client if no device found, otherwise try reconnecting device.\n \"\"\"\n if self.config.Emulator_AdbRestart and len(self.list_device()) == 0:\n # Restart Adb\n self.adb_restart()\n # Connect to device\n self.adb_connect(self.serial)\n self.detect_device()\n else:\n self.adb_disconnect(self.serial)\n self.adb_connect(self.serial)\n self.detect_device()\n\n @Config.when(DEVICE_OVER_HTTP=True)\n def adb_reconnect(self):\n logger.warning(\n f'When connecting a device over http: {self.serial} '\n f'adb_reconnect() is skipped, you may need to restart ATX manually'\n )\n\n def install_uiautomator2(self):\n \"\"\"\n Init uiautomator2 and remove minicap.\n \"\"\"\n logger.info('Install uiautomator2')\n init = u2.init.Initer(self.adb, loglevel=logging.DEBUG)\n 
# MuMu X has no ro.product.cpu.abi, pick abi from ro.product.cpu.abilist\n if init.abi not in ['x86_64', 'x86', 'arm64-v8a', 'armeabi-v7a', 'armeabi']:\n init.abi = init.abis[0]\n init.set_atx_agent_addr('127.0.0.1:7912')\n try:\n init.install()\n except ConnectionError:\n u2.init.GITHUB_BASEURL = 'http://tool.appetizer.io/openatx'\n init.install()\n self.uninstall_minicap()\n\n def uninstall_minicap(self):\n \"\"\" minicap can't work or will send compressed images on some emulators. \"\"\"\n logger.info('Removing minicap')\n self.adb_shell([\"rm\", \"/data/local/tmp/minicap\"])\n self.adb_shell([\"rm\", \"/data/local/tmp/minicap.so\"])\n\n @Config.when(DEVICE_OVER_HTTP=False)\n def restart_atx(self):\n \"\"\"\n Minitouch supports only one connection at a time.\n Restart ATX to kick the existing one.\n \"\"\"\n logger.info('Restart ATX')\n atx_agent_path = '/data/local/tmp/atx-agent'\n self.adb_shell([atx_agent_path, 'server', '--stop'])\n self.adb_shell([atx_agent_path, 'server', '--nouia', '-d', '--addr', '127.0.0.1:7912'])\n\n @Config.when(DEVICE_OVER_HTTP=True)\n def restart_atx(self):\n logger.warning(\n f'When connecting a device over http: {self.serial} '\n f'restart_atx() is skipped, you may need to restart ATX manually'\n )\n\n @staticmethod\n def sleep(second):\n \"\"\"\n Args:\n second(int, float, tuple):\n \"\"\"\n time.sleep(ensure_time(second))\n\n _orientation_description = {\n 0: 'Normal',\n 1: 'HOME key on the right',\n 2: 'HOME key on the top',\n 3: 'HOME key on the left',\n }\n orientation = 0\n\n @retry\n def get_orientation(self):\n \"\"\"\n Rotation of the phone\n\n Returns:\n int:\n 0: 'Normal'\n 1: 'HOME key on the right'\n 2: 'HOME key on the top'\n 3: 'HOME key on the left'\n \"\"\"\n _DISPLAY_RE = re.compile(\n r'.*DisplayViewport{.*valid=true, .*orientation=(?P<orientation>\\d+), .*deviceWidth=(?P<width>\\d+), deviceHeight=(?P<height>\\d+).*'\n )\n output = self.adb_shell(['dumpsys', 'display'])\n\n res = _DISPLAY_RE.search(output, 0)\n\n if res:\n o = int(res.group('orientation'))\n if o in Connection._orientation_description:\n pass\n else:\n o = 0\n logger.warning(f'Invalid device orientation: {o}, assume it is normal')\n else:\n o = 0\n logger.warning('Unable to get device orientation, assume it is normal')\n\n self.orientation = o\n logger.attr('Device Orientation', f'{o} ({Connection._orientation_description.get(o, \"Unknown\")})')\n return o\n\n @retry\n def list_device(self):\n \"\"\"\n Returns:\n SelectedGrids[AdbDeviceWithStatus]:\n \"\"\"\n devices = []\n try:\n with self.adb_client._connect() as c:\n c.send_command(\"host:devices\")\n c.check_okay()\n output = c.read_string_block()\n for line in output.splitlines():\n parts = line.strip().split(\"\\t\")\n if len(parts) != 2:\n continue\n device = AdbDeviceWithStatus(self.adb_client, parts[0], parts[1])\n devices.append(device)\n except ConnectionResetError as e:\n # Happens only on CN users.\n # ConnectionResetError: [WinError 10054] 远程主机强迫关闭了一个现有的连接。\n logger.error(e)\n if '强迫关闭' in str(e):\n logger.critical('无法连接至ADB服务,请关闭UU加速器、原神私服、以及一些劣质代理软件。'\n '它们会劫持电脑上所有的网络连接,包括Alas与模拟器之间的本地连接。')\n return SelectedGrids(devices)\n\n def detect_device(self):\n \"\"\"\n Find available devices\n If serial=='auto' and only 1 device detected, use it\n \"\"\"\n logger.hr('Detect device')\n logger.info('Here are the available devices, '\n 'copy to Alas.Emulator.Serial to use it or set Alas.Emulator.Serial=\"auto\"')\n devices = self.list_device()\n\n # Show available devices\n available = 
devices.select(status='device')\n for device in available:\n logger.info(device.serial)\n if not len(available):\n logger.info('No available devices')\n\n # Show unavailable devices if having any\n unavailable = devices.delete(available)\n if len(unavailable):\n logger.info('Here are the devices detected but unavailable')\n for device in unavailable:\n logger.info(f'{device.serial} ({device.status})')\n\n # Auto device detection\n if self.config.Emulator_Serial == 'auto':\n if available.count == 0:\n logger.critical('No available device found, auto device detection cannot work, '\n 'please set an exact serial in Alas.Emulator.Serial instead of using \"auto\"')\n raise RequestHumanTakeover\n elif available.count == 1:\n logger.info(f'Auto device detection found only one device, using it')\n self.serial = devices[0].serial\n del_cached_property(self, 'adb')\n else:\n logger.critical('Multiple devices found, auto device detection cannot decide which to choose, '\n 'please copy one of the available devices listed above to Alas.Emulator.Serial')\n raise RequestHumanTakeover\n\n # Handle LDPlayer\n # LDPlayer serial jumps between `127.0.0.1:5555+{X}` and `emulator-5554+{X}`\n port_serial, emu_serial = get_serial_pair(self.serial)\n if port_serial and emu_serial:\n # Might be LDPlayer, check connected devices\n port_device = devices.select(serial=port_serial).first_or_none()\n emu_device = devices.select(serial=emu_serial).first_or_none()\n if port_device and emu_device:\n # Paired devices found, check status to get the correct one\n if port_device.status == 'device' and emu_device.status == 'offline':\n self.serial = port_serial\n logger.info(f'LDPlayer device pair found: {port_device}, {emu_device}. '\n f'Using serial: {self.serial}')\n elif port_device.status == 'offline' and emu_device.status == 'device':\n self.serial = emu_serial\n logger.info(f'LDPlayer device pair found: {port_device}, {emu_device}. '\n f'Using serial: {self.serial}')\n elif not devices.select(serial=self.serial):\n # Current serial not found\n if port_device and not emu_device:\n logger.info(f'Current serial {self.serial} not found but paired device {port_serial} found. '\n f'Using serial: {port_serial}')\n self.serial = port_serial\n if not port_device and emu_device:\n logger.info(f'Current serial {self.serial} not found but paired device {emu_serial} found. 
'\n f'Using serial: {emu_serial}')\n self.serial = emu_serial\n\n @retry\n def list_package(self, show_log=True):\n \"\"\"\n Find all packages on device.\n Use dumpsys first for faster.\n \"\"\"\n # 80ms\n if show_log:\n logger.info('Get package list')\n output = self.adb_shell(r'dumpsys package | grep \"Package \\[\"')\n packages = re.findall(r'Package \\[([^\\s]+)\\]', output)\n if len(packages):\n return packages\n\n # 200ms\n if show_log:\n logger.info('Get package list')\n output = self.adb_shell(['pm', 'list', 'packages'])\n packages = re.findall(r'package:([^\\s]+)', output)\n return packages\n\n def list_azurlane_packages(self, show_log=True):\n \"\"\"\n Args:\n show_log:\n\n Returns:\n list[str]: List of package names\n \"\"\"\n packages = self.list_package(show_log=show_log)\n packages = [p for p in packages if p in server_.VALID_PACKAGE]\n return packages\n\n def detect_package(self, set_config=True):\n \"\"\"\n Show all possible packages with the given keyword on this device.\n \"\"\"\n logger.hr('Detect package')\n packages = self.list_azurlane_packages()\n\n # Show packages\n logger.info(f'Here are the available packages in device \"{self.serial}\", '\n f'copy to Alas.Emulator.PackageName to use it')\n if len(packages):\n for package in packages:\n logger.info(package)\n else:\n logger.info(f'No available packages on device \"{self.serial}\"')\n\n # Auto package detection\n if len(packages) == 0:\n logger.critical(f'No Blue Archive package found, '\n f'please confirm Blue Archive has been installed on device \"{self.serial}\"')\n raise RequestHumanTakeover\n if len(packages) == 1:\n logger.info('Auto package detection found only one package, using it')\n self.package = packages[0]\n # Set config\n if set_config:\n self.config.Emulator_PackageName = server_.to_server(self.package)\n # Set server\n # logger.info('Server changed, release resources')\n # set_server(self.package)\n else:\n logger.critical(\n f'Multiple Blue Archive packages found, auto package detection cannot decide which to choose, '\n 'please copy one of the available devices listed above to Alas.Emulator.PackageName')\n raise RequestHumanTakeover" }, { "identifier": "RETRY_TRIES", "path": "module/device/method/utils.py", "snippet": "RETRY_TRIES = 5" }, { "identifier": "retry_sleep", "path": "module/device/method/utils.py", "snippet": "def retry_sleep(trial):\n # First trial\n if trial == 0:\n pass\n # Failed once, fast retry\n elif trial == 1:\n pass\n # Failed twice\n elif trial == 2:\n time.sleep(1)\n # Failed more\n else:\n time.sleep(RETRY_DELAY)" }, { "identifier": "remove_prefix", "path": "module/device/method/utils.py", "snippet": "def remove_prefix(s, prefix):\n \"\"\"\n Remove prefix of a string or bytes like `string.removeprefix(prefix)`, which is on Python3.9+\n\n Args:\n s (str, bytes):\n prefix (str, bytes):\n\n Returns:\n str, bytes:\n \"\"\"\n return s[len(prefix):] if s.startswith(prefix) else s" }, { "identifier": "handle_adb_error", "path": "module/device/method/utils.py", "snippet": "def handle_adb_error(e):\n \"\"\"\n Args:\n e (Exception):\n\n Returns:\n bool: If should retry\n \"\"\"\n text = str(e)\n if 'not found' in text:\n # When you call `adb disconnect <serial>`\n # Or when adb server was killed (low possibility)\n # AdbError(device '127.0.0.1:59865' not found)\n logger.error(e)\n return True\n elif 'timeout' in text:\n # AdbTimeout(adb read timeout)\n logger.error(e)\n return True\n elif 'closed' in text:\n # AdbError(closed)\n # Usually after AdbTimeout(adb read timeout)\n # 
Disconnect and re-connect should fix this.\n logger.error(e)\n return True\n elif 'device offline' in text:\n # AdbError(device offline)\n # When a device that has been connected wirelessly is disconnected passively,\n # it does not disappear from the adb device list,\n # but will be displayed as offline.\n # In many cases, such as disconnection and recovery caused by network fluctuations,\n # or after VMOS reboot when running Alas on a phone,\n # the device is still available, but it needs to be disconnected and re-connected.\n logger.error(e)\n return True\n elif 'is offline' in text:\n # RuntimeError: USB device 127.0.0.1:7555 is offline\n # Raised by uiautomator2 when current adb service is killed by another version of adb service.\n logger.error(e)\n return True\n elif 'unknown host service' in text:\n # AdbError(unknown host service)\n # Another version of ADB service started, current ADB service has been killed.\n # Usually because user opened a Chinese emulator, which uses ADB from the Stone Age.\n logger.error(e)\n return True\n else:\n # AdbError()\n logger.exception(e)\n possible_reasons(\n 'If you are using BlueStacks or LD player or WSA, please enable ADB in the settings of your emulator',\n 'Emulator died, please restart emulator',\n 'Serial incorrect, no such device exists or emulator is not running'\n )\n return False" }, { "identifier": "ImageTruncated", "path": "module/device/method/utils.py", "snippet": "class ImageTruncated(Exception):\n pass" }, { "identifier": "PackageNotInstalled", "path": "module/device/method/utils.py", "snippet": "class PackageNotInstalled(Exception):\n pass" }, { "identifier": "RequestHumanTakeover", "path": "module/exception.py", "snippet": "class RequestHumanTakeover(Exception):\n # Request human takeover\n # Alas is unable to handle such error, probably because of wrong settings.\n pass" }, { "identifier": "ScriptError", "path": "module/exception.py", "snippet": "class ScriptError(Exception):\n # This is likely to be a mistake of developers, but sometimes a random issue\n pass" }, { "identifier": "logger", "path": "module/logger/logger.py", "snippet": "def empty_function(*args, **kwargs):\n def __init__(self, *args, func: Callable[[ConsoleRenderable], None] = None, **kwargs):\n def emit(self, record: logging.LogRecord) -> None:\n def handle(self, record: logging.LogRecord) -> bool:\n def options(self) -> ConsoleOptions:\ndef _set_file_logger(name=pyw_name):\ndef set_file_logger(name=pyw_name):\ndef set_func_logger(func):\ndef _get_renderables(\n self: Console, *objects, sep=\" \", end=\"\\n\", justify=None, emoji=None, markup=None, highlight=None,\n) -> List[ConsoleRenderable]:\ndef print(*objects: ConsoleRenderable, **kwargs):\ndef rule(title=\"\", *, characters=\"─\", style=\"rule.line\", end=\"\\n\", align=\"center\"):\ndef hr(title, level=3):\ndef attr(name, text):\ndef attr_align(name, text, front='', align=22):\ndef show():\ndef error_convert(func):\n def error_wrapper(msg, *args, **kwargs):\nclass RichFileHandler(RichHandler):\nclass RichRenderableHandler(RichHandler):\nclass HTMLConsole(Console):\nclass Highlighter(RegexHighlighter):\nWEB_THEME = Theme({\n \"web.brace\": Style(bold=True),\n \"web.bool_true\": Style(color=\"bright_green\", italic=True),\n \"web.bool_false\": Style(color=\"bright_red\", italic=True),\n \"web.none\": Style(color=\"magenta\", italic=True),\n \"web.path\": Style(color=\"magenta\"),\n \"web.filename\": Style(color=\"bright_magenta\"),\n \"web.str\": Style(color=\"green\", italic=False, bold=False),\n 
\"web.time\": Style(color=\"cyan\"),\n \"rule.text\": Style(bold=True),\n})" } ]
import re

import cv2
import numpy as np
import time
from functools import wraps
from adbutils.errors import AdbError
from lxml import etree

from module.base.decorator import Config
from module.device.connection import Connection
from module.device.method.utils import (RETRY_TRIES, retry_sleep, remove_prefix, handle_adb_error, ImageTruncated, PackageNotInstalled)
from module.exception import RequestHumanTakeover, ScriptError
from module.logger import logger
9,565
def retry(func):
    @wraps(func)
    def retry_wrapper(self, *args, **kwargs):
        """
        Args:
            self (Adb):
        """
        init = None
        for _ in range(RETRY_TRIES):
            try:
                if callable(init):
                    retry_sleep(_)
                    init()
                return func(self, *args, **kwargs)
            # Can't handle
            except RequestHumanTakeover:
                break
            # When adb server was killed
            except ConnectionResetError as e:
                logger.error(e)

                def init():
                    self.adb_reconnect()
            # AdbError
            except AdbError as e:
                if handle_adb_error(e):
                    def init():
                        self.adb_reconnect()
                else:
                    break
            # Package not installed
            except PackageNotInstalled as e:
                logger.error(e)

                def init():
                    self.detect_package()
            # ImageTruncated
def retry(func):
    @wraps(func)
    def retry_wrapper(self, *args, **kwargs):
        """
        Args:
            self (Adb):
        """
        init = None
        for _ in range(RETRY_TRIES):
            try:
                if callable(init):
                    retry_sleep(_)
                    init()
                return func(self, *args, **kwargs)
            # Can't handle
            except RequestHumanTakeover:
                break
            # When adb server was killed
            except ConnectionResetError as e:
                logger.error(e)

                def init():
                    self.adb_reconnect()
            # AdbError
            except AdbError as e:
                if handle_adb_error(e):
                    def init():
                        self.adb_reconnect()
                else:
                    break
            # Package not installed
            except PackageNotInstalled as e:
                logger.error(e)

                def init():
                    self.detect_package()
            # ImageTruncated
except ImageTruncated as e:
6
2023-11-01 07:09:45+00:00
12k
liuzhao1225/YouDub
main.py
[ { "identifier": "TTS_Clone", "path": "youdub/tts_xttsv2.py", "snippet": "class TTS_Clone:\n def __init__(self, model_path=\"tts_models/multilingual/multi-dataset/xtts_v2\", device='cuda', language='zh-cn'):\n logging.info(f'Loading TTS model {model_path}...')\n self.tts = TTS(model_path).to(device)\n self.language = language\n logging.info('Model TTS loaded.')\n \n def inference(self, text, output_path, speaker_wav) -> np.ndarray:\n wav = self.tts.tts(\n text=text, speaker_wav=speaker_wav, language=self.language)\n wav = np.array(wav)\n save_wav(wav, output_path)\n # wav /= np.max(np.abs(wav))\n return wav" }, { "identifier": "audio_process_folder", "path": "youdub/tts_xttsv2.py", "snippet": "def audio_process_folder(folder, tts: TTS_Clone, speaker_to_voice_type=None, vocal_only=False):\n logging.info(f'TTS processing folder {folder}...')\n logging.info(f'speaker_to_voice_type: {speaker_to_voice_type}')\n with open(os.path.join(folder, 'zh.json'), 'r', encoding='utf-8') as f:\n transcript = json.load(f)\n full_wav = np.zeros((0,))\n if not os.path.exists(os.path.join(folder, 'temp')):\n os.makedirs(os.path.join(folder, 'temp'))\n\n for i, line in enumerate(transcript):\n text = line['text']\n # start = line['start']\n start = line['start']\n last_end = len(full_wav)/24000\n if start > last_end:\n full_wav = np.concatenate(\n (full_wav, np.zeros((int(24000 * (start - last_end)),))))\n start = len(full_wav)/24000\n line['start'] = start\n end = line['end']\n if os.path.exists(os.path.join(folder, 'temp', f'zh_{str(i).zfill(3)}.wav')):\n wav = librosa.load(os.path.join(\n folder, 'temp', f'zh_{str(i).zfill(3)}.wav'), sr=24000)[0]\n else:\n speaker = line.get('speaker', 'SPEAKER_00')\n speaker_wav = os.path.join(folder, 'SPEAKER', f'{speaker}.wav')\n wav = tts.inference(tts_preprocess_text(text), os.path.join(\n folder, 'temp', f'zh_{str(i).zfill(3)}.wav'), speaker_wav)\n time.sleep(0.1)\n # save_wav(wav, )\n wav_adjusted, adjusted_length = adjust_audio_length(wav, os.path.join(folder, 'temp', f'zh_{str(i).zfill(3)}.wav'), os.path.join(\n folder, 'temp', f'zh_{str(i).zfill(3)}_adjusted.wav'), end - start)\n\n wav_adjusted /= wav_adjusted.max()\n line['end'] = line['start'] + adjusted_length\n full_wav = np.concatenate(\n (full_wav, wav_adjusted))\n # load os.path.join(folder, 'en_Instruments.wav')\n # combine with full_wav (the length of the two audio might not be equal)\n transcript = split_text(transcript, punctuations=[\n ',', ';', ':', '。', '?', '!', '\\n','”'])\n with open(os.path.join(folder, 'transcript.json'), 'w', encoding='utf-8') as f:\n json.dump(transcript, f, ensure_ascii=False, indent=4)\n instruments_wav, sr = librosa.load(\n os.path.join(folder, 'en_Instruments.wav'), sr=24000)\n\n len_full_wav = len(full_wav)\n len_instruments_wav = len(instruments_wav)\n\n if len_full_wav > len_instruments_wav:\n # 如果 full_wav 更长,将 instruments_wav 延伸到相同长度\n instruments_wav = np.pad(\n instruments_wav, (0, len_full_wav - len_instruments_wav), mode='constant')\n elif len_instruments_wav > len_full_wav:\n # 如果 instruments_wav 更长,将 full_wav 延伸到相同长度\n full_wav = np.pad(\n full_wav, (0, len_instruments_wav - len_full_wav), mode='constant')\n # 合并两个音频\n full_wav /= np.max(np.abs(full_wav))\n save_wav(full_wav, os.path.join(folder, f'zh_Vocals.wav'))\n # instruments_wav /= np.max(np.abs(instruments_wav))\n instrument_coefficient = 1\n if vocal_only:\n instrument_coefficient = 0\n combined_wav = full_wav + instruments_wav*instrument_coefficient\n combined_wav /= np.max(np.abs(combined_wav))\n 
save_wav(combined_wav, os.path.join(folder, f'zh.wav'))" }, { "identifier": "TTS_Clone", "path": "youdub/tts_bytedance.py", "snippet": "class TTS_Clone:\n def __init__(self):\n self.appid = os.getenv('APPID')\n self.access_token = os.getenv('ACCESS_TOKEN')\n self.cluster = \"volcano_tts\"\n self.host = \"openspeech.bytedance.com\"\n self.api_url = f\"https://{self.host}/api/v1/tts\"\n self.header = {\"Authorization\": f\"Bearer;{self.access_token}\"}\n self.request_json = {\n \"app\": {\n \"appid\": self.appid,\n \"token\": \"access_token\",\n \"cluster\": self.cluster\n },\n \"user\": {\n \"uid\": \"388808087185088\"\n },\n \"audio\": {\n \"voice_type\": '',\n \"encoding\": \"wav\",\n \"speed_ratio\": 1,\n \"volume_ratio\": 1.0,\n \"pitch_ratio\": 1.0,\n },\n \"request\": {\n \"reqid\": str(uuid.uuid4()),\n \"text\": \"字节跳动语音合成\",\n \"text_type\": \"plain\",\n \"operation\": \"query\",\n \"with_frontend\": 1,\n \"frontend_type\": \"unitTson\"\n\n }\n }\n self.output_path = r'.'\n if not os.path.exists(self.output_path):\n os.mkdir(self.output_path)\n\n def inference(self, text, output_wav_path, speaker='SPEAKER_00', speaker_to_voice_type={'SPEAKER_00': 'BV701_streaming'}):\n self.request_json['request']['text'] = text\n self.request_json['request']['reqid'] = str(uuid.uuid4())\n self.request_json['audio']['voice_type'] = speaker_to_voice_type.get(\n speaker, 'BV701_streaming')\n max_retries = 5\n timeout_seconds = 10 # Set your desired timeout in seconds\n\n for attempt in range(max_retries):\n try:\n resp = requests.post(self.api_url, json.dumps(\n self.request_json), headers=self.header, timeout=timeout_seconds)\n if resp.status_code == 200:\n data = resp.json()[\"data\"]\n data = base64.b64decode(data)\n with open(output_wav_path, \"wb\") as f:\n f.write(data)\n print(f'{output_wav_path}: {text}')\n return np.frombuffer(data, dtype=np.int16)\n else:\n print(f\"Request failed with status code: {resp.status_code}\")\n if resp.status_code == 500:\n return None\n raise Exception(f\"Request failed with status code: {resp.status_code}\")\n except Exception as e:\n print(f\"Request failed: {e}, retrying ({attempt+1}/{max_retries})\")\n time.sleep(2) # Wait 2 seconds before retrying\n\n print(\"Max retries reached, request failed\")\n return None" }, { "identifier": "audio_process_folder", "path": "youdub/tts_bytedance.py", "snippet": "def audio_process_folder(folder, tts: TTS_Clone, speaker_to_voice_type, vocal_only=False):\n logging.info(f'TTS processing folder {folder}...')\n logging.info(f'speaker_to_voice_type: {speaker_to_voice_type}')\n with open(os.path.join(folder, 'zh.json'), 'r', encoding='utf-8') as f:\n transcript = json.load(f)\n full_wav = np.zeros((0,))\n if not os.path.exists(os.path.join(folder, 'temp')):\n os.makedirs(os.path.join(folder, 'temp'))\n\n for i, line in enumerate(transcript):\n text = line['text']\n # start = line['start']\n start = line['start']\n last_end = len(full_wav)/24000\n if start > last_end:\n full_wav = np.concatenate(\n (full_wav, np.zeros((int(24000 * (start - last_end)),))))\n start = len(full_wav)/24000\n line['start'] = start\n end = line['end']\n if os.path.exists(os.path.join(folder, 'temp', f'zh_{str(i).zfill(3)}.wav')):\n wav = librosa.load(os.path.join(\n folder, 'temp', f'zh_{str(i).zfill(3)}.wav'), sr=24000)[0]\n else:\n wav = tts.inference(tts_preprocess_text(text), os.path.join(\n folder, 'temp', f'zh_{str(i).zfill(3)}.wav'), speaker=line.get('speaker', 'SPEAKER_00'), speaker_to_voice_type=speaker_to_voice_type)\n time.sleep(0.1)\n # 
save_wav(wav, )\n wav_adjusted, adjusted_length = adjust_audio_length(wav, os.path.join(folder, 'temp', f'zh_{str(i).zfill(3)}.wav'), os.path.join(\n folder, 'temp', f'zh_{str(i).zfill(3)}_adjusted.wav'), end - start)\n\n wav_adjusted /= wav_adjusted.max()\n line['end'] = line['start'] + adjusted_length\n full_wav = np.concatenate(\n (full_wav, wav_adjusted))\n # load os.path.join(folder, 'en_Instruments.wav')\n # combine with full_wav (the length of the two audio might not be equal)\n transcript = split_text(transcript, punctuations=[\n ',', ';', ':', '。', '?', '!', '\\n', '”'])\n with open(os.path.join(folder, 'transcript.json'), 'w', encoding='utf-8') as f:\n json.dump(transcript, f, ensure_ascii=False, indent=4)\n instruments_wav, sr = librosa.load(\n os.path.join(folder, 'en_Instruments.wav'), sr=24000)\n\n len_full_wav = len(full_wav)\n len_instruments_wav = len(instruments_wav)\n\n if len_full_wav > len_instruments_wav:\n # 如果 full_wav 更长,将 instruments_wav 延伸到相同长度\n instruments_wav = np.pad(\n instruments_wav, (0, len_full_wav - len_instruments_wav), mode='constant')\n elif len_instruments_wav > len_full_wav:\n # 如果 instruments_wav 更长,将 full_wav 延伸到相同长度\n full_wav = np.pad(\n full_wav, (0, len_instruments_wav - len_full_wav), mode='constant')\n # 合并两个音频\n full_wav /= np.max(np.abs(full_wav))\n save_wav(full_wav, os.path.join(folder, f'zh_Vocals.wav'))\n # instruments_wav /= np.max(np.abs(instruments_wav))\n instrument_coefficient = 1\n if vocal_only:\n instrument_coefficient = 0\n combined_wav = full_wav + instruments_wav*instrument_coefficient\n combined_wav /= np.max(np.abs(combined_wav))\n save_wav(combined_wav, os.path.join(folder, f'zh.wav'))" }, { "identifier": "VideoProcessor", "path": "youdub/asr_whisperX.py", "snippet": "class VideoProcessor:\n def __init__(self, model='large', download_root='models/ASR/whisper', device='cuda', batch_size=32, diarize=False):\n logging.info(f'Loading model {model} from {download_root}...')\n self.device = device\n self.batch_size = batch_size\n self.model = model\n # self.model = whisperx.load_model(model, download_root=download_root, device=device)\n if model == 'large-v3':\n self.whisper_model = whisper.load_model(model, download_root=download_root, device=device) # whisperx doesn't support large-v3 yet, so use whisper instead\n else:\n self.whisper_model = whisperx.load_model(model, download_root=download_root, device=device)\n self.diarize = diarize\n if self.diarize:\n self.diarize_model = whisperx.DiarizationPipeline(use_auth_token=os.getenv('HF_TOKEN'), device=device)\n self.embedding_model = Model.from_pretrained(\"pyannote/embedding\", use_auth_token=os.getenv('HF_TOKEN'))\n self.embedding_inference = Inference(\n self.embedding_model, window=\"whole\")\n self.voice_type_embedding = dict()\n voice_type_folder = r'voice_type'\n for file in os.listdir(voice_type_folder):\n if file.endswith('.npy'):\n voice_type = file.replace('.npy', '')\n embedding = np.load(os.path.join(voice_type_folder, file))\n self.voice_type_embedding[voice_type] = embedding\n logging.info(f'Loaded {len(self.voice_type_embedding)} voice types.')\n\n self.language_code = 'en'\n self.align_model, self.meta_data = whisperx.load_align_model(language_code=self.language_code, device=device)\n self.vocal_remover = Demucs(model='htdemucs_ft')\n logging.info('Model loaded.')\n\n def transcribe_audio(self, wav_path):\n logging.debug(f'Transcribing audio {wav_path}...')\n if self.model == 'large-v3':\n rec_result = self.whisper_model.transcribe(\n wav_path, verbose=True, 
condition_on_previous_text=True, max_initial_timestamp=None)\n else:\n rec_result = self.whisper_model.transcribe(\n wav_path, batch_size=self.batch_size, print_progress=True, combined_progress=True)\n \n if rec_result['language'] == 'nn':\n return None\n if rec_result['language'] != self.language_code:\n self.language_code = rec_result['language']\n print(self.language_code)\n self.align_model, self.meta_data = whisperx.load_align_model(language_code=self.language_code, device=self.device)\n \n rec_result = whisperx.align(rec_result['segments'], self.align_model, self.meta_data, wav_path, self.device, return_char_alignments=False, print_progress=True)\n return rec_result\n \n def diarize_transcribed_audio(self, wav_path, transcribe_result):\n logging.info(f'Diarizing audio {wav_path}...')\n diarize_segments = self.diarize_model(wav_path)\n result = whisperx.assign_word_speakers(\n diarize_segments, transcribe_result)\n return result\n \n def get_speaker_embedding(self, json_path):\n with open(json_path, 'r', encoding='utf-8') as f:\n result = json.load(f)\n wav_folder = os.path.dirname(json_path)\n wav_path = os.path.join(wav_folder, 'en_Vocals.wav')\n audio_data, samplerate = sf.read(wav_path)\n speaker_dict = dict()\n length = len(audio_data)\n delay = 0.1\n for segment in result:\n start = max(0, int((segment['start'] - delay) * samplerate))\n end = min(int((segment['end']+delay) * samplerate), length)\n speaker_segment_audio = audio_data[start:end]\n speaker_dict[segment['speaker']] = np.concatenate((speaker_dict.get(\n segment['speaker'], np.zeros((0,2))),speaker_segment_audio))\n speaker_folder = os.path.join(wav_folder, 'SPEAKER')\n if not os.path.exists(speaker_folder):\n os.makedirs(speaker_folder)\n for speaker, audio in speaker_dict.items():\n speaker_file_path = os.path.join(\n speaker_folder, f\"{speaker}.wav\")\n sf.write(speaker_file_path, audio, samplerate)\n \n for file in os.listdir(speaker_folder):\n if file.startswith('SPEAKER') and file.endswith('.wav'):\n wav_path = os.path.join(speaker_folder, file)\n embedding = self.embedding_inference(wav_path)\n np.save(wav_path.replace('.wav', '.npy'), embedding)\n \n def find_closest_unique_voice_type(self, speaker_embedding):\n speaker_to_voice_type = {}\n available_speakers = set(speaker_embedding.keys())\n available_voice_types = set(self.voice_type_embedding.keys())\n\n while available_speakers and available_voice_types:\n min_distance = float('inf')\n closest_speaker = None\n closest_voice_type = None\n\n for speaker in available_speakers:\n sp_embedding = speaker_embedding[speaker]\n for voice_type in available_voice_types:\n vt_embedding = self.voice_type_embedding[voice_type]\n distance = cosine(sp_embedding, vt_embedding)\n\n if distance < min_distance:\n min_distance = distance\n closest_speaker = speaker\n closest_voice_type = voice_type\n\n if closest_speaker and closest_voice_type:\n speaker_to_voice_type[closest_speaker] = closest_voice_type\n available_speakers.remove(closest_speaker)\n available_voice_types.remove(closest_voice_type)\n\n return speaker_to_voice_type\n\n def get_speaker_to_voice_type_dict(self, json_path):\n self.get_speaker_embedding(json_path)\n wav_folder = os.path.dirname(json_path)\n speaker_folder = os.path.join(wav_folder, 'SPEAKER')\n speaker_embedding = dict()\n for file in os.listdir(speaker_folder):\n if file.startswith('SPEAKER') and file.endswith('.npy'):\n speaker_name = file.replace('.npy', '')\n embedding = np.load(os.path.join(speaker_folder, file))\n 
speaker_embedding[speaker_name] = embedding\n\n return self.find_closest_unique_voice_type(speaker_embedding)\n \n def extract_audio_from_video(self, video_path, audio_path):\n logging.info(f'Extracting audio from video {video_path}...')\n video = VideoFileClip(video_path)\n video.audio.write_audiofile(audio_path)\n output_dir = os.path.dirname(audio_path)\n if not os.path.exists(os.path.join(output_dir, 'en_Vocals.wav')) or not os.path.exists(os.path.join(output_dir, 'en_Instruments.wav')):\n self.vocal_remover.inference(\n audio_path, os.path.dirname(audio_path))\n logging.info(f'Audio extracted and saved to {audio_path}.')\n\n def save_transcription_to_json(self, transcription, json_path):\n logging.debug(f'Saving transcription to {json_path}...')\n if transcription is None:\n transcription_with_timestemp = []\n else:\n transcription_with_timestemp = [{'start': round(segment['start'], 3), 'end': round(\n segment['end'], 3), 'text': segment['text'].strip(), 'speaker': segment.get('speaker', 'SPEAKER_00')} for segment in transcription['segments'] if segment['text'] != '']\n\n transcription_with_timestemp = merge_segments(\n transcription_with_timestemp)\n with open(json_path.replace('en.json', 'subtitle.json'), 'w', encoding='utf-8') as f:\n # f.write(transcription_with_timestemp)\n json.dump(\n transcription_with_timestemp, f, ensure_ascii=False, indent=4)\n\n transcription_with_timestemp = merge_segments(\n transcription_with_timestemp, ending='.?!。?!')\n with open(json_path, 'w', encoding='utf-8') as f:\n # f.write(transcription_with_timestemp)\n json.dump(\n transcription_with_timestemp, f, ensure_ascii=False, indent=8)\n\n logging.debug('Transcription saved.')\n\n def process_video(self, video_path, output_folder):\n logging.debug('Processing video...')\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n if not os.path.exists(os.path.join(output_folder, 'en_Vocals.wav')):\n self.extract_audio_from_video(video_path, os.path.join(output_folder, 'en.wav'))\n if not os.path.exists(os.path.join(output_folder, 'en.json')):\n transcription = self.transcribe_audio(\n os.path.join(output_folder, 'en_Vocals.wav'))\n if self.diarize:\n transcription = self.diarize_transcribed_audio(\n os.path.join(output_folder, 'en.wav'), transcription)\n self.save_transcription_to_json(\n transcription, os.path.join(output_folder, 'en.json'))\n if not os.path.exists(os.path.join(output_folder, 'speaker_to_voice_type.json')):\n if self.diarize:\n speaker_to_voice_type = self.get_speaker_to_voice_type_dict(\n os.path.join(output_folder, 'en.json'))\n with open(os.path.join(output_folder, 'speaker_to_voice_type.json'), 'w', encoding='utf-8') as f:\n json.dump(speaker_to_voice_type, f, ensure_ascii=False, indent=4)\n else:\n speaker_to_voice_type = {'SPEAKER_00': 'BV701_streaming'}\n else:\n with open(os.path.join(output_folder, 'speaker_to_voice_type.json'), 'r', encoding='utf-8') as f:\n speaker_to_voice_type = json.load(f)\n logging.debug('Video processing completed.')\n return speaker_to_voice_type" }, { "identifier": "replace_audio_ffmpeg", "path": "youdub/video_postprocess.py", "snippet": "def replace_audio_ffmpeg(input_video: str, input_audio: str, input_subtitles: str, output_path: str, fps=30) -> None:\n input_folder = os.path.dirname(input_video)\n dst_folder = os.path.join(input_folder, '0_finished')\n if not os.path.exists(dst_folder):\n os.mkdir(dst_folder)\n \n if os.path.exists(output_path):\n command = f'move \"{input_video}\" \"{dst_folder}\"'\n subprocess.Popen(command, 
shell=True)\n return\n\n # Extract the video name from the input video path\n video_name = os.path.basename(input_video)\n\n # Replace video file extension with '.srt' for subtitles\n srt_name = video_name.replace('.mp4', '.srt').replace(\n '.mkv', '.srt').replace('.avi', '.srt').replace('.flv', '.srt')\n\n # Construct the path for the subtitles file\n srt_path = os.path.join(os.path.dirname(input_audio), srt_name)\n\n # Convert subtitles from JSON to SRT format\n convert_json_to_srt(input_subtitles, srt_path)\n\n # Determine the output folder and define a temporary file path\n output_folder = os.path.dirname(output_path)\n tmp = os.path.join(output_folder, 'tmp.mp4')\n\n # Prepare a list to hold FFmpeg commands\n commands = []\n\n # FFmpeg command to speed up the video by 1.05 times\n speed_up = 1.05\n \n if speed_up == 1:\n tmp = output_path\n commands.append(f'ffmpeg -i \"{input_video}\" -i \"{input_audio}\" -vf \"subtitles={srt_path}:force_style=\\'FontName=Arial,FontSize=20,PrimaryColour=&HFFFFFF,OutlineColour=&H000000,Outline=2,WrapStyle=2\\'\" -c:v libx264 -r {fps} -c:a aac -map 0:v:0 -map 1:a:0 \"{tmp}\" -y'.replace('\\\\', '/'))\n \n # commands.append(f'ffmpeg -i \"{input_video}\" -i \"{input_audio}\" -c:v libx264 -r {fps} -c:a aac -map 0:v:0 -map 1:a:0 \"{tmp}\" -y'.replace('\\\\', '/'))\n \n if speed_up != 1:\n commands.append(\n f'ffmpeg -i \"{tmp}\" -vf \"setpts={1/speed_up}*PTS\" -af \"atempo={speed_up}\" -c:v libx264 -c:a aac \"{output_path}\" -y'.replace('\\\\', '/'))\n\n # Command to delete the temporary file\n commands.append(f'del \"{tmp}\"')\n \n # move input video to dst folder\n commands.append(f'move \"{input_video}\" \"{dst_folder}\"')\n\n # Add an 'exit' command to close the command prompt window after execution\n commands.append('exit')\n\n # Join the commands with '&&' to ensure sequential execution\n command = ' && '.join(commands)\n\n # Execute the combined FFmpeg command\n print(command)\n subprocess.Popen(command, shell=True)" }, { "identifier": "Translator", "path": "youdub/translation_unsafe.py", "snippet": "class Translator:\n def __init__(self):\n self.system_message = system_message\n self.messages = []\n\n def translate(self, transcript, original_fname):\n print('总结中...')\n retry = 1\n summary = ''\n while retry >= 0:\n try:\n response = openai.ChatCompletion.create(\n model=model_name,\n messages=[{\"role\": \"system\", \"content\": f'你是一个科普专家。你的目的是总结文本中的主要科学知识。{magic}!'}] + [{\"role\": \"user\", \"content\": f\"。简要概括这个视频的主要内容。\\n标题:{original_fname}\\n内容:{''.join(transcript)}\\n标题:{original_fname}\\n请你用中文给视频写一个“标题”、“主要内容”和“专业名词”,谢谢。\"},], timeout=240)\n summary = response.choices[0].message.content\n print(summary)\n retry = -1\n except Exception as e:\n retry -= 1\n print('总结失败')\n print(e)\n print('重新总结')\n time.sleep(1)\n if retry == 0:\n print('总结失败')\n \n self.fixed_messages = [{'role': 'user', 'content': '请翻译:Hello!'}, {\n 'role': 'assistant', 'content': f'“你好!”'}, {'role': 'user', 'content': '请翻译:Animation videos explaining things with optimistic nihilism since 2,013.'}, {\n 'role': 'assistant', 'content': f'“从2013年开始,我们以乐观的虚无主义制作动画,进行科普。”'}]\n # self.fixed_messages = []\n self.messages = []\n final_result = []\n print('\\n翻译中...')\n for sentence in transcript:\n if not sentence:\n continue\n retry = 20\n retry_message = ''\n\n # print(messages)\n # [{\"role\": \"system\", \"content\": summary + '\\n' + self.system_message}] + self.fixed_messages + \\\n history = \" \".join(final_result[-30:])\n while retry > 0:\n retry -= 1\n messages = [\n 
{\"role\": \"system\", \"content\": f'请你扮演科普专家的角色。这是一个为视频配音设计的翻译任务,将各种语言精准而优雅地转化为尽量简短的中文。请在翻译时避免生硬的直译,而是追求自然流畅、贴近原文而又不失文学韵味的表达。在这个过程中,请特别注意维护中文特有的语序和句式结构,使翻译文本既忠于原意又符合中文的表达习惯。{magic}'}] + self.fixed_messages + [{\"role\": \"user\", \"content\": f'{summary}\\n{self.system_message}\\n请将Transformer, token等人工智能相关的专业名词保留原文。长句分成几个短句。\\n历史内容:\\n{history}\\n以上为参考的历史内容。\\n{retry_message}\\n深呼吸,请正确翻译这句英文:“{sentence}”翻译成简洁中文。'},]\n try:\n response = openai.ChatCompletion.create(\n model=model_name,\n messages=messages,\n temperature=0.3,\n timeout=60,\n )\n response = response.choices[0].message.content\n result = response.strip()\n if retry != 0:\n if '\\n' in result:\n retry_message += '无视前面的内容,仅仅只翻译下面的英文,请简短翻译,只输出翻译结果。'\n raise Exception('存在换行')\n if '翻译' in result:\n retry_message += '无视前面的内容,请不要出现“翻译”字样,仅仅只翻译下面的英文,请简短翻译,只输出翻译结果。'\n raise Exception('存在\"翻译\"字样')\n if '这句话的意思是' in result:\n retry_message += '无视前面的内容,请不要出现“这句话的意思是”字样,仅仅只翻译下面的英文,请简短翻译,只输出翻译结果。'\n raise Exception('存在\"这句话的意思是\"字样')\n if '这句话的意译是' in result:\n retry_message += '无视前面的内容,请不要出现“这句话的意译是”字样,仅仅只翻译下面的英文,请简短翻译,只输出翻译结果。'\n raise Exception('存在\"这句话的意译是\"字样')\n if '这句' in result:\n retry_message += '无视前面的内容,请不要出现“这句话”字样,仅仅只翻译下面的英文,请简短翻译,只输出翻译结果。'\n raise Exception('存在\"这句\"字样')\n if '深呼吸' in result:\n retry_message += '无视前面的内容,请不要出现“深呼吸”字样,仅仅只翻译下面的英文,请简短翻译,只输出翻译结果。'\n raise Exception('存在\"深呼吸\"字样')\n if (result.startswith('“') and result.endswith('”')) or (result.startswith('\"') and result.endswith('\"')):\n result = result[1:-1]\n if len(sentence) <= 10:\n if len(result) > 20:\n retry_message += '注意:仅仅只翻译下面的内容,请简短翻译,只输出翻译结果。'\n raise Exception('翻译过长')\n elif len(result) > len(sentence)*0.75:\n retry_message += '注意:仅仅只翻译下面的内容,请简短翻译,只输出翻译结果。'\n raise Exception('翻译过长')\n result = translation_postprocess(result)\n \n if result:\n self.messages.append(\n {'role': 'user', 'content': f\"{sentence}\"})\n self.messages.append(\n {'role': 'assistant', 'content': f'{result}'})\n print(sentence)\n print(response)\n print(f'最终结果:{result}')\n print('='*50)\n final_result.append(result)\n retry = 0\n except Exception as e:\n print(sentence)\n print(response)\n print(e)\n print('翻译失败')\n retry_message += f''\n time.sleep(0.5)\n return final_result, summary" }, { "identifier": "split_text", "path": "youdub/utils.py", "snippet": "def split_text(input_data,\n punctuations=['。', '?', '!', '\\n', \"”\"]):\n # Chinese punctuation marks for sentence ending\n\n # Function to check if a character is a Chinese ending punctuation\n def is_punctuation(char):\n return char in punctuations\n\n # Process each item in the input data\n output_data = []\n for item in input_data:\n start = item[\"start\"]\n text = item[\"text\"]\n speaker = item.get(\"speaker\", \"SPEAKER_00\")\n sentence_start = 0\n\n # Calculate the duration for each character\n duration_per_char = (item[\"end\"] - item[\"start\"]) / len(text)\n for i, char in enumerate(text):\n # If the character is a punctuation, split the sentence\n if not is_punctuation(char) and i != len(text) - 1:\n continue\n if i - sentence_start < 5 and i != len(text) - 1:\n continue\n if i < len(text) - 1 and is_punctuation(text[i+1]):\n continue\n sentence = text[sentence_start:i+1]\n sentence_end = start + duration_per_char * len(sentence)\n\n # Append the new item\n output_data.append({\n \"start\": round(start, 3),\n \"end\": round(sentence_end, 3),\n \"text\": sentence,\n \"speaker\": speaker\n })\n\n # Update the start for the next sentence\n start = sentence_end\n sentence_start = i + 1\n\n return 
output_data" } ]
import os
import logging
import json
import re
import time
import numpy as np
import re
import argparse
from tqdm import tqdm
from youdub.tts_xttsv2 import TTS_Clone, audio_process_folder
from youdub.tts_bytedance import TTS_Clone as TTS_Clone_bytedance
from youdub.tts_bytedance import audio_process_folder as audio_process_folder_bytedance
from youdub.asr_whisperX import VideoProcessor
from youdub.video_postprocess import replace_audio_ffmpeg
from youdub.translation_unsafe import Translator
from youdub.utils import split_text
from multiprocessing import Process
9,403
# from youdub.tts_bytedance import TTS_Clone as TTS_Clone_bytedance, audio_process_folder as audio_process_folder_bytedance
allowed_chars = '[^a-zA-Z0-9_ .]'


def translate_from_folder(folder, translator: Translator, original_fname):
    with open(os.path.join(folder, 'en.json'), mode='r', encoding='utf-8') as f:
        transcript = json.load(f)
    _transcript = [sentence['text'] for sentence in transcript if sentence['text']]
    result = ['']
    while len(result) != len(_transcript):
        result, summary = translator.translate(_transcript, original_fname)
    for i, sentence in enumerate(result):
        transcript[i]['text'] = sentence
    transcript = split_text(transcript)  # 使用whisperX后,会自动分句,所以不再需要手动分句。同时避免了将`“你好。”`分为`“你好。`和`”`的情况
    with open(os.path.join(folder, 'zh.json'), 'w', encoding='utf-8') as f:
        json.dump(transcript, f, ensure_ascii=False, indent=4)
    with open(os.path.join(folder, 'summary.txt'), 'w', encoding='utf-8') as f:
        f.write(summary)


# def main(input_folder, output_folder, diarize=False):
def main():
    parser = argparse.ArgumentParser(description='Process some videos.')
    parser.add_argument('--input_folders', type=str, nargs='+', required=True,
                        help='The list of input folders containing the videos')
    parser.add_argument('--output_folders', type=str, nargs='+', required=True,
                        help='The list of output folders where the processed videos will be stored')
    parser.add_argument('--vocal_only_folders', type=str, nargs='+', default=[],
                        help='The list of input folders containing the videos that only need vocal for the final result.')
    parser.add_argument('--diarize', action='store_true', help='Enable diarization')
    args = parser.parse_args()
    if len(args.input_folders) != len(args.output_folders):
        raise ValueError(
            "The number of input folders must match the number of output folders.")
    print('='*50)
    print('Initializing...')
    if args.diarize:
        print('Diarization enabled.')
    print('='*50)
    diarize = args.diarize
    processor = VideoProcessor(diarize=diarize)
    translator = Translator()
    tts = TTS_Clone()
    tts_bytedance = TTS_Clone_bytedance()
    for input_folder, output_folder in zip(args.input_folders, args.output_folders):
        if input_folder in args.vocal_only_folders:
            vocal_only = True
            print(f'Vocal only mode enabled for {input_folder}.')
        else:
            vocal_only = False
        if not os.path.exists(os.path.join(input_folder, '0_finished')):
            os.makedirs(os.path.join(input_folder, '0_finished'))
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)
        if not os.path.exists(os.path.join(output_folder, '0_to_upload')):
            os.makedirs(os.path.join(output_folder, '0_to_upload'))
        if not os.path.exists(os.path.join(output_folder, '0_finished')):
            os.makedirs(os.path.join(output_folder, '0_finished'))
        print('='*50)
        print(
            f'Video processing started for {input_folder} to {output_folder}.')
        print('='*50)
        logging.info('Processing folder...')
        files = os.listdir(input_folder)
        t = tqdm(files, desc="Processing files")
        video_lists = []
        for file in t:
            print('='*50)
            t.set_description(f"Processing {file}")
            print('='*50)
            if file.endswith('.mp4') or file.endswith('.mkv') or file.endswith('.avi') or file.endswith('.flv'):
                original_fname = file[:-4]
                new_filename = re.sub(r'[^a-zA-Z0-9_. ]', '', file)
                new_filename = re.sub(r'\s+', ' ', new_filename)
                new_filename = new_filename.strip()
                os.rename(os.path.join(input_folder, file),
                          os.path.join(input_folder, new_filename))
                file = new_filename
                video_lists.append(file)
                input_path = os.path.join(input_folder, file)
                output_path = os.path.join(output_folder, file[:-4]).strip()
                if not os.path.exists(output_path):
                    os.makedirs(output_path)
                speaker_to_voice_type = processor.process_video(
                    input_path, output_path)
            else:
                continue
            if not os.path.exists(os.path.join(output_path, 'zh.json')):
                translate_from_folder(output_path, translator, original_fname)
            if len(speaker_to_voice_type) == 1:
                print('Only one speaker detected. Using TTS.')
                audio_process_folder_bytedance(
                    output_path, tts_bytedance, speaker_to_voice_type, vocal_only=vocal_only)
            else:
                print('Multiple speakers detected. Using XTTSv2.')
# from youdub.tts_bytedance import TTS_Clone as TTS_Clone_bytedance, audio_process_folder as audio_process_folder_bytedance
allowed_chars = '[^a-zA-Z0-9_ .]'


def translate_from_folder(folder, translator: Translator, original_fname):
    with open(os.path.join(folder, 'en.json'), mode='r', encoding='utf-8') as f:
        transcript = json.load(f)
    _transcript = [sentence['text'] for sentence in transcript if sentence['text']]
    result = ['']
    while len(result) != len(_transcript):
        result, summary = translator.translate(_transcript, original_fname)
    for i, sentence in enumerate(result):
        transcript[i]['text'] = sentence
    transcript = split_text(transcript)  # 使用whisperX后,会自动分句,所以不再需要手动分句。同时避免了将`“你好。”`分为`“你好。`和`”`的情况
    with open(os.path.join(folder, 'zh.json'), 'w', encoding='utf-8') as f:
        json.dump(transcript, f, ensure_ascii=False, indent=4)
    with open(os.path.join(folder, 'summary.txt'), 'w', encoding='utf-8') as f:
        f.write(summary)


# def main(input_folder, output_folder, diarize=False):
def main():
    parser = argparse.ArgumentParser(description='Process some videos.')
    parser.add_argument('--input_folders', type=str, nargs='+', required=True,
                        help='The list of input folders containing the videos')
    parser.add_argument('--output_folders', type=str, nargs='+', required=True,
                        help='The list of output folders where the processed videos will be stored')
    parser.add_argument('--vocal_only_folders', type=str, nargs='+', default=[],
                        help='The list of input folders containing the videos that only need vocal for the final result.')
    parser.add_argument('--diarize', action='store_true', help='Enable diarization')
    args = parser.parse_args()
    if len(args.input_folders) != len(args.output_folders):
        raise ValueError(
            "The number of input folders must match the number of output folders.")
    print('='*50)
    print('Initializing...')
    if args.diarize:
        print('Diarization enabled.')
    print('='*50)
    diarize = args.diarize
    processor = VideoProcessor(diarize=diarize)
    translator = Translator()
    tts = TTS_Clone()
    tts_bytedance = TTS_Clone_bytedance()
    for input_folder, output_folder in zip(args.input_folders, args.output_folders):
        if input_folder in args.vocal_only_folders:
            vocal_only = True
            print(f'Vocal only mode enabled for {input_folder}.')
        else:
            vocal_only = False
        if not os.path.exists(os.path.join(input_folder, '0_finished')):
            os.makedirs(os.path.join(input_folder, '0_finished'))
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)
        if not os.path.exists(os.path.join(output_folder, '0_to_upload')):
            os.makedirs(os.path.join(output_folder, '0_to_upload'))
        if not os.path.exists(os.path.join(output_folder, '0_finished')):
            os.makedirs(os.path.join(output_folder, '0_finished'))
        print('='*50)
        print(
            f'Video processing started for {input_folder} to {output_folder}.')
        print('='*50)
        logging.info('Processing folder...')
        files = os.listdir(input_folder)
        t = tqdm(files, desc="Processing files")
        video_lists = []
        for file in t:
            print('='*50)
            t.set_description(f"Processing {file}")
            print('='*50)
            if file.endswith('.mp4') or file.endswith('.mkv') or file.endswith('.avi') or file.endswith('.flv'):
                original_fname = file[:-4]
                new_filename = re.sub(r'[^a-zA-Z0-9_. ]', '', file)
                new_filename = re.sub(r'\s+', ' ', new_filename)
                new_filename = new_filename.strip()
                os.rename(os.path.join(input_folder, file),
                          os.path.join(input_folder, new_filename))
                file = new_filename
                video_lists.append(file)
                input_path = os.path.join(input_folder, file)
                output_path = os.path.join(output_folder, file[:-4]).strip()
                if not os.path.exists(output_path):
                    os.makedirs(output_path)
                speaker_to_voice_type = processor.process_video(
                    input_path, output_path)
            else:
                continue
            if not os.path.exists(os.path.join(output_path, 'zh.json')):
                translate_from_folder(output_path, translator, original_fname)
            if len(speaker_to_voice_type) == 1:
                print('Only one speaker detected. Using TTS.')
                audio_process_folder_bytedance(
                    output_path, tts_bytedance, speaker_to_voice_type, vocal_only=vocal_only)
            else:
                print('Multiple speakers detected. Using XTTSv2.')
audio_process_folder(
3
2023-11-02 08:21:31+00:00
12k
BrianPugh/cyclopts
tests/test_group_extractors.py
[ { "identifier": "App", "path": "cyclopts/core.py", "snippet": "class App:\n _name: Optional[Tuple[str, ...]] = field(default=None, alias=\"name\", converter=optional_to_tuple_converter)\n\n _help: Optional[str] = field(default=None, alias=\"help\")\n\n usage: Optional[str] = field(default=None)\n\n # Everything below must be kw_only\n\n default_command: Optional[Callable] = field(default=None, converter=_validate_default_command, kw_only=True)\n default_parameter: Optional[Parameter] = field(default=None, kw_only=True)\n\n version: Union[None, str, Callable] = field(factory=_default_version, kw_only=True)\n version_flags: Tuple[str, ...] = field(\n default=[\"--version\"],\n on_setattr=attrs.setters.frozen,\n converter=to_tuple_converter,\n kw_only=True,\n )\n\n show: bool = field(default=True, kw_only=True)\n\n help_flags: Tuple[str, ...] = field(\n default=[\"--help\", \"-h\"],\n on_setattr=attrs.setters.frozen,\n converter=to_tuple_converter,\n kw_only=True,\n )\n\n # This can ONLY ever be Tuple[Union[Group, str], ...] due to converter.\n # The other types is to make mypy happy for Cyclopts users.\n group: Union[Group, str, Tuple[Union[Group, str], ...]] = field(\n default=None, converter=to_tuple_converter, kw_only=True\n )\n\n group_arguments: Group = field(\n default=None,\n converter=GroupConverter(Group.create_default_arguments()),\n kw_only=True,\n )\n group_parameters: Group = field(\n default=None,\n converter=GroupConverter(Group.create_default_parameters()),\n kw_only=True,\n )\n group_commands: Group = field(\n default=None,\n converter=GroupConverter(Group.create_default_commands()),\n kw_only=True,\n )\n\n converter: Optional[Callable] = field(default=None, kw_only=True)\n validator: List[Callable] = field(default=None, converter=to_list_converter, kw_only=True)\n\n ######################\n # Private Attributes #\n ######################\n # Maps CLI-name of a command to a function handle.\n _commands: Dict[str, \"App\"] = field(init=False, factory=dict)\n\n _parents: List[\"App\"] = field(init=False, factory=list)\n\n _meta: \"App\" = field(init=False, default=None)\n _meta_parent: \"App\" = field(init=False, default=None)\n\n def __attrs_post_init__(self):\n if self.help_flags:\n self.command(\n self.help_print,\n name=self.help_flags,\n help_flags=[],\n version_flags=[],\n help=\"Display this message and exit.\",\n )\n if self.version_flags:\n self.command(\n self.version_print,\n name=self.version_flags,\n help_flags=[],\n version_flags=[],\n help=\"Display application version.\",\n )\n\n ###########\n # Methods #\n ###########\n\n @property\n def name(self) -> Tuple[str, ...]:\n \"\"\"Application name(s). 
Dynamically derived if not previously set.\"\"\"\n if self._name:\n return self._name\n elif self.default_command is None:\n name = Path(sys.argv[0]).name\n if name == \"__main__.py\":\n name = _get_root_module_name()\n return (name,)\n else:\n return (_format_name(self.default_command.__name__),)\n\n @property\n def help(self) -> str:\n if self._help is not None:\n return self._help\n elif self.default_command is None:\n # Try and fallback to a meta-app docstring.\n if self._meta is None:\n return \"\"\n else:\n return self.meta.help\n elif self.default_command.__doc__ is None:\n return \"\"\n else:\n return self.default_command.__doc__\n\n @help.setter\n def help(self, value):\n self._help = value\n\n def version_print(self) -> None:\n \"\"\"Print the application version.\"\"\"\n print(self.version() if callable(self.version) else self.version)\n\n def __getitem__(self, key: str) -> \"App\":\n \"\"\"Get the subapp from a command string.\n\n All commands get registered to Cyclopts as subapps.\n The actual function handler is at ``app[key].default_command``.\n \"\"\"\n if self._meta:\n with suppress(KeyError):\n return self.meta[key]\n return self._commands[key]\n\n def __contains__(self, k: str) -> bool:\n if k in self._commands:\n return True\n if self._meta_parent:\n return k in self._meta_parent\n return False\n\n @property\n def meta(self) -> \"App\":\n if self._meta is None:\n self._meta = type(self)(\n group_commands=copy(self.group_commands),\n group_arguments=copy(self.group_arguments),\n group_parameters=copy(self.group_parameters),\n )\n self._meta._meta_parent = self\n return self._meta\n\n def _parse_command_chain(self, tokens):\n command_chain = []\n app = self\n apps = [app]\n unused_tokens = tokens\n\n command_mapping = _combined_meta_command_mapping(app)\n\n for i, token in enumerate(tokens):\n if token in self.help_flags:\n break\n try:\n app = command_mapping[token]\n apps.append(app)\n unused_tokens = tokens[i + 1 :]\n except KeyError:\n break\n command_chain.append(token)\n command_mapping = _combined_meta_command_mapping(app)\n\n return command_chain, apps, unused_tokens\n\n def command(\n self,\n obj: Optional[Callable] = None,\n name: Union[None, str, Iterable[str]] = None,\n **kwargs,\n ) -> Callable:\n \"\"\"Decorator to register a function as a CLI command.\n\n Parameters\n ----------\n obj: Optional[Callable]\n Function or :class:`App` to be registered as a command.\n name: Union[None, str, Iterable[str]]\n Name(s) to register the ``obj`` to.\n If not provided, defaults to:\n\n * If registering an :class:`App`, then the app's name.\n * If registering a function, then the function's name.\n `**kwargs`\n Any argument that :class:`App` can take.\n \"\"\"\n if obj is None: # Called ``@app.command(...)``\n return partial(self.command, name=name, **kwargs)\n\n if isinstance(obj, App):\n app = obj\n\n if app._name is None and name is None:\n raise ValueError(\"Sub-app MUST have a name specified.\")\n\n if kwargs:\n raise ValueError(\"Cannot supplied additional configuration when registering a sub-App.\")\n else:\n validate_command(obj)\n kwargs.setdefault(\"help_flags\", [])\n kwargs.setdefault(\"version_flags\", [])\n if \"group_commands\" not in kwargs:\n kwargs[\"group_commands\"] = copy(self.group_commands)\n if \"group_parameters\" not in kwargs:\n kwargs[\"group_parameters\"] = copy(self.group_parameters)\n if \"group_arguments\" not in kwargs:\n kwargs[\"group_arguments\"] = copy(self.group_arguments)\n app = App(default_command=obj, **kwargs)\n # app.name is 
handled below\n\n if name is None:\n name = app.name\n else:\n app._name = name\n\n for n in to_tuple_converter(name):\n if n in self:\n raise CommandCollisionError(f'Command \"{n}\" already registered.')\n\n # Warning: app._name may not align with command name\n self._commands[n] = app\n\n app._parents.append(self)\n\n return obj\n\n def default(\n self,\n obj: Optional[Callable] = None,\n *,\n converter=None,\n validator=None,\n ):\n \"\"\"Decorator to register a function as the default action handler.\"\"\"\n if obj is None: # Called ``@app.default_command(...)``\n return partial(self.default, converter=converter, validator=validator)\n\n if isinstance(obj, App): # Registering a sub-App\n raise TypeError(\"Cannot register a sub-App to default.\")\n\n if self.default_command is not None:\n raise CommandCollisionError(f\"Default command previously set to {self.default_command}.\")\n\n validate_command(obj)\n self.default_command = obj\n if converter:\n self.converter = converter\n if validator:\n self.validator = validator\n return obj\n\n def parse_known_args(\n self,\n tokens: Union[None, str, Iterable[str]] = None,\n *,\n console: Optional[Console] = None,\n ) -> Tuple[Callable, inspect.BoundArguments, List[str]]:\n \"\"\"Interpret arguments into a function, :class:`~inspect.BoundArguments`, and any remaining unknown tokens.\n\n Parameters\n ----------\n tokens: Union[None, str, Iterable[str]]\n Either a string, or a list of strings to launch a command.\n Defaults to ``sys.argv[1:]``\n\n Returns\n -------\n command: Callable\n Bare function to execute.\n\n bound: inspect.BoundArguments\n Bound arguments for ``command``.\n\n unused_tokens: List[str]\n Any remaining CLI tokens that didn't get parsed for ``command``.\n \"\"\"\n tokens = normalize_tokens(tokens)\n\n command_chain, apps, unused_tokens = self._parse_command_chain(tokens)\n command_app = apps[-1]\n\n try:\n parent_app = apps[-2]\n except IndexError:\n parent_app = None\n\n try:\n if command_app.default_command:\n command = command_app.default_command\n resolved_command = ResolvedCommand(\n command,\n _resolve_default_parameter(apps),\n command_app.group_arguments,\n command_app.group_parameters,\n parse_docstring=False,\n )\n # We want the resolved group that ``app`` belongs to.\n if parent_app is None:\n command_groups = []\n else:\n command_groups = _get_command_groups(parent_app, command_app)\n\n bound, unused_tokens = create_bound_arguments(resolved_command, unused_tokens)\n try:\n if command_app.converter:\n bound.arguments = command_app.converter(**bound.arguments)\n for command_group in command_groups:\n if command_group.converter:\n bound.arguments = command_group.converter(**bound.arguments)\n for validator in command_app.validator:\n validator(**bound.arguments)\n for command_group in command_groups:\n for validator in command_group.validator:\n validator(**bound.arguments)\n except (AssertionError, ValueError, TypeError) as e:\n new_exception = ValidationError(value=e.args[0])\n raise new_exception from e\n\n return command, bound, unused_tokens\n else:\n if unused_tokens:\n raise InvalidCommandError(unused_tokens=unused_tokens)\n else:\n # Running the application with no arguments and no registered\n # ``default_command`` will default to ``help_print``.\n command = self.help_print\n bound = inspect.signature(command).bind(tokens=tokens, console=console)\n return command, bound, []\n except CycloptsError as e:\n e.app = command_app\n if command_chain:\n e.command_chain = command_chain\n raise\n\n raise 
NotImplementedError(\"Should never get here.\")\n\n def parse_args(\n self,\n tokens: Union[None, str, Iterable[str]] = None,\n *,\n console: Optional[Console] = None,\n print_error: bool = True,\n exit_on_error: bool = True,\n verbose: bool = False,\n ) -> Tuple[Callable, inspect.BoundArguments]:\n \"\"\"Interpret arguments into a function and :class:`~inspect.BoundArguments`.\n\n **Does** handle special flags like \"version\" or \"help\".\n\n Raises\n ------\n UnusedCliTokensError\n If any tokens remain after parsing.\n\n Parameters\n ----------\n tokens: Union[None, str, Iterable[str]]\n Either a string, or a list of strings to launch a command.\n Defaults to ``sys.argv[1:]``.\n print_error: bool\n Print a rich-formatted error on error.\n Defaults to ``True``.\n exit_on_error: bool\n If there is an error parsing the CLI tokens invoke ``sys.exit(1)``.\n Otherwise, continue to raise the exception.\n Defaults to ``True``.\n verbose: bool\n Populate exception strings with more information intended for developers.\n Defaults to ``False``.\n\n Returns\n -------\n command: Callable\n Function associated with command action.\n\n bound: inspect.BoundArguments\n Parsed and converted ``args`` and ``kwargs`` to be used when calling ``command``.\n \"\"\"\n tokens = normalize_tokens(tokens)\n\n meta_parent = self\n\n try:\n # Special flags (help/version) get bubbled up to the root app.\n # The root ``help_print`` will then traverse the meta app linked list.\n\n # The Help Flag is allowed to be anywhere in the token stream.\n help_flag_index = None\n for help_flag in self.help_flags:\n try:\n help_flag_index = tokens.index(help_flag)\n break\n except ValueError:\n pass\n\n if help_flag_index is not None:\n tokens.pop(help_flag_index)\n command = self.help_print\n while meta_parent := meta_parent._meta_parent:\n command = meta_parent.help_print\n bound = inspect.signature(command).bind(tokens, console=console)\n unused_tokens = []\n elif any(flag in tokens for flag in self.version_flags):\n # Version\n command = self.version_print\n while meta_parent := meta_parent._meta_parent:\n command = meta_parent.version_print\n bound = inspect.signature(command).bind()\n unused_tokens = []\n else:\n # Normal parsing\n command, bound, unused_tokens = self.parse_known_args(tokens, console=console)\n if unused_tokens:\n raise UnusedCliTokensError(\n target=command,\n unused_tokens=unused_tokens,\n )\n except CycloptsError as e:\n e.verbose = verbose\n e.root_input_tokens = tokens\n if print_error:\n if console is None:\n console = Console()\n console.print(format_cyclopts_error(e))\n\n if exit_on_error:\n sys.exit(1)\n else:\n raise\n\n return command, bound\n\n def __call__(\n self,\n tokens: Union[None, str, Iterable[str]] = None,\n *,\n console: Optional[Console] = None,\n print_error: bool = True,\n exit_on_error: bool = True,\n verbose: bool = False,\n ):\n \"\"\"Interprets and executes a command.\n\n Parameters\n ----------\n tokens : Union[None, str, Iterable[str]]\n Either a string, or a list of strings to launch a command.\n Defaults to ``sys.argv[1:]``.\n print_error: bool\n Print a rich-formatted error on error.\n Defaults to ``True``.\n exit_on_error: bool\n If there is an error parsing the CLI tokens invoke ``sys.exit(1)``.\n Otherwise, continue to raise the exception.\n Defaults to ``True``.\n verbose: bool\n Populate exception strings with more information intended for developers.\n Defaults to ``False``.\n\n Returns\n -------\n return_value: Any\n The value the parsed command handler returns.\n 
\"\"\"\n tokens = normalize_tokens(tokens)\n command, bound = self.parse_args(\n tokens,\n console=console,\n print_error=print_error,\n exit_on_error=exit_on_error,\n verbose=verbose,\n )\n try:\n return command(*bound.args, **bound.kwargs)\n except Exception as e:\n if PydanticValidationError is not None and isinstance(e, PydanticValidationError):\n if print_error:\n if console is None:\n console = Console()\n console.print(format_cyclopts_error(e))\n\n if exit_on_error:\n sys.exit(1)\n raise\n\n def help_print(\n self,\n tokens: Union[None, str, Iterable[str]] = None,\n *,\n console: Optional[Console] = None,\n ) -> None:\n \"\"\"Print the help page.\n\n Parameters\n ----------\n tokens: Union[None, str, Iterable[str]]\n Tokens to interpret for traversing the application command structure.\n If not provided, defaults to ``sys.argv``.\n \"\"\"\n tokens = normalize_tokens(tokens)\n\n if console is None:\n console = Console()\n\n command_chain, apps, _ = self._parse_command_chain(tokens)\n executing_app = apps[-1]\n\n # Print the:\n # my-app command COMMAND [ARGS] [OPTIONS]\n if executing_app.usage is None:\n console.print(format_usage(self, command_chain))\n elif executing_app.usage: # i.e. skip empty-string.\n console.print(executing_app.usage + \"\\n\")\n\n # Print the App/Command's Doc String.\n console.print(format_doc(self, executing_app))\n\n def walk_apps():\n # Iterates from deepest to shallowest meta-apps\n meta_list = [] # shallowest to deepest\n meta_list.append(executing_app)\n meta = executing_app\n while (meta := meta._meta) and meta.default_command:\n meta_list.append(meta)\n yield from reversed(meta_list)\n\n panels: Dict[str, Tuple[Group, HelpPanel]] = {}\n # Handle commands first; there's an off chance they may be \"upgraded\"\n # to an argument/parameter panel.\n for subapp in walk_apps():\n # Handle Commands\n for group, elements in groups_from_app(subapp):\n if not group.show:\n continue\n\n try:\n _, command_panel = panels[group.name]\n except KeyError:\n command_panel = HelpPanel(\n format=\"command\",\n title=group.name,\n )\n panels[group.name] = (group, command_panel)\n\n if group.help:\n if command_panel.description:\n command_panel.description += \"\\n\" + group.help\n else:\n command_panel.description = group.help\n\n command_panel.entries.extend(format_command_entries(elements))\n\n # Handle Arguments/Parameters\n for subapp in walk_apps():\n if subapp.default_command:\n command = ResolvedCommand(\n subapp.default_command,\n subapp.default_parameter,\n subapp.group_arguments,\n subapp.group_parameters,\n )\n for group, iparams in command.groups_iparams:\n if not group.show:\n continue\n cparams = [command.iparam_to_cparam[x] for x in iparams]\n try:\n _, existing_panel = panels[group.name]\n except KeyError:\n existing_panel = None\n new_panel = create_parameter_help_panel(group, iparams, cparams)\n\n if existing_panel:\n # An imperfect merging process\n existing_panel.format = \"parameter\"\n existing_panel.entries = new_panel.entries + existing_panel.entries # Commands go last\n if new_panel.description:\n if existing_panel.description:\n existing_panel.description += \"\\n\" + new_panel.description\n else:\n existing_panel.description = new_panel.description\n else:\n panels[group.name] = (group, new_panel)\n\n groups = [x[0] for x in panels.values()]\n help_panels = [x[1] for x in panels.values()]\n\n for help_panel in sort_groups(groups, help_panels)[1]:\n help_panel.remove_duplicates()\n if help_panel.format == \"command\":\n # don't sort format == 
\"parameter\" because order may matter there!\n help_panel.sort()\n console.print(help_panel)\n\n def interactive_shell(\n self,\n prompt: str = \"$ \",\n quit: Union[None, str, Iterable[str]] = None,\n dispatcher: Optional[Dispatcher] = None,\n **kwargs,\n ) -> None:\n \"\"\"Create a blocking, interactive shell.\n\n All registered commands can be executed in the shell.\n\n Parameters\n ----------\n prompt: str\n Shell prompt. Defaults to ``\"$ \"``.\n quit: Union[str, Iterable[str]]\n String or list of strings that will cause the shell to exit and this method to return.\n Defaults to ``[\"q\", \"quit\"]``.\n dispatcher: Optional[Dispatcher]\n Optional function that subsequently invokes the command.\n The ``dispatcher`` function must have signature:\n\n .. code-block:: python\n\n def dispatcher(command: Callable, bound: inspect.BoundArguments) -> Any:\n return command(*bound.args, **bound.kwargs)\n\n The above is the default dispatcher implementation.\n `**kwargs`\n Get passed along to :meth:`parse_args`.\n \"\"\"\n if os.name == \"posix\":\n print(\"Interactive shell. Press Ctrl-D to exit.\")\n else: # Windows\n print(\"Interactive shell. Press Ctrl-Z followed by Enter to exit.\")\n\n if quit is None:\n quit = [\"q\", \"quit\"]\n if isinstance(quit, str):\n quit = [quit]\n\n def default_dispatcher(command, bound):\n return command(*bound.args, **bound.kwargs)\n\n if dispatcher is None:\n dispatcher = default_dispatcher\n\n kwargs.setdefault(\"exit_on_error\", False)\n\n while True:\n try:\n user_input = input(prompt)\n except EOFError:\n break\n\n tokens = normalize_tokens(user_input)\n if not tokens:\n continue\n if tokens[0] in quit:\n break\n\n try:\n command, bound = self.parse_args(tokens, **kwargs)\n dispatcher(command, bound)\n except CycloptsError:\n # Upstream ``parse_args`` already printed the error\n pass\n except Exception:\n print(traceback.format_exc())\n\n def __repr__(self):\n \"\"\"Only shows non-default values.\"\"\"\n non_defaults = {}\n for a in self.__attrs_attrs__: # pyright: ignore[reportGeneralTypeIssues]\n if not a.init:\n continue\n v = getattr(self, a.name)\n # Compare types first because of some weird attribute issues.\n if type(v) != type(a.default) or v != a.default: # noqa: E721\n non_defaults[a.alias] = v\n\n signature = \", \".join(f\"{k}={v!r}\" for k, v in non_defaults.items())\n return f\"{type(self).__name__}({signature})\"" }, { "identifier": "Group", "path": "cyclopts/group.py", "snippet": "class Group:\n name: str = \"\"\n\n help: str = \"\"\n\n # All below parameters are keyword-only\n _show: Optional[bool] = field(default=None, alias=\"show\", kw_only=True)\n\n _sort_key: Any = field(\n default=None,\n alias=\"sort_key\",\n converter=lambda x: NO_USER_SORT_KEY if x is None else x,\n )\n\n converter: Optional[Callable] = field(default=None, kw_only=True)\n\n validator: Tuple[Callable, ...] 
= field(\n default=None,\n converter=lambda x: cast(Tuple[Callable, ...], to_tuple_converter(x)),\n kw_only=True,\n )\n\n default_parameter: Optional[\"Parameter\"] = field(\n default=None,\n validator=_group_default_parameter_must_be_none,\n kw_only=True,\n )\n\n def __str__(self):\n return self.name\n\n @property\n def show(self):\n return bool(self.name) if self._show is None else self._show\n\n @show.setter\n def show(self, value):\n self._show = value\n\n @property\n def sort_key(self):\n return None if self._sort_key is NO_USER_SORT_KEY else self._sort_key\n\n @sort_key.setter\n def sort_key(self, value):\n self._sort_key = value\n\n @classmethod\n def create_default_arguments(cls):\n return cls(\"Arguments\")\n\n @classmethod\n def create_default_parameters(cls):\n return cls(\"Parameters\")\n\n @classmethod\n def create_default_commands(cls):\n return cls(\"Commands\")\n\n @classmethod\n def create_ordered(cls, *args, sort_key=None, **kwargs):\n \"\"\"Create a group with a globally incremented :attr:`~Group.sort_key`.\n\n Used to create a group that will be displayed **after** a previously declared :meth:`Group.create_ordered` group on the help-page.\n\n If a :attr:`~Group.sort_key` is provided, it is **prepended** to the globally incremented counter value (i.e. has priority during sorting).\n \"\"\"\n count = next(_sort_key_counter)\n if sort_key is None:\n sort_key = (NO_USER_SORT_KEY, count)\n elif is_iterable(sort_key):\n sort_key = (tuple(sort_key), count)\n else:\n sort_key = (sort_key, count)\n return cls(*args, sort_key=sort_key, **kwargs)" }, { "identifier": "Parameter", "path": "cyclopts/parameter.py", "snippet": "class Parameter:\n \"\"\"Cyclopts configuration for individual function parameters.\"\"\"\n\n # All documentation has been moved to ``docs/api.rst`` for greater control with attrs.\n\n name: Tuple[str, ...] = field(\n default=None,\n converter=lambda x: cast(Tuple[str, ...], to_tuple_converter(x)),\n )\n\n converter: Callable = field(default=None, converter=attrs.converters.default_if_none(convert))\n\n validator: Tuple[Callable, ...] = field(\n default=(),\n converter=lambda x: cast(Tuple[Callable, ...], to_tuple_converter(x)),\n )\n\n negative: Union[None, Tuple[str, ...]] = field(default=None, converter=optional_to_tuple_converter)\n\n group: Tuple[Union[Group, str], ...] = field(default=None, converter=to_tuple_converter, hash=False)\n\n parse: bool = field(default=None, converter=attrs.converters.default_if_none(True))\n\n _show: Optional[bool] = field(default=None, alias=\"show\")\n\n show_default: Optional[bool] = field(default=None)\n\n show_choices: bool = field(default=None, converter=attrs.converters.default_if_none(True))\n\n help: Optional[str] = field(default=None)\n\n show_env_var: bool = field(default=None, converter=attrs.converters.default_if_none(True))\n\n env_var: Tuple[str, ...] = field(\n default=None,\n converter=lambda x: cast(Tuple[str, ...], to_tuple_converter(x)),\n )\n\n negative_bool: Tuple[str, ...] = field(\n default=None,\n converter=_negative_converter((\"--no-\",)),\n validator=_double_hyphen_validator,\n )\n\n negative_iterable: Tuple[str, ...] 
= field(\n default=None,\n converter=_negative_converter((\"--empty-\",)),\n validator=_double_hyphen_validator,\n )\n\n required: Optional[bool] = field(default=None)\n\n allow_leading_hyphen: bool = field(default=False)\n\n # Populated by the record_attrs_init_args decorator.\n _provided_args: Tuple[str] = field(default=(), init=False, eq=False)\n\n @property\n def show(self):\n return self._show if self._show is not None else self.parse\n\n def get_negatives(self, type_, *names: str) -> Tuple[str, ...]:\n type_ = get_origin(type_) or type_\n\n if self.negative is not None:\n return self.negative\n elif type_ not in (bool, list, set):\n return ()\n\n out = []\n for name in names:\n if name.startswith(\"--\"):\n name = name[2:]\n elif name.startswith(\"-\"):\n # Do not support automatic negation for short flags.\n continue\n else:\n # Should never reach here.\n raise NotImplementedError(\"All parameters should have started with '-' or '--'.\")\n\n negative_prefixes = self.negative_bool if type_ is bool else self.negative_iterable\n\n for negative_prefix in negative_prefixes:\n out.append(f\"{negative_prefix}{name}\")\n return tuple(out)\n\n def __repr__(self):\n \"\"\"Only shows non-default values.\"\"\"\n content = \", \".join(\n [\n f\"{a.alias}={getattr(self, a.name)!r}\"\n for a in self.__attrs_attrs__ # pyright: ignore[reportGeneralTypeIssues]\n if a.alias in self._provided_args\n ]\n )\n return f\"{type(self).__name__}({content})\"\n\n @classmethod\n def combine(cls, *parameters: Optional[\"Parameter\"]) -> \"Parameter\":\n \"\"\"Returns a new Parameter with values of ``parameters``.\n\n Parameters\n ----------\n `*parameters`: Optional[Parameter]\n Parameters who's attributes override ``self`` attributes.\n Ordered from least-to-highest attribute priority.\n \"\"\"\n kwargs = {}\n for parameter in parameters:\n if parameter is None:\n continue\n for a in parameter.__attrs_attrs__: # pyright: ignore[reportGeneralTypeIssues]\n if a.init and a.alias in parameter._provided_args:\n kwargs[a.alias] = getattr(parameter, a.name)\n\n return cls(**kwargs)\n\n @classmethod\n def default(cls) -> \"Parameter\":\n \"\"\"Create a Parameter with all Cyclopts-default values.\n\n This is different than just :class:`Parameter` because the default\n values will be recorded and override all upstream parameter values.\n \"\"\"\n return cls(\n **{a.alias: a.default for a in cls.__attrs_attrs__ if a.init} # pyright: ignore[reportGeneralTypeIssues]\n )" }, { "identifier": "groups_from_app", "path": "cyclopts/group_extractors.py", "snippet": "def groups_from_app(app: \"App\") -> List[Tuple[Group, List[\"App\"]]]:\n \"\"\"Extract Group/App association.\"\"\"\n group_mapping: List[Tuple[Group, List[\"App\"]]] = [\n (app.group_commands, []),\n ]\n\n subapps = [subapp for subapp in app._commands.values() if subapp.show]\n\n # 2 iterations need to be performed:\n # 1. Extract out all Group objects as they may have additional configuration.\n # 2. 
Assign/Create Groups out of the strings, as necessary.\n for subapp in subapps:\n assert isinstance(subapp.group, tuple)\n for group in subapp.group:\n if isinstance(group, Group):\n for mapping in group_mapping:\n if mapping[0] is group:\n break\n elif mapping[0].name == group.name:\n raise ValueError(f'Command Group \"{group.name}\" already exists.')\n else:\n group_mapping.append((group, []))\n\n for subapp in subapps:\n if subapp.group:\n assert isinstance(subapp.group, tuple)\n for group in subapp.group:\n _create_or_append(group_mapping, group, subapp)\n else:\n _create_or_append(group_mapping, app.group_commands, subapp)\n\n # Remove the empty groups\n group_mapping = [x for x in group_mapping if x[1]]\n\n # Sort alphabetically by name\n group_mapping.sort(key=lambda x: x[0].name)\n\n return group_mapping" } ]
import pytest

from cyclopts import App, Group, Parameter
from cyclopts.group_extractors import groups_from_app
7,710
def test_groups_annotated_invalid_recursive_definition():
    """A default_parameter isn't allowed to have a group set, as it would introduce a paradox."""
    default_parameter = Parameter(group="Drink")  # pyright: ignore[reportGeneralTypeIssues]
    with pytest.raises(ValueError):
def test_groups_annotated_invalid_recursive_definition():
    """A default_parameter isn't allowed to have a group set, as it would introduce a paradox."""
    default_parameter = Parameter(group="Drink")  # pyright: ignore[reportGeneralTypeIssues]
    with pytest.raises(ValueError):
Group("Food", default_parameter=default_parameter)
1
2023-11-03 02:24:25+00:00
12k
RoboFlamingo/RoboFlamingo
robot_flamingo/data/data.py
[ { "identifier": "RealDatasetHDF5", "path": "robot_flamingo/data/real_dataset_hdf5.py", "snippet": "class RealDatasetHDF5(Dataset):\n def __init__(self,\n data_dir,\n image_fn,\n text_fn,\n seq_len=12,\n mode='train',\n action_mode='ee_rel_pose_local',\n use_data_augmentation=True,\n text_aug=False):\n \"\"\"Constructor.\"\"\"\n super().__init__()\n self.dataset_dir = os.path.join(data_dir, mode)\n self.text_fn = text_fn\n self.image_fn = image_fn\n self.text_aug = text_aug\n with open('enrich_lang_real.json', 'r') as f:\n self.enrich_lang_dict = json.load(f)\n self.seq_len = seq_len\n self.mode = mode\n self.action_mode = action_mode\n self.use_data_augmentation = use_data_augmentation\n\n if self.action_mode == 'ee_rel_pose':\n self.action_dim = 7 # ee xyz (3) + ee euler (3) + gripper (1)\n self.state_dim = 7\n self.ACTION_POS_SCALE = 50\n self.ACTION_ROT_SCALE = 33\n elif self.action_mode == 'ee_rel_pose_local':\n self.action_dim = 7 # ee xyz (3) + ee euler (3) + gripper (1)\n self.state_dim = 7\n self.ACTION_POS_SCALE = 50\n self.ACTION_ROT_SCALE = 33\n else:\n raise NotImplementedError()\n print(f\"ACTION_POS_SCALE: {self.ACTION_POS_SCALE}\")\n print(f\"ACTION_ROT_SCALE: {self.ACTION_ROT_SCALE}\")\n \n # the input to this function is a numpy array\n self.input_size = (224, 224)\n self.clip_mean = (0.485, 0.456, 0.406)\n self.clip_std = (0.229, 0.224, 0.225)\n\n if self.use_data_augmentation:\n self.static_rgb_preprocess_train = T.Compose([\n T.ColorJitter(\n brightness=0.05,\n # contrast=0.05,\n # hue=0.02\n ),\n # CubeRandomShiftsAug(pad=10), # static rgb (300x400)\n RandomShiftsAug(pad=10), # static rgb (300x400)\n T.Resize(self.input_size, interpolation=Image.BICUBIC),\n T.Normalize(self.clip_mean, self.clip_std),\n PatchMask()])\n self.hand_rgb_preprocess_train = T.Compose([\n # CubeRandomShiftsAug(pad=20), # hand rgb (480x640)\n RandomShiftsAug(pad=20), # hand rgb (480x640)\n T.Resize(self.input_size, interpolation=Image.BICUBIC),\n T.Normalize(self.clip_mean, self.clip_std),\n PatchMask()])\n else:\n self.static_rgb_preprocess_train = T.Compose([\n T.ColorJitter(\n brightness=0.05,\n # contrast=0.05,\n # hue=0.02\n ),\n T.Resize(self.input_size, interpolation=Image.BICUBIC),\n T.Normalize(self.clip_mean, self.clip_std)])\n self.hand_rgb_preprocess_train = T.Compose([\n T.ColorJitter(\n brightness=0.05,\n # contrast=0.05,\n # hue=0.02\n ),\n T.Resize(self.input_size, interpolation=Image.BICUBIC),\n T.Normalize(self.clip_mean, self.clip_std)])\n self.static_rgb_preprocess_val = T.Compose([\n T.Resize(self.input_size, interpolation=Image.BICUBIC),\n T.Normalize(self.clip_mean, self.clip_std)])\n self.hand_rgb_preprocess_val = T.Compose([\n T.Resize(self.input_size, interpolation=Image.BICUBIC),\n T.Normalize(self.clip_mean, self.clip_std)])\n\n self.offset_rotm = gamma2rotm(OFFSET_EULER_Z)\n self.offset_pos = np.array(OFFSET_POS)\n self.hdf5 = h5py.File(os.path.join(self.dataset_dir, \"data.hdf5\"))\n self._initialize()\n print(f'{len(self)} trajectories in total')\n\n def _initialize(self):\n \"\"\"Generate the sequence index pair.\"\"\"\n with open(os.path.join(self.dataset_dir, \"meta.json\"), \"r\") as f:\n self.meta = json.load(f)\n n_trajs = self.meta[\"num_trajectories\"]\n # n_trajs = 1000\n print(f\"number of trajectories: {n_trajs}\")\n \n self.seq_tuple = []\n self.robot_states = dict()\n all_texts = []\n for traj_idx in tqdm(range(n_trajs)):\n text = self.meta[str(traj_idx)][0]\n all_texts.append(text)\n n_frames = self.meta[str(traj_idx)][1]\n video_name = 
self.meta[str(traj_idx)][2]\n hdf5_st = self.meta[str(traj_idx)][3]\n hdf5_ed = self.meta[str(traj_idx)][4]\n assert n_frames == hdf5_ed - hdf5_st\n if (hdf5_ed - hdf5_st) < self.seq_len:\n continue\n if video_name in EXCLUDING_VIDEOS:\n continue\n\n # load robot status and xform with offset\n traj_robot_status = self.hdf5[\"robot_status\"][\"robot_status_0\"][hdf5_st:hdf5_ed]\n traj_xyz = traj_robot_status[:, 10:13] # (n, 3)\n traj_xyz = traj_xyz.transpose() # (3, n)\n traj_xyz = (self.offset_rotm @ traj_xyz).transpose() + self.offset_pos\n traj_quat = traj_robot_status[:, 13:17]\n traj_rpy = np.zeros((n_frames, 3))\n for i in range(n_frames):\n traj_rpy[i] = rotm2euler(self.offset_rotm @ quat2rotm(traj_quat[i]))\n traj_state = np.zeros((n_frames, 7)).astype(np.float32)\n traj_state[:, :3] = traj_xyz\n traj_state[:, 3:6] = traj_rpy\n vive_control = self.hdf5[\"vive_control\"][\"vive_control_0\"][hdf5_st:hdf5_ed]\n vive_gripper_cmd = vive_control[:, 1]\n gripper_pos = traj_robot_status[:, 30]\n gripper_states = get_binary_gripper_state_from_gripper_pos_vive_cmd(gripper_pos, vive_gripper_cmd)\n traj_state[:, -1] = gripper_states\n assert not (traj_idx in self.robot_states)\n self.robot_states[traj_idx] = traj_state\n\n # create sequence: the last frame will not be in the sequence\n for st in range(0, n_frames - self.seq_len):\n ed = st + self.seq_len\n self.seq_tuple.append([traj_idx, text, st, ed, hdf5_st])\n \n all_texts = list(set(all_texts))\n print(all_texts)\n # exit(0)\n \n def __len__(self):\n return len(self.seq_tuple)\n\n def __getitem__(self, index):\n curr_tuple = self.seq_tuple[index]\n traj_idx = curr_tuple[0]\n text = curr_tuple[1]\n \n # if (\"on the plate\" in text) and (\"pick\" in text):\n # text = text.replace(\" on the plate\", \"\")\n # if (\"on the desk\" in text) and (\"pick\" in text):\n # text = text.replace(\" on the desk\", \"\")\n\n st = curr_tuple[2]\n ed = curr_tuple[3]\n hdf5_st = curr_tuple[4]\n\n static_rgbs = []\n hand_rgbs = []\n actions = []\n states = []\n\n tlen = ed - st\n assert tlen == self.seq_len\n\n for i in range(st, ed):\n # action\n if self.action_mode == 'ee_rel_pose':\n # delta_xyz + detla_rpy + gripper in absolute world coordinates\n # xyz are scaled up by 50 rpy are scaled up by 20 and both are clipped to [-1, 1]\n xyz_action = (self.robot_states[traj_idx][i+1, :3] - self.robot_states[traj_idx][i, :3]) \n rpy_action = (self.robot_states[traj_idx][i+1, 3:6] - self.robot_states[traj_idx][i, 3:6])\n gripper_action = self.robot_states[traj_idx][i+1, 6]\n elif self.action_mode == 'ee_rel_pose_local':\n # a_trans = rotm_t.T @ (trans_t+1 - trans_t)\n # a_rot = rotm_t.T @ rotm_t+1\n curr_xyz = self.robot_states[traj_idx][i, :3]\n curr_rpy = self.robot_states[traj_idx][i, 3:6]\n curr_rotm = euler2rotm(curr_rpy)\n next_xyz = self.robot_states[traj_idx][i+1, :3]\n next_rpy = self.robot_states[traj_idx][i+1, 3:6]\n next_rotm = euler2rotm(next_rpy)\n xyz_action = np.dot(curr_rotm.T, next_xyz - curr_xyz)\n rel_rotm = curr_rotm.T @ next_rotm\n rpy_action = rotm2euler(rel_rotm)\n for rpy_i in range(len(rpy_action)):\n while rpy_action[rpy_i] > np.pi:\n rpy_action[rpy_i] -= (2 * np.pi)\n while rpy_action[rpy_i] < -np.pi:\n rpy_action[rpy_i] += (2 * np.pi)\n gripper_action = self.robot_states[traj_idx][i+1, 6]\n else:\n raise NotImplementedError()\n action = np.zeros(7)\n action[:3] = xyz_action * self.ACTION_POS_SCALE\n action[3:6] = rpy_action * self.ACTION_ROT_SCALE\n action[6] = gripper_action\n actions.append(action)\n \n # state\n 
states.append(self.robot_states[traj_idx][i])\n\n # static rgb\n static_rgb = self.hdf5[\"rgb\"][\"rgb_1\"][hdf5_st+i]\n static_rgb = static_rgb[190:700, 250:1050] # mode 1\n static_rgb = Image.fromarray(static_rgb)\n static_rgb = T.ToTensor()(static_rgb.convert(\"RGB\"))\n static_rgbs.append(static_rgb)\n\n # hand rgb\n hand_rgb = self.hdf5[\"rgb\"][\"rgb_0\"][hdf5_st+i]\n hand_rgb = Image.fromarray(hand_rgb)\n hand_rgb = T.ToTensor()(hand_rgb.convert(\"RGB\"))\n hand_rgbs.append(hand_rgb)\n \n # Images\n static_rgbs = torch.stack(static_rgbs, dim=0)\n hand_rgbs = torch.stack(hand_rgbs, dim=0)\n if self.mode == 'train':\n static_rgbs = self.static_rgb_preprocess_train(static_rgbs)\n hand_rgbs = self.hand_rgb_preprocess_train(hand_rgbs)\n else:\n static_rgbs = self.static_rgb_preprocess_val(static_rgbs)\n hand_rgbs = self.hand_rgb_preprocess_val(hand_rgbs)\n\n # State\n states = np.array(states)\n states = torch.from_numpy(states)\n\n # Action\n actions = np.array(actions) # (len, act_dim)\n actions = torch.from_numpy(actions)\n\n # RGB\n _, C, H, W = static_rgbs.shape\n padded_static_rgbs = torch.zeros((self.seq_len, C, H, W)).float() # (len, C, H, W)\n padded_hand_rgbs = torch.zeros((self.seq_len, C, H, W)).float() # (len, C, H, W)\n padded_static_rgbs[:tlen] = static_rgbs\n padded_hand_rgbs[:tlen] = hand_rgbs\n rgb_data = padded_static_rgbs\n hand_rgb_data = padded_hand_rgbs\n\n # State\n padded_states = torch.zeros(self.seq_len, self.state_dim).float() # (len, state_dim)\n padded_states[:tlen] = states\n state_data = padded_states\n\n # Action\n padded_actions = torch.zeros(self.seq_len, self.action_dim).float() # (len, action_dim)\n padded_actions[:tlen] = actions\n action_data = padded_actions\n\n # Timestep\n timestep = np.zeros(self.seq_len, dtype=np.int32) # (len)\n timestep[:tlen] = np.arange(st, ed)\n timestep_data = torch.from_numpy(timestep).long()\n\n # Attention mask (should be all 1 for full dataset)\n attention_mask = np.ones(self.seq_len, dtype=np.int32) # (len)\n attention_mask[tlen:] = 0.0\n assert np.sum(attention_mask) == self.seq_len\n attention_mask_data = torch.from_numpy(attention_mask).long()\n\n data = dict()\n data['rgb'] = rgb_data # (len, C, H, W)\n data['hand_rgb'] = hand_rgb_data # (len, C, H, W)\n if self.text_aug:\n if text in self.enrich_lang_dict:\n if random.random() > 0.1: # preserve the original text in 0.1 prob\n text = random.choice(self.enrich_lang_dict[text])\n data['text'] = text\n data['timestep'] = timestep_data # (len,)\n data['state'] = state_data # (len, state_dim)\n data['action'] = action_data # (len, action_dim)\n data['attention_mask'] = attention_mask_data # (len,)\n\n return data\n \n def visualize_action(self):\n \"\"\"Visualize the distribution of actions.\"\"\"\n with open(os.path.join(self.dataset_dir, \"meta.json\"), \"r\") as f:\n self.meta = json.load(f)\n n_trajs = self.meta[\"num_trajectories\"]\n xyz_actions = []\n rpy_actions = []\n xyz_states = []\n rpy_states = []\n for traj_idx in range(n_trajs):\n temp_robot_states = self.robot_states[traj_idx]\n n_frames = self.meta[str(traj_idx)][1]\n for i in range(0, n_frames):\n xyz_states.append(temp_robot_states[i, :3])\n rpy_states.append(temp_robot_states[i, 3:6])\n for i in range(1, n_frames):\n xyz_action = temp_robot_states[i, :3] - temp_robot_states[i-1, :3]\n rpy_action = temp_robot_states[i, 3:6] - temp_robot_states[i-1, 3:6]\n xyz_actions.append(xyz_action)\n rpy_actions.append(rpy_action)\n print(f\"number of actions: {len(xyz_actions)}\")\n xyz_actions = 
np.array(xyz_actions)\n rpy_actions = np.array(rpy_actions)\n xyz_states = np.array(xyz_states)\n rpy_states = np.array(rpy_states)\n a_labels = ['a_x', 'a_y', 'a_z']\n for i in range(len(a_labels)):\n plt.figure()\n plt.hist(xyz_actions[:, i], bins=512, label=a_labels[i], alpha=0.5)\n plt.legend(loc='upper right')\n plt.savefig(f\"./data_stats/{a_labels[i]}.png\")\n a_labels = ['a_roll', 'a_pitch', 'a_yaw']\n for i in range(len(a_labels)):\n plt.figure()\n plt.hist(rpy_actions[:, i], bins=512, label=a_labels[i], alpha=0.5)\n plt.legend(loc='upper right')\n plt.savefig(f\"./data_stats/{a_labels[i]}.png\")\n s_labels = ['s_x', 's_y', 's_z']\n for i in range(len(s_labels)):\n plt.figure()\n plt.hist(xyz_states[:, i], bins=512, label=s_labels[i], alpha=0.5)\n plt.legend(loc='upper right')\n plt.savefig(f\"./data_stats/{s_labels[i]}.png\")\n s_labels = ['s_roll', 's_pitch', 's_yaw']\n for i in range(len(s_labels)):\n plt.figure()\n plt.hist(rpy_states[:, i], bins=512, label=s_labels[i], alpha=0.5)\n plt.legend(loc='upper right')\n plt.savefig(f\"./data_stats/{s_labels[i]}.png\")\n\n abs_xyz_actions = np.abs(xyz_actions)\n abs_rpy_actions = np.abs(rpy_actions)\n x_action_max = np.max(abs_xyz_actions[:, 0])\n y_action_max = np.max(abs_xyz_actions[:, 1])\n z_action_max = np.max(abs_xyz_actions[:, 2])\n x_action_min = np.min(abs_xyz_actions[:, 0])\n y_action_min = np.min(abs_xyz_actions[:, 1])\n z_action_min = np.min(abs_xyz_actions[:, 2])\n x_action_mean = np.mean(abs_xyz_actions[:, 0])\n y_action_mean = np.mean(abs_xyz_actions[:, 1])\n z_action_mean = np.mean(abs_xyz_actions[:, 2])\n\n print(f\"xyz_action max: {x_action_max:.3f}, {y_action_max:.3f}, {z_action_max:.3f}\")\n print(f\"xyz_action min: {x_action_min:.3f}, {y_action_min:.3f}, {z_action_min:.3f}\")\n print(f\"xyz_action mean: {x_action_mean:.3f}, {y_action_mean:.3f}, {z_action_mean:.3f}\")\n\n er_action_max = np.max(abs_rpy_actions[:, 0])\n ep_action_max = np.max(abs_rpy_actions[:, 1])\n ey_action_max = np.max(abs_rpy_actions[:, 2])\n er_action_min = np.min(abs_rpy_actions[:, 0])\n ep_action_min = np.min(abs_rpy_actions[:, 1])\n ey_action_min = np.min(abs_rpy_actions[:, 2])\n er_action_mean = np.mean(abs_rpy_actions[:, 0])\n ep_action_mean = np.mean(abs_rpy_actions[:, 1])\n ey_action_mean = np.mean(abs_rpy_actions[:, 2])\n\n print(f\"rpy_action max: {er_action_max:.3f}, {ep_action_max:.3f}, {ey_action_max:.3f}\")\n print(f\"rpy_action min: {er_action_min:.3f}, {ep_action_min:.3f}, {ey_action_min:.3f}\")\n print(f\"rpy_action mean: {er_action_mean:.3f}, {ep_action_mean:.3f}, {ey_action_mean:.3f}\")\n\n def visualize_episode_len(self):\n \"\"\"Visualize the length distribution of episodes.\"\"\"\n pass\n \n def collator(self, sample):\n image_tensors = torch.stack([s['rgb'] for s in sample], dim=0)\n gripper_tensors = torch.stack([s['hand_rgb'] for s in sample], dim=0)\n action_tensors = torch.stack([s['action'] for s in sample], dim=0)\n state_tensors = torch.stack([s['state'] for s in sample], dim=0)\n robot_obs = state_tensors.clone()\n text = [s['text'] for s in sample]\n # print(text)\n text_tensors, attention_mask = self.text_fn(text)\n # print(text_tensors, attention_mask)\n return image_tensors, (text_tensors, attention_mask), action_tensors, gripper_tensors, state_tensors, robot_obs" }, { "identifier": "CaptionDataset", "path": "robot_flamingo/data/vl_dataset.py", "snippet": "class CaptionDataset(Dataset):\n def __init__(\n self,\n image_train_dir_path,\n annotations_path,\n tokenizer=None,\n transforms=None,\n seed=123,\n 
is_train=True,\n dataset_name='coco',\n image_val_dir_path=None,\n ):\n self.image_train_dir_path = image_train_dir_path\n self.image_val_dir_path = image_val_dir_path\n self.annotations = []\n self.is_train = is_train\n self.dataset_name = dataset_name\n self.seed = seed\n random.seed(self.seed)\n full_annotations = json.load(open(annotations_path))\n self.tokenizer = tokenizer\n self.transforms = transforms\n print(len(full_annotations[\"images\"]), len(full_annotations[\"annotations\"]))\n self.id2path = {}\n self.id2caption = {}\n for i in range(len(full_annotations[\"images\"])):\n self.id2path[full_annotations[\"images\"][i][\"id\"]] = os.path.join(\n self.image_train_dir_path, full_annotations[\"images\"][i][\"file_name\"])\n self.image_ids = list(self.id2path.keys())\n for i in range(len(full_annotations[\"annotations\"])):\n image_id = full_annotations[\"annotations\"][i][\"image_id\"]\n if image_id not in self.id2caption:\n self.id2caption[image_id] = [full_annotations[\"annotations\"][i]['caption']]\n else:\n self.id2caption[image_id].append(full_annotations[\"annotations\"][i]['caption'])\n\n def __len__(self):\n return len(self.image_ids)\n\n def __getitem__(self, idx):\n image = Image.open(self.id2path[self.image_ids[idx]])\n image.load()\n caption = random.choice(self.id2caption[self.image_ids[idx]])\n return {\n \"image\": image,\n \"caption\": caption,\n \"image_id\": self.image_ids[idx]\n }\n \n def get_caption_prompt(self, caption=None):\n return f\"A photo of {caption if caption is not None else ''}\"\n \n def collator(self, samples):\n images = torch.stack([self.transforms(s['image']) for s in samples], dim=0)\n text = [self.get_caption_prompt(s['caption']) for s in samples]\n text_tensors, attention_mask = self.tokenizer(text)\n return images, (text_tensors, attention_mask)" }, { "identifier": "VQADataset", "path": "robot_flamingo/data/vl_dataset.py", "snippet": "class VQADataset(Dataset):\n def __init__(\n self, image_dir_path, question_path, annotations_path, tokenizer=None, transforms=None, seed=123, is_train=True, dataset_name='vqav2'\n ):\n self.questions = json.load(open(question_path, \"r\"))[\"questions\"]\n if annotations_path is not None:\n self.answers = json.load(open(annotations_path, \"r\"))[\"annotations\"]\n else:\n self.answers = None\n self.image_dir_path = image_dir_path\n self.is_train = is_train\n self.dataset_name = dataset_name\n # self.img_coco_split = \"train2014\"\n self.tokenizer = tokenizer\n self.transforms = transforms\n self.seed = seed\n random.seed(self.seed)\n if self.dataset_name in {\"vqav2\", \"ok_vqa\"}:\n self.img_coco_split = self.image_dir_path.strip(\"/\").split(\"/\")[-1]\n assert self.img_coco_split in {\"train2014\", \"val2014\", \"test2015\"}\n\n def __len__(self):\n return len(self.questions)\n\n def get_img_path(self, question):\n if self.dataset_name in {\"vqav2\", \"ok_vqa\"}:\n return os.path.join(\n self.image_dir_path,\n f\"COCO_{self.img_coco_split}_{question['image_id']:012d}.jpg\"\n if self.is_train\n else f\"COCO_{self.img_coco_split}_{question['image_id']:012d}.jpg\",\n )\n elif self.dataset_name == \"vizwiz\":\n return os.path.join(self.image_dir_path, question[\"image_id\"])\n elif self.dataset_name == \"textvqa\":\n return os.path.join(self.image_dir_path, f\"{question['image_id']}.jpg\")\n else:\n raise Exception(f\"Unknown VQA dataset {self.dataset_name}\")\n\n def __getitem__(self, idx):\n question = self.questions[idx]\n img_path = self.get_img_path(question)\n image = Image.open(img_path)\n # 
image.load()\n results = {\n \"image\": image,\n \"question\": question[\"question\"],\n \"question_id\": question[\"question_id\"],\n }\n if self.answers is not None:\n answers = self.answers[idx]\n results[\"answers\"] = [a[\"answer\"] for a in answers[\"answers\"]]\n return results\n \n def get_vqa_prompt(self, question, answer=None):\n return f\"Question:{question} Short answer:{answer if answer is not None else ''}\"\n \n def get_vqa_ques_prompt(self, question):\n return f\"Question:{question} Short answer:\"\n \n def collator(self, samples):\n images = torch.stack([self.transforms(s['image']) for s in samples], dim=0)\n text = [self.get_vqa_prompt(s['question'], random.choice(s['answers'])) for s in samples]\n text_tensors, attention_mask = self.tokenizer(text)\n B, T = attention_mask.shape\n ques = [self.get_vqa_ques_prompt(s['question']) for s in samples]\n _, ques_mask = self.tokenizer(ques)\n ques_len = ques_mask.sum(dim=1).unsqueeze(-1).expand(B, T)\n answer_mask = torch.ones_like(attention_mask)\n indices = torch.arange(answer_mask.shape[-1]).unsqueeze(0).expand(B, T)\n index_mask = indices < ques_len\n answer_mask.masked_fill_(index_mask, 0)\n answer_mask = answer_mask * attention_mask # both mask for attention and question\n return images, (text_tensors, attention_mask), answer_mask" } ]
import ast
import functools
import io
import json
import logging
import math
import os
import random
import sys
import tarfile
import zipfile
import braceexpand
import torch
import torchvision
import webdataset as wds
import numpy as np
import numpy as np
import pyhash
import torch
import horovod.torch as hvd
import logging
import numpy as np
import pyhash
import torch
import pickle
import torch.nn as nn
import torch.nn.functional as F
import copy
from cgitb import text
from dataclasses import dataclass
from multiprocessing import Value
from PIL import Image
from torch.utils.data import DataLoader, IterableDataset, get_worker_info, Dataset
from torch.utils.data.distributed import DistributedSampler
from webdataset.filters import _shuffle
from webdataset.tariterators import (
    base_plus_ext,
    tar_file_expander,
    url_opener,
    valid_sample,
)
from calvin_agent.datasets.utils.episode_utils import (
    get_state_info_dict,
    process_actions,
    process_depth,
    process_language,
    process_rgb,
    process_state,
)
from omegaconf import DictConfig
from torch.utils.data import Dataset
from robot_flamingo.data.real_dataset_hdf5 import RealDatasetHDF5
from pathlib import Path
from typing import Dict, Tuple, Union
from calvin_agent.datasets.utils.episode_utils import (
    get_state_info_dict,
    process_actions,
    process_depth,
    # process_language,
    # process_rgb,
    process_state,
)
from omegaconf import DictConfig
from torch.utils.data import Dataset
from robot_flamingo.data.vl_dataset import CaptionDataset, VQADataset
from typing import Any, Dict, List, Tuple, Callable
from itertools import chain
from calvin_agent.datasets.utils.episode_utils import lookup_naming_pattern
9,128
sampler: DistributedSampler = None shared_epoch: SharedEpoch = None dataset: Dataset = None def set_epoch(self, epoch): if self.shared_epoch is not None: self.shared_epoch.set_value(epoch) if self.sampler is not None and isinstance(self.sampler, DistributedSampler): self.sampler.set_epoch(epoch) def preprocess_image(sample, image_processor): image = [image_processor(s).unsqueeze(0) for s in sample] image = torch.cat(image, dim=0) # apply random horizontal flip and color jitter return image def preprocess_text_calvin(sample, tokenizer): tokenizer.padding_side = "right" sample = [ # (f"{s.strip()}{tokenizer.eos_token}") # for s in sample (f"<image>{s.strip()}<|endofchunk|>{tokenizer.eos_token}") for s in sample ] text = tokenizer( sample, max_length=32, padding="longest", truncation="only_first", return_tensors="pt", ) return text["input_ids"], text["attention_mask"] def preprocess_interleaved(sample, tokenizer, clip_processor, sim_threshold): info = json.loads(sample[0]) tar_file_obj = io.BytesIO(sample[1]) image_tar = tarfile.open(fileobj=tar_file_obj) sentences = info["text_list"] images, image_idxs = [], [] for image_path, sim in zip(info["image_info"], info["similarity_matrix"]): # pick one image per sentence if info["image_info"][image_path]["matched_text_index"] in image_idxs: continue rawbytes = image_tar.extractfile( os.path.join(image_tar.getnames()[0], image_path) ).read() # filter to images >= 10KB if len(rawbytes) // 1000 <= MIN_KB: continue if sim[info["image_info"][image_path]["matched_text_index"]] < sim_threshold: continue image = Image.open(io.BytesIO(rawbytes)).convert("RGB") images.append(image) image_idxs.append(info["image_info"][image_path]["matched_text_index"]) if len(images) == 0: raise ValueError("No images in sample") # filter out images that are exact duplicates images_tensors = preprocess_image(images, clip_processor) keep_ixs = range(min(len(images_tensors), MAX_NUM_IMAGES)) images_tensors = images_tensors[keep_ixs] image_idxs = [image_idxs[ix] for ix in keep_ixs] # pad to 5 images if len(images_tensors) < MAX_NUM_IMAGES: zero_padding = torch.zeros( (MAX_NUM_IMAGES - len(images_tensors), 3, 224, 224), dtype=torch.float ) images_tensors = torch.cat((images_tensors, zero_padding), dim=0) # add in <image> and <eoc> tokens # eoc after sentence = "sentence loss" for ix in image_idxs: sentences[ix] = f"<|endofchunk|><image>{sentences[ix]}" text = " ".join(sentences) text = text.replace("<|endofchunk|>", "", 1) # but remove first eoc # whitespace cleanup text = ( text.replace(" <|endofchunk|>", "<|endofchunk|>") .replace("<image> ", "<image>") .replace(" <image>", "<image>") ) text = f"{text}<|endofchunk|>{tokenizer.eos_token}" tokenizer.padding_side = "right" text_tensor = tokenizer( text, max_length=256, truncation=True, padding="max_length", return_tensors="pt" ) # reject sequences with too few images (after truncation) num_images = torch.count_nonzero( text_tensor["input_ids"] == tokenizer.additional_special_tokens_ids[ tokenizer.additional_special_tokens.index("<image>") ] ) if num_images == 0: raise ValueError("No images in sample") elif ( num_images == 1 and random.random() <= 0.5 ): # 50% chance of keeping single image samples raise ValueError("Only one image in sample") return ( images_tensors, (text_tensor["input_ids"], text_tensor["attention_mask"]), ) def get_coco_dataset(args, image_processor, tokenizer, epoch=0): coco_data_dir = "path/to/coco/train2014" coco_ann = "path/to/coco/annotations/captions_train2014.json" preprocess_text_fn = 
functools.partial(preprocess_text_calvin, tokenizer=tokenizer)
Image.MAX_IMAGE_PIXELS = 1000000000 MAX_NUM_TOKENS = 256 MAX_NUM_IMAGES = 5 TINY_IMAGE_SIZE_THRESHOLD = 1 N_CHANNELS = 3 INTERLEAVED_IMAGE_SIZE = 224 _SHARD_SHUFFLE_SIZE = 2000 _SHARD_SHUFFLE_INITIAL = 500 _SAMPLE_SHUFFLE_SIZE = 5000 _SAMPLE_SHUFFLE_INITIAL = 1000 MIN_KB = 10 MAX_NUM_IMAGES = 5 try: except ImportError: hvd = None hasher = pyhash.fnv1_32() logger = logging.getLogger(__name__) obs_config = DictConfig( { "rgb_obs": ["rgb_static", "rgb_gripper"], "depth_obs": [], "state_obs": ["robot_obs"], "actions": ["rel_actions"], "language": ["language"], } ) prop_state = DictConfig( { "n_state_obs": 15, "keep_indices": [[0, 15]], "robot_orientation_idx": [3, 6], "normalize": True, "normalize_robot_orientation": True, } ) def get_validation_window_size( idx: int, min_window_size: int, max_window_size: int ) -> int: """ In validation step, use hash function instead of random sampling for consistent window sizes across epochs. Args: idx: Sequence index. min_window_size: Minimum window size. max_window_size: Maximum window size. Returns: Window size computed with hash function. """ window_range = max_window_size - min_window_size + 1 return min_window_size + hasher(str(idx)) % window_range class RandomShiftsAug(nn.Module): def __init__(self, pad): super().__init__() self.pad = pad def forward(self, x): n, c, h, w = x.size() assert h == w padding = tuple([self.pad] * 4) x = F.pad(x, padding, 'replicate') eps = 1.0 / (h + 2 * self.pad) arange = torch.linspace(-1.0 + eps, 1.0 - eps, h + 2 * self.pad, device=x.device, dtype=x.dtype)[:h] arange = arange.unsqueeze(0).repeat(h, 1).unsqueeze(2) base_grid = torch.cat([arange, arange.transpose(1, 0)], dim=2) base_grid = base_grid.unsqueeze(0).repeat(n, 1, 1, 1) shift = torch.randint(0, 2 * self.pad + 1, size=(n, 1, 1, 2), device=x.device, dtype=x.dtype) shift *= 2.0 / (h + 2 * self.pad) grid = base_grid + shift return F.grid_sample(x, grid, padding_mode='zeros', align_corners=False) def forward_traj(self, x): n, t, c, h, w = x.size() x = x.view(n*t, *x.shape[2:]) assert h == w padding = tuple([self.pad] * 4) x = F.pad(x, padding, 'replicate') eps = 1.0 / (h + 2 * self.pad) arange = torch.linspace(-1.0 + eps, 1.0 - eps, h + 2 * self.pad, device=x.device, dtype=x.dtype)[:h] arange = arange.unsqueeze(0).repeat(h, 1).unsqueeze(2) base_grid = torch.cat([arange, arange.transpose(1, 0)], dim=2) base_grid = base_grid.unsqueeze(0).repeat(n, 1, 1, 1) base_grid = base_grid.unsqueeze(1).repeat(1, t, 1, 1, 1) base_grid = base_grid.view(n*t, *base_grid.shape[2:]) shift = torch.randint(1, 2 * self.pad + 1, size=(n*t, 1, 1, 2), device=x.device, dtype=x.dtype) shift *= 2.0 / (h + 2 * self.pad) grid = base_grid + shift x = F.grid_sample(x, grid, padding_mode='zeros', align_corners=False) x = x.view(n, t, *x.shape[1:]) return x class BaseCalvinDataset(Dataset): """ Abstract dataset base class. Args: datasets_dir: Path of folder containing episode files (string must contain 'validation' or 'training'). obs_space: DictConfig of observation space. proprio_state: DictConfig with shape of prioprioceptive state. key: 'vis' or 'lang'. lang_folder: Name of the subdirectory of the dataset containing the language annotations. num_workers: Number of dataloading workers for this dataset. transforms: Dict with pytorch data transforms. batch_size: Batch size. min_window_size: Minimum window length of loaded sequences. max_window_size: Maximum window length of loaded sequences. pad: If True, repeat last frame such that all sequences have length 'max_window_size'. 
aux_lang_loss_window: How many sliding windows to consider for auxiliary language losses, counted from the end of an annotated language episode. """ def __init__( self, datasets_dir: Path, proprio_state: DictConfig = prop_state, lang_folder: str = "lang_annotations", num_workers: int = 0, key: str = "lang", obs_space: DictConfig = obs_config, transforms: Dict = {}, batch_size: int = 32, window_size: int = 16, min_window_size: int = 16, max_window_size: int = 16, pad: bool = True, aux_lang_loss_window: int = 1, rgb_pad=-1, gripper_pad=-1, traj_cons=False, text_aug=False, dif_ws=False, act_step=1 ): self.observation_space = obs_space self.proprio_state = proprio_state self.transforms = transforms self.with_lang = key == "lang" self.relative_actions = "rel_actions" in self.observation_space["actions"] self.pad = pad self.batch_size = batch_size self.num_workers = num_workers self.window_size = window_size if not dif_ws: self.min_window_size = window_size + act_step - 1 self.max_window_size = window_size + act_step - 1 else: self.min_window_size = min_window_size self.max_window_size = max_window_size self.act_step = act_step # print('ws {}, min_ws {}, max_ws {}'.format(self.window_size, self.max_window_size, self.min_window_size)) self.abs_datasets_dir = datasets_dir self.lang_folder = lang_folder # if self.with_lang else None self.aux_lang_loss_window = aux_lang_loss_window self.traj_cons = traj_cons with open('/mnt/bn/robotics/lxh/robot-flamingo/enrich_lang_annotations.json', 'r') as f: self.enrich_lang = json.load(f) self.text_aug = text_aug self.rgb_pad = rgb_pad if self.rgb_pad != -1: self.rgb_shift = RandomShiftsAug(rgb_pad) self.gripper_pad = gripper_pad if self.gripper_pad != -1: self.gripper_shift = RandomShiftsAug(gripper_pad) assert ( "validation" in self.abs_datasets_dir.as_posix() or "training" in self.abs_datasets_dir.as_posix() ) self.validation = "validation" in self.abs_datasets_dir.as_posix() assert self.abs_datasets_dir.is_dir() logger.info(f"loading dataset at {self.abs_datasets_dir}") logger.info("finished loading dataset") def process_rgb( self, episode: Dict[str, np.ndarray], observation_space: DictConfig, transforms: Dict, seq_idx: int = 0, window_size: int = 0, ) -> Dict[str, Dict[str, torch.Tensor]]: rgb_obs_keys = observation_space["rgb_obs"] seq_rgb_obs_dict = {} for _, rgb_obs_key in enumerate(rgb_obs_keys): rgb_obs = episode[rgb_obs_key] # expand dims for single environment obs if len(rgb_obs.shape) != 4: rgb_obs = np.expand_dims(rgb_obs, axis=0) assert len(rgb_obs.shape) == 4 if window_size == 0 and seq_idx == 0: # single file loader # To Square image seq_rgb_obs_ = torch.from_numpy(rgb_obs).byte() else: # episode loader seq_rgb_obs_ = torch.from_numpy( rgb_obs[seq_idx : seq_idx + window_size] ).byte() if rgb_obs_key in transforms: seq_rgb_obs_ = transforms[rgb_obs_key](seq_rgb_obs_) seq_rgb_obs_dict[rgb_obs_key] = seq_rgb_obs_ # shape: N_rgb_obs x (BxHxWxC) return {"rgb_obs": seq_rgb_obs_dict} def process_language( self, episode: Dict[str, np.ndarray], transforms: Dict, with_lang: bool ): return {"lang": episode["language"]} def __getitem__(self, idx: Union[int, Tuple[int, int]], fixed_seed=False) -> Dict: """ Get sequence of dataset. Args: idx: Index of the sequence. Returns: Loaded sequence. """ if isinstance(idx, int): # When max_ws_size and min_ws_size are equal, avoid unnecessary padding # acts like Constant dataset. 
Currently, used for language data if self.min_window_size == self.max_window_size: window_size = self.max_window_size elif self.min_window_size < self.max_window_size: window_size = self._get_window_size(idx) else: logger.error( f"min_window_size {self.min_window_size} > max_window_size {self.max_window_size}" ) raise ValueError else: idx, window_size = idx head = False sequence = self._get_sequences(idx, window_size, head=head) if self.pad: pad_size = self._get_pad_size(sequence) sequence = self._pad_sequence(sequence, pad_size, head=head) new_list = [] np_rgb = copy.deepcopy(sequence["rgb_obs"]["rgb_static"].numpy()) for i in range(np_rgb.shape[0]): new_list.append(Image.fromarray(np_rgb[i, :, :, :].astype(np.uint8))) sequence["rgb_obs"]["rgb_static"] = new_list new_list = [] np_gripper = copy.deepcopy(sequence["rgb_obs"]["rgb_gripper"].numpy()) for i in range(np_gripper.shape[0]): new_list.append(Image.fromarray(np_gripper[i, :, :, :].astype(np.uint8))) sequence["rgb_obs"]["rgb_gripper"] = new_list # print(pad_size, len(new_list)) return sequence def _get_sequences(self, idx: int, window_size: int, head: bool=False) -> Dict: """ Load sequence of length window_size. Args: idx: Index of starting frame. window_size: Length of sampled episode. Returns: dict: Dictionary of tensors of loaded sequence with different input modalities and actions. """ episode = self._load_episode(idx, window_size) seq_state_obs = process_state( episode, self.observation_space, self.transforms, self.proprio_state ) seq_rgb_obs = self.process_rgb(episode, self.observation_space, self.transforms) seq_depth_obs = process_depth(episode, self.observation_space, self.transforms) seq_acts = process_actions(episode, self.observation_space, self.transforms) info = get_state_info_dict(episode) seq_lang = self.process_language(episode, self.transforms, self.with_lang) info = self._add_language_info(info, idx) seq_dict = { **seq_state_obs, **seq_rgb_obs, **seq_depth_obs, **seq_acts, **info, **seq_lang, } # type:ignore seq_dict["idx"] = idx # type:ignore return seq_dict def _load_episode(self, idx: int, window_size: int) -> Dict[str, np.ndarray]: raise NotImplementedError def _get_window_size(self, idx: int) -> int: """ Sample a window size taking into account the episode limits. Args: idx: Index of the sequence to load. Returns: Window size. """ window_diff = self.max_window_size - self.min_window_size if len(self.episode_lookup) <= idx + window_diff: # last episode max_window = self.min_window_size + len(self.episode_lookup) - idx - 1 elif ( self.episode_lookup[idx + window_diff] != self.episode_lookup[idx] + window_diff ): # less than max_episode steps until next episode steps_to_next_episode = int( np.nonzero( self.episode_lookup[idx : idx + window_diff + 1] - (self.episode_lookup[idx] + np.arange(window_diff + 1)) )[0][0] ) max_window = min( self.max_window_size, (self.min_window_size + steps_to_next_episode - 1) ) else: max_window = self.max_window_size if self.validation: # in validation step, repeat the window sizes for each epoch. return get_validation_window_size(idx, self.min_window_size, max_window) else: return np.random.randint(self.min_window_size, max_window + 1) def __len__(self) -> int: """ Returns: Size of the dataset. """ return len(self.episode_lookup) def _get_pad_size(self, sequence: Dict) -> int: """ Determine how many frames to append to end of the sequence Args: sequence: Loaded sequence. Returns: Number of frames to pad. 
""" return self.max_window_size - len(sequence["actions"]) def _pad_sequence(self, seq: Dict, pad_size: int, head: bool=False) -> Dict: """ Pad a sequence by repeating the last frame. Args: seq: Sequence to pad. pad_size: Number of frames to pad. Returns: Padded sequence. """ seq.update({"robot_obs": self._pad_with_repetition(seq["robot_obs"], pad_size)}) seq.update( { "rgb_obs": { k: self._pad_with_repetition(v, pad_size, head) for k, v in seq["rgb_obs"].items() } } ) seq.update( { "depth_obs": { k: self._pad_with_repetition(v, pad_size, head) for k, v in seq["depth_obs"].items() } } ) # todo: find better way of distinguishing rk and play action spaces if not self.relative_actions: if head: seq_acts = self._pad_with_zeros(seq["actions"], pad_size, head) else: # repeat action for world coordinates action space seq.update({"actions": self._pad_with_repetition(seq["actions"], pad_size, head)}) else: # for relative actions zero pad all but the last action dims and repeat last action dim (gripper action) if head: seq_acts = self._pad_with_zeros(seq["actions"], pad_size, head) else: seq_acts = torch.cat( [ self._pad_with_zeros(seq["actions"][..., :-1], pad_size, head), self._pad_with_repetition(seq["actions"][..., -1:], pad_size, head), ], dim=-1, ) seq.update({"actions": seq_acts}) seq.update( { "state_info": { k: self._pad_with_repetition(v, pad_size, head) for k, v in seq["state_info"].items() } } ) return seq @staticmethod def _pad_with_repetition(input_tensor: torch.Tensor, pad_size: int, head: bool = False) -> torch.Tensor: """ Pad a sequence Tensor by repeating last element pad_size times. Args: input_tensor: Sequence to pad. pad_size: Number of frames to pad. Returns: Padded Tensor. """ if head: last_repeated = torch.repeat_interleave( torch.unsqueeze(input_tensor[0], dim=0), repeats=pad_size, dim=0 ) padded = torch.vstack((last_repeated, input_tensor)) else: last_repeated = torch.repeat_interleave( torch.unsqueeze(input_tensor[-1], dim=0), repeats=pad_size, dim=0 ) padded = torch.vstack((input_tensor, last_repeated)) return padded @staticmethod def _pad_with_zeros(input_tensor: torch.Tensor, pad_size: int, head: bool = False) -> torch.Tensor: """ Pad a Tensor with zeros. Args: input_tensor: Sequence to pad. pad_size: Number of frames to pad. Returns: Padded Tensor. """ zeros_repeated = torch.repeat_interleave( torch.unsqueeze(torch.zeros(input_tensor.shape[-1]), dim=0), repeats=pad_size, dim=0, ) if head: padded = torch.vstack((zeros_repeated, input_tensor)) else: padded = torch.vstack((input_tensor, zeros_repeated)) return padded def _add_language_info(self, info: Dict, idx: int) -> Dict: """ If dataset contains language, add info to determine if this sequence will be used for the auxiliary losses. Args: info: Info dictionary. idx: Sequence index. Returns: Info dictionary with updated information. """ if not self.with_lang: return info use_for_aux_lang_loss = ( idx + self.aux_lang_loss_window >= len(self.lang_lookup) or self.lang_lookup[idx] < self.lang_lookup[idx + self.aux_lang_loss_window] ) info["use_for_aux_lang_loss"] = use_for_aux_lang_loss return info class DebugDataset(Dataset): def __init__(self, **kwargs: Any,): super().__init__() def __len__(self) -> int: return 10000 def __getitem__(self, index): window_size = 8 rgb = torch.randn(window_size, 3, 200, 200) gripper = torch.randn(window_size, 84, 84) state = torch.randn(window_size, 15) class DiskCalvinDataset(BaseCalvinDataset): """ Dataset that loads episodes as individual files from disk. 
Args: skip_frames: Skip this amount of windows for language dataset. save_format: File format in datasets_dir (pkl or npz). pretrain: Set to True when pretraining. """ def __init__( self, image_fn: Callable, text_fn: Callable, *args: Any, skip_frames: int = 1, save_format: str = "npz", pretrain: bool = False, partial_data=False, **kwargs: Any, ): super().__init__(*args, **kwargs) self.save_format = save_format self.image_fn = image_fn self.text_fn = text_fn self.partial_data = partial_data if self.save_format == "pkl": self.load_file = load_pkl elif self.save_format == "npz": self.load_file = load_npz else: raise NotImplementedError self.pretrain = pretrain self.skip_frames = skip_frames if self.with_lang: ( self.episode_lookup, self.lang_lookup, self.lang_ann, self.lang_task ) = self._build_file_indices_lang(self.abs_datasets_dir) else: self.episode_lookup = self._build_file_indices(self.abs_datasets_dir) self.naming_pattern, self.n_digits = lookup_naming_pattern( self.abs_datasets_dir, self.save_format ) def _get_episode_name(self, file_idx: int) -> Path: """ Convert file idx to file path. Args: file_idx: index of starting frame. Returns: Path to file. """ return Path( f"{self.naming_pattern[0]}{file_idx:0{self.n_digits}d}{self.naming_pattern[1]}" ) def _load_episode(self, idx: int, window_size: int) -> Dict[str, np.ndarray]: """ Load consecutive frames saved as individual files on disk and combine to episode dict. Args: idx: Index of first frame. window_size: Length of sampled episode. Returns: episode: Dict of numpy arrays containing the episode where keys are the names of modalities. """ start_idx = self.episode_lookup[idx] end_idx = start_idx + window_size keys = list(chain(*self.observation_space.values())) keys.remove("language") keys.append("scene_obs") episodes = [ self.load_file(self._get_episode_name(file_idx)) for file_idx in range(start_idx, end_idx) ] episode = {key: np.stack([ep[key] for ep in episodes]) for key in keys} if self.with_lang: episode["language"] = self.lang_ann[self.lang_lookup[idx]] if self.text_aug: task = self.lang_task[self.lang_lookup[idx]] enrich_lang = random.choice(self.enrich_lang[task] + [episode["language"]]) episode["language"] = enrich_lang return episode def _build_file_indices_lang( self, abs_datasets_dir: Path ): """ This method builds the mapping from index to file_name used for loading the episodes of the language dataset. Args: abs_datasets_dir: Absolute path of the directory containing the dataset. Returns: episode_lookup: Mapping from training example index to episode (file) index. lang_lookup: Mapping from training example to index of language instruction. lang_ann: Language embeddings. 
""" assert abs_datasets_dir.is_dir() episode_lookup = [] try: print( "trying to load lang data from: ", abs_datasets_dir / self.lang_folder / "auto_lang_ann.npy", ) lang_data = np.load( abs_datasets_dir / self.lang_folder / "auto_lang_ann.npy", allow_pickle=True, ).item() except Exception: print( "Exception, trying to load lang data from: ", abs_datasets_dir / "auto_lang_ann.npy", ) lang_data = np.load( abs_datasets_dir / "auto_lang_ann.npy", allow_pickle=True ).item() ep_start_end_ids = lang_data["info"]["indx"] # each of them are 64 lang_ann = lang_data["language"]["ann"] # length total number of annotations lang_task = lang_data["language"]["task"] lang_lookup = [] partial_st_ed_list = load_partial_traj_data() for i, (start_idx, end_idx) in enumerate(ep_start_end_ids): if self.partial_data: if (start_idx, end_idx) not in partial_st_ed_list: continue if self.pretrain: start_idx = max( start_idx, end_idx + 1 - self.min_window_size - self.aux_lang_loss_window, ) assert end_idx >= self.max_window_size cnt = 0 for idx in range(start_idx, end_idx + 1 - self.min_window_size): if cnt % self.skip_frames == 0: lang_lookup.append(i) episode_lookup.append(idx) cnt += 1 return np.array(episode_lookup), lang_lookup, lang_ann, lang_task def _build_file_indices(self, abs_datasets_dir: Path) -> np.ndarray: """ This method builds the mapping from index to file_name used for loading the episodes of the non language dataset. Args: abs_datasets_dir: Absolute path of the directory containing the dataset. Returns: episode_lookup: Mapping from training example index to episode (file) index. """ assert abs_datasets_dir.is_dir() episode_lookup = [] ep_start_end_ids = np.load(abs_datasets_dir / "ep_start_end_ids.npy") logger.info( f'Found "ep_start_end_ids.npy" with {len(ep_start_end_ids)} episodes.' 
) for start_idx, end_idx in ep_start_end_ids: assert end_idx > self.max_window_size for idx in range(start_idx, end_idx + 1 - self.min_window_size): episode_lookup.append(idx) return np.array(episode_lookup) def collater(self, sample): action_tensors = torch.from_numpy(np.array([np.stack(s["actions"]) for s in sample])) state_tensors = torch.from_numpy(np.array([np.stack(s["robot_obs"]) for s in sample])) image_tensors = torch.stack([self.image_fn(s["rgb_obs"]["rgb_static"]) for s in sample]) gripper_tensors = torch.stack([self.image_fn(s["rgb_obs"]["rgb_gripper"]) for s in sample]) stacked_language = [s["lang"] for s in sample] text_tensors, attention_mask = self.text_fn(stacked_language) if self.rgb_pad != -1: bs, seq_len = image_tensors.shape[:2] if self.traj_cons: image_tensors = self.rgb_shift.forward_traj(image_tensors) else: image_tensors = image_tensors.view(bs*seq_len, *image_tensors.shape[2:]) image_tensors = self.rgb_shift(image_tensors) image_tensors = image_tensors.view(bs, seq_len, *image_tensors.shape[1:]) if self.gripper_pad != -1: bs, seq_len = gripper_tensors.shape[:2] if self.traj_cons: gripper_tensors = self.gripper_shift.forward_traj(gripper_tensors) else: gripper_tensors = gripper_tensors.view(bs * seq_len, *gripper_tensors.shape[2:]) gripper_tensors = self.gripper_shift(gripper_tensors) gripper_tensors = gripper_tensors.view(bs, seq_len, *gripper_tensors.shape[1:]) robot_obs = torch.zeros(1) if self.act_step != 1: actions = torch.zeros((action_tensors.shape[0], self.window_size, self.act_step, action_tensors.shape[-1])) for b in range(action_tensors.shape[0]): for ix in range(self.window_size): actions[b, ix] = action_tensors[b, ix:ix+self.act_step] robot_obs = torch.zeros((action_tensors.shape[0], self.window_size, self.act_step, state_tensors.shape[-1])) for b in range(action_tensors.shape[0]): for ix in range(self.window_size): robot_obs[b, ix] = state_tensors[b, ix:ix+self.act_step] robot_obs = torch.cat([robot_obs[..., :6], robot_obs[..., [-1]]], dim=-1) action_tensors = actions image_tensors = image_tensors[:, :-(self.act_step-1)] gripper_tensors = gripper_tensors[:, :-(self.act_step-1)] state_tensors = state_tensors[:, :-(self.act_step-1)] return image_tensors, (text_tensors, attention_mask), action_tensors, gripper_tensors, state_tensors, robot_obs class CalvinDataset(Dataset): """Naive implementation of dataset to store calvin debug dataset, may be changed to WDS for the full dataset """ def __init__(self, image_fn, text_fn, dataset_path, is_train=True) -> None: super().__init__() self.dataset_path = dataset_path self.image_fn = image_fn self.text_fn = text_fn tag = "training" if is_train else "validation" self.file_prefix = f"{self.dataset_path}/{tag}" self.anns = np.load( f"{self.file_prefix}/lang_annotations/auto_lang_ann.npy", allow_pickle=True ).item() def __len__(self): return len(self.anns["info"]["indx"]) def __getitem__(self, index): task = self.anns["language"]["task"][index] text = self.anns["language"]["ann"][index] st, ed = self.anns["info"]["indx"][index] # CJ: randomly sample a datapoint in the episode frame = random.randint(st, ed) frame = np.load( f"{self.file_prefix}/episode_{frame:07d}.npz" ) # , allow_pickle=True (lazy load) rgb_static = Image.fromarray(frame["rgb_static"]) rgb_gripper = Image.fromarray(frame["rgb_gripper"]) actions = np.array(frame["rel_actions"]) actions[..., 6:] = (actions[..., 6:] + 1) // 2 return rgb_static, text, actions def collater(self, sample): images = [s[0] for s in sample] texts = [s[1] for s in sample] 
actions = [s[2] for s in sample] image_tensors = self.image_fn(images) text_tensors = self.text_fn(texts) action_tensors = torch.FloatTensor(np.stack(actions)) return image_tensors, text_tensors, action_tensors def load_pkl(filename: Path) -> Dict[str, np.ndarray]: with open(filename, "rb") as f: return pickle.load(f) def load_npz(filename: Path) -> Dict[str, np.ndarray]: return np.load(filename.as_posix()) class SharedEpoch: def __init__(self, epoch: int = 0): self.shared_epoch = Value("i", epoch) def set_value(self, epoch): self.shared_epoch.value = epoch def get_value(self): return self.shared_epoch.value @dataclass class DataInfo: dataloader: DataLoader sampler: DistributedSampler = None shared_epoch: SharedEpoch = None dataset: Dataset = None def set_epoch(self, epoch): if self.shared_epoch is not None: self.shared_epoch.set_value(epoch) if self.sampler is not None and isinstance(self.sampler, DistributedSampler): self.sampler.set_epoch(epoch) def preprocess_image(sample, image_processor): image = [image_processor(s).unsqueeze(0) for s in sample] image = torch.cat(image, dim=0) # apply random horizontal flip and color jitter return image def preprocess_text_calvin(sample, tokenizer): tokenizer.padding_side = "right" sample = [ # (f"{s.strip()}{tokenizer.eos_token}") # for s in sample (f"<image>{s.strip()}<|endofchunk|>{tokenizer.eos_token}") for s in sample ] text = tokenizer( sample, max_length=32, padding="longest", truncation="only_first", return_tensors="pt", ) return text["input_ids"], text["attention_mask"] def preprocess_interleaved(sample, tokenizer, clip_processor, sim_threshold): info = json.loads(sample[0]) tar_file_obj = io.BytesIO(sample[1]) image_tar = tarfile.open(fileobj=tar_file_obj) sentences = info["text_list"] images, image_idxs = [], [] for image_path, sim in zip(info["image_info"], info["similarity_matrix"]): # pick one image per sentence if info["image_info"][image_path]["matched_text_index"] in image_idxs: continue rawbytes = image_tar.extractfile( os.path.join(image_tar.getnames()[0], image_path) ).read() # filter to images >= 10KB if len(rawbytes) // 1000 <= MIN_KB: continue if sim[info["image_info"][image_path]["matched_text_index"]] < sim_threshold: continue image = Image.open(io.BytesIO(rawbytes)).convert("RGB") images.append(image) image_idxs.append(info["image_info"][image_path]["matched_text_index"]) if len(images) == 0: raise ValueError("No images in sample") # filter out images that are exact duplicates images_tensors = preprocess_image(images, clip_processor) keep_ixs = range(min(len(images_tensors), MAX_NUM_IMAGES)) images_tensors = images_tensors[keep_ixs] image_idxs = [image_idxs[ix] for ix in keep_ixs] # pad to 5 images if len(images_tensors) < MAX_NUM_IMAGES: zero_padding = torch.zeros( (MAX_NUM_IMAGES - len(images_tensors), 3, 224, 224), dtype=torch.float ) images_tensors = torch.cat((images_tensors, zero_padding), dim=0) # add in <image> and <eoc> tokens # eoc after sentence = "sentence loss" for ix in image_idxs: sentences[ix] = f"<|endofchunk|><image>{sentences[ix]}" text = " ".join(sentences) text = text.replace("<|endofchunk|>", "", 1) # but remove first eoc # whitespace cleanup text = ( text.replace(" <|endofchunk|>", "<|endofchunk|>") .replace("<image> ", "<image>") .replace(" <image>", "<image>") ) text = f"{text}<|endofchunk|>{tokenizer.eos_token}" tokenizer.padding_side = "right" text_tensor = tokenizer( text, max_length=256, truncation=True, padding="max_length", return_tensors="pt" ) # reject sequences with too few images (after 
truncation) num_images = torch.count_nonzero( text_tensor["input_ids"] == tokenizer.additional_special_tokens_ids[ tokenizer.additional_special_tokens.index("<image>") ] ) if num_images == 0: raise ValueError("No images in sample") elif ( num_images == 1 and random.random() <= 0.5 ): # 50% chance of keeping single image samples raise ValueError("Only one image in sample") return ( images_tensors, (text_tensor["input_ids"], text_tensor["attention_mask"]), ) def get_coco_dataset(args, image_processor, tokenizer, epoch=0): coco_data_dir = "path/to/coco/train2014" coco_ann = "path/to/coco/annotations/captions_train2014.json" preprocess_text_fn = functools.partial(preprocess_text_calvin, tokenizer=tokenizer)
coco_dataset = CaptionDataset(coco_data_dir, coco_ann, preprocess_text_fn, image_processor)
1
2023-11-02 01:36:23+00:00
12k
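The CALVIN record above pads short sequences differently depending on the action space: in the relative-action branch of _pad_sequence, every action dimension except the last is zero-padded (no further motion), while the final gripper dimension repeats its last value. A minimal, standalone sketch of that rule — the function name and shapes below are illustrative, not part of the record:

import torch

def pad_relative_actions(actions: torch.Tensor, pad_size: int) -> torch.Tensor:
    # Pad a (T, action_dim) relative-action sequence to length T + pad_size:
    # zero-pad every dimension except the last, and repeat the last (gripper)
    # dimension, mirroring the relative-action branch of _pad_sequence above.
    zeros = torch.zeros(pad_size, actions.shape[-1] - 1, dtype=actions.dtype)
    gripper = actions[-1:, -1:].repeat(pad_size, 1)
    return torch.cat([actions, torch.cat([zeros, gripper], dim=-1)], dim=0)

# Usage: a 3-step, 7-dof sequence padded out to 5 steps.
acts = torch.randn(3, 7)
acts[:, -1] = 1.0  # gripper held open
padded = pad_relative_actions(acts, 2)
assert padded.shape == (5, 7)
assert torch.all(padded[3:, :-1] == 0) and torch.all(padded[3:, -1] == 1.0)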
microsoft/monitors4codegen
src/monitors4codegen/multilspy/lsp_protocol_handler/server.py
[ { "identifier": "LspNotification", "path": "src/monitors4codegen/multilspy/lsp_protocol_handler/lsp_requests.py", "snippet": "class LspNotification:\n def __init__(self, send_notification):\n self.send_notification = send_notification\n\n def did_change_workspace_folders(\n self, params: lsp_types.DidChangeWorkspaceFoldersParams\n ) -> None:\n \"\"\"The `workspace/didChangeWorkspaceFolders` notification is sent from the client to the server when the workspace\n folder configuration changes.\"\"\"\n return self.send_notification(\"workspace/didChangeWorkspaceFolders\", params)\n\n def cancel_work_done_progress(\n self, params: lsp_types.WorkDoneProgressCancelParams\n ) -> None:\n \"\"\"The `window/workDoneProgress/cancel` notification is sent from the client to the server to cancel a progress\n initiated on the server side.\"\"\"\n return self.send_notification(\"window/workDoneProgress/cancel\", params)\n\n def did_create_files(self, params: lsp_types.CreateFilesParams) -> None:\n \"\"\"The did create files notification is sent from the client to the server when\n files were created from within the client.\n\n @since 3.16.0\"\"\"\n return self.send_notification(\"workspace/didCreateFiles\", params)\n\n def did_rename_files(self, params: lsp_types.RenameFilesParams) -> None:\n \"\"\"The did rename files notification is sent from the client to the server when\n files were renamed from within the client.\n\n @since 3.16.0\"\"\"\n return self.send_notification(\"workspace/didRenameFiles\", params)\n\n def did_delete_files(self, params: lsp_types.DeleteFilesParams) -> None:\n \"\"\"The will delete files request is sent from the client to the server before files are actually\n deleted as long as the deletion is triggered from within the client.\n\n @since 3.16.0\"\"\"\n return self.send_notification(\"workspace/didDeleteFiles\", params)\n\n def did_open_notebook_document(\n self, params: lsp_types.DidOpenNotebookDocumentParams\n ) -> None:\n \"\"\"A notification sent when a notebook opens.\n\n @since 3.17.0\"\"\"\n return self.send_notification(\"notebookDocument/didOpen\", params)\n\n def did_change_notebook_document(\n self, params: lsp_types.DidChangeNotebookDocumentParams\n ) -> None:\n return self.send_notification(\"notebookDocument/didChange\", params)\n\n def did_save_notebook_document(\n self, params: lsp_types.DidSaveNotebookDocumentParams\n ) -> None:\n \"\"\"A notification sent when a notebook document is saved.\n\n @since 3.17.0\"\"\"\n return self.send_notification(\"notebookDocument/didSave\", params)\n\n def did_close_notebook_document(\n self, params: lsp_types.DidCloseNotebookDocumentParams\n ) -> None:\n \"\"\"A notification sent when a notebook closes.\n\n @since 3.17.0\"\"\"\n return self.send_notification(\"notebookDocument/didClose\", params)\n\n def initialized(self, params: lsp_types.InitializedParams) -> None:\n \"\"\"The initialized notification is sent from the client to the\n server after the client is fully initialized and the server\n is allowed to send requests from the server to the client.\"\"\"\n return self.send_notification(\"initialized\", params)\n\n def exit(self) -> None:\n \"\"\"The exit event is sent from the client to the server to\n ask the server to exit its process.\"\"\"\n return self.send_notification(\"exit\")\n\n def workspace_did_change_configuration(\n self, params: lsp_types.DidChangeConfigurationParams\n ) -> None:\n \"\"\"The configuration change notification is sent from the client to the server\n when the client's configuration has 
changed. The notification contains\n the changed configuration as defined by the language client.\"\"\"\n return self.send_notification(\"workspace/didChangeConfiguration\", params)\n\n def did_open_text_document(\n self, params: lsp_types.DidOpenTextDocumentParams\n ) -> None:\n \"\"\"The document open notification is sent from the client to the server to signal\n newly opened text documents. The document's truth is now managed by the client\n and the server must not try to read the document's truth using the document's\n uri. Open in this sense means it is managed by the client. It doesn't necessarily\n mean that its content is presented in an editor. An open notification must not\n be sent more than once without a corresponding close notification send before.\n This means open and close notification must be balanced and the max open count\n is one.\"\"\"\n return self.send_notification(\"textDocument/didOpen\", params)\n\n def did_change_text_document(\n self, params: lsp_types.DidChangeTextDocumentParams\n ) -> None:\n \"\"\"The document change notification is sent from the client to the server to signal\n changes to a text document.\"\"\"\n return self.send_notification(\"textDocument/didChange\", params)\n\n def did_close_text_document(\n self, params: lsp_types.DidCloseTextDocumentParams\n ) -> None:\n \"\"\"The document close notification is sent from the client to the server when\n the document got closed in the client. The document's truth now exists where\n the document's uri points to (e.g. if the document's uri is a file uri the\n truth now exists on disk). As with the open notification the close notification\n is about managing the document's content. Receiving a close notification\n doesn't mean that the document was open in an editor before. 
A close\n notification requires a previous open notification to be sent.\"\"\"\n return self.send_notification(\"textDocument/didClose\", params)\n\n def did_save_text_document(\n self, params: lsp_types.DidSaveTextDocumentParams\n ) -> None:\n \"\"\"The document save notification is sent from the client to the server when\n the document got saved in the client.\"\"\"\n return self.send_notification(\"textDocument/didSave\", params)\n\n def will_save_text_document(\n self, params: lsp_types.WillSaveTextDocumentParams\n ) -> None:\n \"\"\"A document will save notification is sent from the client to the server before\n the document is actually saved.\"\"\"\n return self.send_notification(\"textDocument/willSave\", params)\n\n def did_change_watched_files(\n self, params: lsp_types.DidChangeWatchedFilesParams\n ) -> None:\n \"\"\"The watched files notification is sent from the client to the server when\n the client detects changes to file watched by the language client.\"\"\"\n return self.send_notification(\"workspace/didChangeWatchedFiles\", params)\n\n def set_trace(self, params: lsp_types.SetTraceParams) -> None:\n return self.send_notification(\"$/setTrace\", params)\n\n def cancel_request(self, params: lsp_types.CancelParams) -> None:\n return self.send_notification(\"$/cancelRequest\", params)\n\n def progress(self, params: lsp_types.ProgressParams) -> None:\n return self.send_notification(\"$/progress\", params)" }, { "identifier": "LspRequest", "path": "src/monitors4codegen/multilspy/lsp_protocol_handler/lsp_requests.py", "snippet": "class LspRequest:\n def __init__(self, send_request):\n self.send_request = send_request\n\n async def implementation(\n self, params: lsp_types.ImplementationParams\n ) -> Union[\"lsp_types.Definition\", List[\"lsp_types.LocationLink\"], None]:\n \"\"\"A request to resolve the implementation locations of a symbol at a given text\n document position. The request's parameter is of type [TextDocumentPositionParams]\n (#TextDocumentPositionParams) the response is of type {@link Definition} or a\n Thenable that resolves to such.\"\"\"\n return await self.send_request(\"textDocument/implementation\", params)\n\n async def type_definition(\n self, params: lsp_types.TypeDefinitionParams\n ) -> Union[\"lsp_types.Definition\", List[\"lsp_types.LocationLink\"], None]:\n \"\"\"A request to resolve the type definition locations of a symbol at a given text\n document position. The request's parameter is of type [TextDocumentPositionParams]\n (#TextDocumentPositionParams) the response is of type {@link Definition} or a\n Thenable that resolves to such.\"\"\"\n return await self.send_request(\"textDocument/typeDefinition\", params)\n\n async def document_color(\n self, params: lsp_types.DocumentColorParams\n ) -> List[\"lsp_types.ColorInformation\"]:\n \"\"\"A request to list all color symbols found in a given text document. The request's\n parameter is of type {@link DocumentColorParams} the\n response is of type {@link ColorInformation ColorInformation[]} or a Thenable\n that resolves to such.\"\"\"\n return await self.send_request(\"textDocument/documentColor\", params)\n\n async def color_presentation(\n self, params: lsp_types.ColorPresentationParams\n ) -> List[\"lsp_types.ColorPresentation\"]:\n \"\"\"A request to list all presentation for a color. 
The request's\n parameter is of type {@link ColorPresentationParams} the\n response is of type {@link ColorInformation ColorInformation[]} or a Thenable\n that resolves to such.\"\"\"\n return await self.send_request(\"textDocument/colorPresentation\", params)\n\n async def folding_range(\n self, params: lsp_types.FoldingRangeParams\n ) -> Union[List[\"lsp_types.FoldingRange\"], None]:\n \"\"\"A request to provide folding ranges in a document. The request's\n parameter is of type {@link FoldingRangeParams}, the\n response is of type {@link FoldingRangeList} or a Thenable\n that resolves to such.\"\"\"\n return await self.send_request(\"textDocument/foldingRange\", params)\n\n async def declaration(\n self, params: lsp_types.DeclarationParams\n ) -> Union[\"lsp_types.Declaration\", List[\"lsp_types.LocationLink\"], None]:\n \"\"\"A request to resolve the type definition locations of a symbol at a given text\n document position. The request's parameter is of type [TextDocumentPositionParams]\n (#TextDocumentPositionParams) the response is of type {@link Declaration}\n or a typed array of {@link DeclarationLink} or a Thenable that resolves\n to such.\"\"\"\n return await self.send_request(\"textDocument/declaration\", params)\n\n async def selection_range(\n self, params: lsp_types.SelectionRangeParams\n ) -> Union[List[\"lsp_types.SelectionRange\"], None]:\n \"\"\"A request to provide selection ranges in a document. The request's\n parameter is of type {@link SelectionRangeParams}, the\n response is of type {@link SelectionRange SelectionRange[]} or a Thenable\n that resolves to such.\"\"\"\n return await self.send_request(\"textDocument/selectionRange\", params)\n\n async def prepare_call_hierarchy(\n self, params: lsp_types.CallHierarchyPrepareParams\n ) -> Union[List[\"lsp_types.CallHierarchyItem\"], None]:\n \"\"\"A request to result a `CallHierarchyItem` in a document at a given position.\n Can be used as an input to an incoming or outgoing call hierarchy.\n\n @since 3.16.0\"\"\"\n return await self.send_request(\"textDocument/prepareCallHierarchy\", params)\n\n async def incoming_calls(\n self, params: lsp_types.CallHierarchyIncomingCallsParams\n ) -> Union[List[\"lsp_types.CallHierarchyIncomingCall\"], None]:\n \"\"\"A request to resolve the incoming calls for a given `CallHierarchyItem`.\n\n @since 3.16.0\"\"\"\n return await self.send_request(\"callHierarchy/incomingCalls\", params)\n\n async def outgoing_calls(\n self, params: lsp_types.CallHierarchyOutgoingCallsParams\n ) -> Union[List[\"lsp_types.CallHierarchyOutgoingCall\"], None]:\n \"\"\"A request to resolve the outgoing calls for a given `CallHierarchyItem`.\n\n @since 3.16.0\"\"\"\n return await self.send_request(\"callHierarchy/outgoingCalls\", params)\n\n async def semantic_tokens_full(\n self, params: lsp_types.SemanticTokensParams\n ) -> Union[\"lsp_types.SemanticTokens\", None]:\n \"\"\"@since 3.16.0\"\"\"\n return await self.send_request(\"textDocument/semanticTokens/full\", params)\n\n async def semantic_tokens_delta(\n self, params: lsp_types.SemanticTokensDeltaParams\n ) -> Union[\"lsp_types.SemanticTokens\", \"lsp_types.SemanticTokensDelta\", None]:\n \"\"\"@since 3.16.0\"\"\"\n return await self.send_request(\"textDocument/semanticTokens/full/delta\", params)\n\n async def semantic_tokens_range(\n self, params: lsp_types.SemanticTokensRangeParams\n ) -> Union[\"lsp_types.SemanticTokens\", None]:\n \"\"\"@since 3.16.0\"\"\"\n return await self.send_request(\"textDocument/semanticTokens/range\", params)\n\n async 
def linked_editing_range(\n self, params: lsp_types.LinkedEditingRangeParams\n ) -> Union[\"lsp_types.LinkedEditingRanges\", None]:\n \"\"\"A request to provide ranges that can be edited together.\n\n @since 3.16.0\"\"\"\n return await self.send_request(\"textDocument/linkedEditingRange\", params)\n\n async def will_create_files(\n self, params: lsp_types.CreateFilesParams\n ) -> Union[\"lsp_types.WorkspaceEdit\", None]:\n \"\"\"The will create files request is sent from the client to the server before files are actually\n created as long as the creation is triggered from within the client.\n\n @since 3.16.0\"\"\"\n return await self.send_request(\"workspace/willCreateFiles\", params)\n\n async def will_rename_files(\n self, params: lsp_types.RenameFilesParams\n ) -> Union[\"lsp_types.WorkspaceEdit\", None]:\n \"\"\"The will rename files request is sent from the client to the server before files are actually\n renamed as long as the rename is triggered from within the client.\n\n @since 3.16.0\"\"\"\n return await self.send_request(\"workspace/willRenameFiles\", params)\n\n async def will_delete_files(\n self, params: lsp_types.DeleteFilesParams\n ) -> Union[\"lsp_types.WorkspaceEdit\", None]:\n \"\"\"The did delete files notification is sent from the client to the server when\n files were deleted from within the client.\n\n @since 3.16.0\"\"\"\n return await self.send_request(\"workspace/willDeleteFiles\", params)\n\n async def moniker(\n self, params: lsp_types.MonikerParams\n ) -> Union[List[\"lsp_types.Moniker\"], None]:\n \"\"\"A request to get the moniker of a symbol at a given text document position.\n The request parameter is of type {@link TextDocumentPositionParams}.\n The response is of type {@link Moniker Moniker[]} or `null`.\"\"\"\n return await self.send_request(\"textDocument/moniker\", params)\n\n async def prepare_type_hierarchy(\n self, params: lsp_types.TypeHierarchyPrepareParams\n ) -> Union[List[\"lsp_types.TypeHierarchyItem\"], None]:\n \"\"\"A request to result a `TypeHierarchyItem` in a document at a given position.\n Can be used as an input to a subtypes or supertypes type hierarchy.\n\n @since 3.17.0\"\"\"\n return await self.send_request(\"textDocument/prepareTypeHierarchy\", params)\n\n async def type_hierarchy_supertypes(\n self, params: lsp_types.TypeHierarchySupertypesParams\n ) -> Union[List[\"lsp_types.TypeHierarchyItem\"], None]:\n \"\"\"A request to resolve the supertypes for a given `TypeHierarchyItem`.\n\n @since 3.17.0\"\"\"\n return await self.send_request(\"typeHierarchy/supertypes\", params)\n\n async def type_hierarchy_subtypes(\n self, params: lsp_types.TypeHierarchySubtypesParams\n ) -> Union[List[\"lsp_types.TypeHierarchyItem\"], None]:\n \"\"\"A request to resolve the subtypes for a given `TypeHierarchyItem`.\n\n @since 3.17.0\"\"\"\n return await self.send_request(\"typeHierarchy/subtypes\", params)\n\n async def inline_value(\n self, params: lsp_types.InlineValueParams\n ) -> Union[List[\"lsp_types.InlineValue\"], None]:\n \"\"\"A request to provide inline values in a document. The request's parameter is of\n type {@link InlineValueParams}, the response is of type\n {@link InlineValue InlineValue[]} or a Thenable that resolves to such.\n\n @since 3.17.0\"\"\"\n return await self.send_request(\"textDocument/inlineValue\", params)\n\n async def inlay_hint(\n self, params: lsp_types.InlayHintParams\n ) -> Union[List[\"lsp_types.InlayHint\"], None]:\n \"\"\"A request to provide inlay hints in a document. 
The request's parameter is of\n type {@link InlayHintsParams}, the response is of type\n {@link InlayHint InlayHint[]} or a Thenable that resolves to such.\n\n @since 3.17.0\"\"\"\n return await self.send_request(\"textDocument/inlayHint\", params)\n\n async def resolve_inlay_hint(\n self, params: lsp_types.InlayHint\n ) -> \"lsp_types.InlayHint\":\n \"\"\"A request to resolve additional properties for an inlay hint.\n The request's parameter is of type {@link InlayHint}, the response is\n of type {@link InlayHint} or a Thenable that resolves to such.\n\n @since 3.17.0\"\"\"\n return await self.send_request(\"inlayHint/resolve\", params)\n\n async def text_document_diagnostic(\n self, params: lsp_types.DocumentDiagnosticParams\n ) -> \"lsp_types.DocumentDiagnosticReport\":\n \"\"\"The document diagnostic request definition.\n\n @since 3.17.0\"\"\"\n return await self.send_request(\"textDocument/diagnostic\", params)\n\n async def workspace_diagnostic(\n self, params: lsp_types.WorkspaceDiagnosticParams\n ) -> \"lsp_types.WorkspaceDiagnosticReport\":\n \"\"\"The workspace diagnostic request definition.\n\n @since 3.17.0\"\"\"\n return await self.send_request(\"workspace/diagnostic\", params)\n\n async def initialize(\n self, params: lsp_types.InitializeParams\n ) -> \"lsp_types.InitializeResult\":\n \"\"\"The initialize request is sent from the client to the server.\n It is sent once as the request after starting up the server.\n The requests parameter is of type {@link InitializeParams}\n the response if of type {@link InitializeResult} of a Thenable that\n resolves to such.\"\"\"\n return await self.send_request(\"initialize\", params)\n\n async def shutdown(self) -> None:\n \"\"\"A shutdown request is sent from the client to the server.\n It is sent once when the client decides to shutdown the\n server. The only notification that is sent after a shutdown request\n is the exit event.\"\"\"\n return await self.send_request(\"shutdown\")\n\n async def will_save_wait_until(\n self, params: lsp_types.WillSaveTextDocumentParams\n ) -> Union[List[\"lsp_types.TextEdit\"], None]:\n \"\"\"A document will save request is sent from the client to the server before\n the document is actually saved. The request can return an array of TextEdits\n which will be applied to the text document before it is saved. Please note that\n clients might drop results if computing the text edits took too long or if a\n server constantly fails on this request. This is done to keep the save fast and\n reliable.\"\"\"\n return await self.send_request(\"textDocument/willSaveWaitUntil\", params)\n\n async def completion(\n self, params: lsp_types.CompletionParams\n ) -> Union[List[\"lsp_types.CompletionItem\"], \"lsp_types.CompletionList\", None]:\n \"\"\"Request to request completion at a given text document position. The request's\n parameter is of type {@link TextDocumentPosition} the response\n is of type {@link CompletionItem CompletionItem[]} or {@link CompletionList}\n or a Thenable that resolves to such.\n\n The request can delay the computation of the {@link CompletionItem.detail `detail`}\n and {@link CompletionItem.documentation `documentation`} properties to the `completionItem/resolve`\n request. 
However, properties that are needed for the initial sorting and filtering, like `sortText`,\n `filterText`, `insertText`, and `textEdit`, must not be changed during resolve.\n \"\"\"\n return await self.send_request(\"textDocument/completion\", params)\n\n async def resolve_completion_item(\n self, params: lsp_types.CompletionItem\n ) -> \"lsp_types.CompletionItem\":\n \"\"\"Request to resolve additional information for a given completion item.The request's\n parameter is of type {@link CompletionItem} the response\n is of type {@link CompletionItem} or a Thenable that resolves to such.\"\"\"\n return await self.send_request(\"completionItem/resolve\", params)\n\n async def hover(\n self, params: lsp_types.HoverParams\n ) -> Union[\"lsp_types.Hover\", None]:\n \"\"\"Request to request hover information at a given text document position. The request's\n parameter is of type {@link TextDocumentPosition} the response is of\n type {@link Hover} or a Thenable that resolves to such.\"\"\"\n return await self.send_request(\"textDocument/hover\", params)\n\n async def signature_help(\n self, params: lsp_types.SignatureHelpParams\n ) -> Union[\"lsp_types.SignatureHelp\", None]:\n return await self.send_request(\"textDocument/signatureHelp\", params)\n\n async def definition(\n self, params: lsp_types.DefinitionParams\n ) -> Union[\"lsp_types.Definition\", List[\"lsp_types.LocationLink\"], None]:\n \"\"\"A request to resolve the definition location of a symbol at a given text\n document position. The request's parameter is of type [TextDocumentPosition]\n (#TextDocumentPosition) the response is of either type {@link Definition}\n or a typed array of {@link DefinitionLink} or a Thenable that resolves\n to such.\"\"\"\n return await self.send_request(\"textDocument/definition\", params)\n\n async def references(\n self, params: lsp_types.ReferenceParams\n ) -> Union[List[\"lsp_types.Location\"], None]:\n \"\"\"A request to resolve project-wide references for the symbol denoted\n by the given text document position. The request's parameter is of\n type {@link ReferenceParams} the response is of type\n {@link Location Location[]} or a Thenable that resolves to such.\"\"\"\n return await self.send_request(\"textDocument/references\", params)\n\n async def document_highlight(\n self, params: lsp_types.DocumentHighlightParams\n ) -> Union[List[\"lsp_types.DocumentHighlight\"], None]:\n \"\"\"Request to resolve a {@link DocumentHighlight} for a given\n text document position. The request's parameter is of type [TextDocumentPosition]\n (#TextDocumentPosition) the request response is of type [DocumentHighlight[]]\n (#DocumentHighlight) or a Thenable that resolves to such.\"\"\"\n return await self.send_request(\"textDocument/documentHighlight\", params)\n\n async def document_symbol(\n self, params: lsp_types.DocumentSymbolParams\n ) -> Union[\n List[\"lsp_types.SymbolInformation\"], List[\"lsp_types.DocumentSymbol\"], None\n ]:\n \"\"\"A request to list all symbols found in a given text document. 
The request's\n parameter is of type {@link TextDocumentIdentifier} the\n response is of type {@link SymbolInformation SymbolInformation[]} or a Thenable\n that resolves to such.\"\"\"\n return await self.send_request(\"textDocument/documentSymbol\", params)\n\n async def code_action(\n self, params: lsp_types.CodeActionParams\n ) -> Union[List[Union[\"lsp_types.Command\", \"lsp_types.CodeAction\"]], None]:\n \"\"\"A request to provide commands for the given text document and range.\"\"\"\n return await self.send_request(\"textDocument/codeAction\", params)\n\n async def resolve_code_action(\n self, params: lsp_types.CodeAction\n ) -> \"lsp_types.CodeAction\":\n \"\"\"Request to resolve additional information for a given code action.The request's\n parameter is of type {@link CodeAction} the response\n is of type {@link CodeAction} or a Thenable that resolves to such.\"\"\"\n return await self.send_request(\"codeAction/resolve\", params)\n\n async def workspace_symbol(\n self, params: lsp_types.WorkspaceSymbolParams\n ) -> Union[\n List[\"lsp_types.SymbolInformation\"], List[\"lsp_types.WorkspaceSymbol\"], None\n ]:\n \"\"\"A request to list project-wide symbols matching the query string given\n by the {@link WorkspaceSymbolParams}. The response is\n of type {@link SymbolInformation SymbolInformation[]} or a Thenable that\n resolves to such.\n\n @since 3.17.0 - support for WorkspaceSymbol in the returned data. Clients\n need to advertise support for WorkspaceSymbols via the client capability\n `workspace.symbol.resolveSupport`.\n \"\"\"\n return await self.send_request(\"workspace/symbol\", params)\n\n async def resolve_workspace_symbol(\n self, params: lsp_types.WorkspaceSymbol\n ) -> \"lsp_types.WorkspaceSymbol\":\n \"\"\"A request to resolve the range inside the workspace\n symbol's location.\n\n @since 3.17.0\"\"\"\n return await self.send_request(\"workspaceSymbol/resolve\", params)\n\n async def code_lens(\n self, params: lsp_types.CodeLensParams\n ) -> Union[List[\"lsp_types.CodeLens\"], None]:\n \"\"\"A request to provide code lens for the given text document.\"\"\"\n return await self.send_request(\"textDocument/codeLens\", params)\n\n async def resolve_code_lens(\n self, params: lsp_types.CodeLens\n ) -> \"lsp_types.CodeLens\":\n \"\"\"A request to resolve a command for a given code lens.\"\"\"\n return await self.send_request(\"codeLens/resolve\", params)\n\n async def document_link(\n self, params: lsp_types.DocumentLinkParams\n ) -> Union[List[\"lsp_types.DocumentLink\"], None]:\n \"\"\"A request to provide document links\"\"\"\n return await self.send_request(\"textDocument/documentLink\", params)\n\n async def resolve_document_link(\n self, params: lsp_types.DocumentLink\n ) -> \"lsp_types.DocumentLink\":\n \"\"\"Request to resolve additional information for a given document link. 
The request's\n parameter is of type {@link DocumentLink} the response\n is of type {@link DocumentLink} or a Thenable that resolves to such.\"\"\"\n return await self.send_request(\"documentLink/resolve\", params)\n\n async def formatting(\n self, params: lsp_types.DocumentFormattingParams\n ) -> Union[List[\"lsp_types.TextEdit\"], None]:\n \"\"\"A request to to format a whole document.\"\"\"\n return await self.send_request(\"textDocument/formatting\", params)\n\n async def range_formatting(\n self, params: lsp_types.DocumentRangeFormattingParams\n ) -> Union[List[\"lsp_types.TextEdit\"], None]:\n \"\"\"A request to to format a range in a document.\"\"\"\n return await self.send_request(\"textDocument/rangeFormatting\", params)\n\n async def on_type_formatting(\n self, params: lsp_types.DocumentOnTypeFormattingParams\n ) -> Union[List[\"lsp_types.TextEdit\"], None]:\n \"\"\"A request to format a document on type.\"\"\"\n return await self.send_request(\"textDocument/onTypeFormatting\", params)\n\n async def rename(\n self, params: lsp_types.RenameParams\n ) -> Union[\"lsp_types.WorkspaceEdit\", None]:\n \"\"\"A request to rename a symbol.\"\"\"\n return await self.send_request(\"textDocument/rename\", params)\n\n async def prepare_rename(\n self, params: lsp_types.PrepareRenameParams\n ) -> Union[\"lsp_types.PrepareRenameResult\", None]:\n \"\"\"A request to test and perform the setup necessary for a rename.\n\n @since 3.16 - support for default behavior\"\"\"\n return await self.send_request(\"textDocument/prepareRename\", params)\n\n async def execute_command(\n self, params: lsp_types.ExecuteCommandParams\n ) -> Union[\"lsp_types.LSPAny\", None]:\n \"\"\"A request send from the client to the server to execute a command. The request might return\n a workspace edit which the client will apply to the workspace.\"\"\"\n return await self.send_request(\"workspace/executeCommand\", params)" }, { "identifier": "ErrorCodes", "path": "src/monitors4codegen/multilspy/lsp_protocol_handler/lsp_types.py", "snippet": "class ErrorCodes(IntEnum):\n \"\"\"Predefined error codes.\"\"\"\n\n ParseError = -32700\n InvalidRequest = -32600\n MethodNotFound = -32601\n InvalidParams = -32602\n InternalError = -32603\n ServerNotInitialized = -32002\n \"\"\" Error code indicating that a server received a notification or\n request before the server has received the `initialize` request. \"\"\"\n UnknownErrorCode = -32001" } ]
import asyncio
import dataclasses
import json
import os
from typing import Any, Dict, List, Optional, Union

from .lsp_requests import LspNotification, LspRequest
from .lsp_types import ErrorCodes
8,392
super().__init__(message) self.code = code def to_lsp(self) -> StringDict: return {"code": self.code, "message": super().__str__()} @classmethod def from_lsp(cls, d: StringDict) -> "Error": return Error(d["code"], d["message"]) def __str__(self) -> str: return f"{super().__str__()} ({self.code})" def make_response(request_id: Any, params: PayloadLike) -> StringDict: return {"jsonrpc": "2.0", "id": request_id, "result": params} def make_error_response(request_id: Any, err: Error) -> StringDict: return {"jsonrpc": "2.0", "id": request_id, "error": err.to_lsp()} def make_notification(method: str, params: PayloadLike) -> StringDict: return {"jsonrpc": "2.0", "method": method, "params": params} def make_request(method: str, request_id: Any, params: PayloadLike) -> StringDict: return {"jsonrpc": "2.0", "method": method, "id": request_id, "params": params} class StopLoopException(Exception): pass def create_message(payload: PayloadLike): body = json.dumps(payload, check_circular=False, ensure_ascii=False, separators=(",", ":")).encode(ENCODING) return ( f"Content-Length: {len(body)}\r\n".encode(ENCODING), "Content-Type: application/vscode-jsonrpc; charset=utf-8\r\n\r\n".encode(ENCODING), body, ) class MessageType: error = 1 warning = 2 info = 3 log = 4 class Request: def __init__(self) -> None: self.cv = asyncio.Condition() self.result: Optional[PayloadLike] = None self.error: Optional[Error] = None async def on_result(self, params: PayloadLike) -> None: self.result = params async with self.cv: self.cv.notify() async def on_error(self, err: Error) -> None: self.error = err async with self.cv: self.cv.notify() def content_length(line: bytes) -> Optional[int]: if line.startswith(b"Content-Length: "): _, value = line.split(b"Content-Length: ") value = value.strip() try: return int(value) except ValueError: raise ValueError("Invalid Content-Length header: {}".format(value)) return None class LanguageServerHandler: """ This class provides the implementation of Python client for the Language Server Protocol. A class that launches the language server and communicates with it using the Language Server Protocol (LSP). It provides methods for sending requests, responses, and notifications to the server and for registering handlers for requests and notifications from the server. Uses JSON-RPC 2.0 for communication with the server over stdin/stdout. Attributes: send: A LspRequest object that can be used to send requests to the server and await for the responses. notify: A LspNotification object that can be used to send notifications to the server. cmd: A string that represents the command to launch the language server process. process: A subprocess.Popen object that represents the language server process. _received_shutdown: A boolean flag that indicates whether the client has received a shutdown request from the server. request_id: An integer that represents the next available request id for the client. _response_handlers: A dictionary that maps request ids to Request objects that store the results or errors of the requests. on_request_handlers: A dictionary that maps method names to callback functions that handle requests from the server. on_notification_handlers: A dictionary that maps method names to callback functions that handle notifications from the server. logger: An optional function that takes two strings (source and destination) and a payload dictionary, and logs the communication between the client and the server. 
tasks: A dictionary that maps task ids to asyncio.Task objects that represent the asynchronous tasks created by the handler. task_counter: An integer that represents the next available task id for the handler. loop: An asyncio.AbstractEventLoop object that represents the event loop used by the handler. """ def __init__(self, process_launch_info: ProcessLaunchInfo, logger=None) -> None: """ Params: cmd: A string that represents the command to launch the language server process. logger: An optional function that takes two strings (source and destination) and a payload dictionary, and logs the communication between the client and the server. """ self.send = LspRequest(self.send_request)
""" This file provides the implementation of the JSON-RPC client, that launches and communicates with the language server. The initial implementation of this file was obtained from https://github.com/predragnikolic/OLSP under the MIT License with the following terms: MIT License Copyright (c) 2023 Предраг Николић Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ StringDict = Dict[str, Any] PayloadLike = Union[List[StringDict], StringDict, None] CONTENT_LENGTH = "Content-Length: " ENCODING = "utf-8" @dataclasses.dataclass class ProcessLaunchInfo: """ This class is used to store the information required to launch a process. """ # The command to launch the process cmd: str # The environment variables to set for the process env: Dict[str, str] = dataclasses.field(default_factory=dict) # The working directory for the process cwd: str = os.getcwd() class Error(Exception): def __init__(self, code: ErrorCodes, message: str) -> None: super().__init__(message) self.code = code def to_lsp(self) -> StringDict: return {"code": self.code, "message": super().__str__()} @classmethod def from_lsp(cls, d: StringDict) -> "Error": return Error(d["code"], d["message"]) def __str__(self) -> str: return f"{super().__str__()} ({self.code})" def make_response(request_id: Any, params: PayloadLike) -> StringDict: return {"jsonrpc": "2.0", "id": request_id, "result": params} def make_error_response(request_id: Any, err: Error) -> StringDict: return {"jsonrpc": "2.0", "id": request_id, "error": err.to_lsp()} def make_notification(method: str, params: PayloadLike) -> StringDict: return {"jsonrpc": "2.0", "method": method, "params": params} def make_request(method: str, request_id: Any, params: PayloadLike) -> StringDict: return {"jsonrpc": "2.0", "method": method, "id": request_id, "params": params} class StopLoopException(Exception): pass def create_message(payload: PayloadLike): body = json.dumps(payload, check_circular=False, ensure_ascii=False, separators=(",", ":")).encode(ENCODING) return ( f"Content-Length: {len(body)}\r\n".encode(ENCODING), "Content-Type: application/vscode-jsonrpc; charset=utf-8\r\n\r\n".encode(ENCODING), body, ) class MessageType: error = 1 warning = 2 info = 3 log = 4 class Request: def __init__(self) -> None: self.cv = asyncio.Condition() self.result: Optional[PayloadLike] = None self.error: Optional[Error] = None async def on_result(self, params: PayloadLike) -> None: self.result = params async with self.cv: self.cv.notify() async def on_error(self, err: Error) -> None: self.error = err async with self.cv: self.cv.notify() def 
content_length(line: bytes) -> Optional[int]: if line.startswith(b"Content-Length: "): _, value = line.split(b"Content-Length: ") value = value.strip() try: return int(value) except ValueError: raise ValueError("Invalid Content-Length header: {}".format(value)) return None class LanguageServerHandler: """ This class provides the implementation of Python client for the Language Server Protocol. A class that launches the language server and communicates with it using the Language Server Protocol (LSP). It provides methods for sending requests, responses, and notifications to the server and for registering handlers for requests and notifications from the server. Uses JSON-RPC 2.0 for communication with the server over stdin/stdout. Attributes: send: A LspRequest object that can be used to send requests to the server and await for the responses. notify: A LspNotification object that can be used to send notifications to the server. cmd: A string that represents the command to launch the language server process. process: A subprocess.Popen object that represents the language server process. _received_shutdown: A boolean flag that indicates whether the client has received a shutdown request from the server. request_id: An integer that represents the next available request id for the client. _response_handlers: A dictionary that maps request ids to Request objects that store the results or errors of the requests. on_request_handlers: A dictionary that maps method names to callback functions that handle requests from the server. on_notification_handlers: A dictionary that maps method names to callback functions that handle notifications from the server. logger: An optional function that takes two strings (source and destination) and a payload dictionary, and logs the communication between the client and the server. tasks: A dictionary that maps task ids to asyncio.Task objects that represent the asynchronous tasks created by the handler. task_counter: An integer that represents the next available task id for the handler. loop: An asyncio.AbstractEventLoop object that represents the event loop used by the handler. """ def __init__(self, process_launch_info: ProcessLaunchInfo, logger=None) -> None: """ Params: cmd: A string that represents the command to launch the language server process. logger: An optional function that takes two strings (source and destination) and a payload dictionary, and logs the communication between the client and the server. """ self.send = LspRequest(self.send_request)
self.notify = LspNotification(self.send_notification)
0
2023-11-04 21:49:04+00:00
12k
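The sample above frames every JSON-RPC payload with a Content-Length header (see create_message and the content_length parser). As a rough sketch of the receiving side only, the snippet below reads one framed message from an asyncio.StreamReader; the function name read_message and the use of StreamReader are assumptions made for illustration and do not come from the sampled repository.

import asyncio
import json
from typing import Any, Optional


async def read_message(reader: asyncio.StreamReader) -> Optional[Any]:
    # Read header lines until the blank line that ends the header block.
    content_length = None
    while True:
        line = await reader.readline()
        if not line:                      # EOF before a complete header block
            return None
        if line in (b"\r\n", b"\n"):      # blank line terminates the headers
            break
        if line.startswith(b"Content-Length: "):
            content_length = int(line[len(b"Content-Length: "):].strip())
    if content_length is None:
        raise ValueError("missing Content-Length header")
    # The body is exactly content_length bytes of UTF-8 encoded JSON.
    body = await reader.readexactly(content_length)
    return json.loads(body.decode("utf-8"))

Paired with the create_message framing shown in the sample, this is enough to exchange requests, responses and notifications over the server's stdin/stdout pipes.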
bigai-nlco/langsuite
langsuite/envs/cwah/cwah_world.py
[ { "identifier": "CSS4_COLORS", "path": "langsuite/constants.py", "snippet": "CSS4_COLORS = {\n \"aliceblue\": \"#F0F8FF\",\n \"antiquewhite\": \"#FAEBD7\",\n \"aqua\": \"#00FFFF\",\n \"aquamarine\": \"#7FFFD4\",\n \"azure\": \"#F0FFFF\",\n \"beige\": \"#F5F5DC\",\n \"bisque\": \"#FFE4C4\",\n \"black\": \"#000000\",\n \"blanchedalmond\": \"#FFEBCD\",\n \"blue\": \"#0000FF\",\n \"blueviolet\": \"#8A2BE2\",\n \"brown\": \"#A52A2A\",\n \"burlywood\": \"#DEB887\",\n \"cadetblue\": \"#5F9EA0\",\n \"chartreuse\": \"#7FFF00\",\n \"chocolate\": \"#D2691E\",\n \"coral\": \"#FF7F50\",\n \"cornflowerblue\": \"#6495ED\",\n \"cornsilk\": \"#FFF8DC\",\n \"crimson\": \"#DC143C\",\n \"cyan\": \"#00FFFF\",\n \"darkblue\": \"#00008B\",\n \"darkcyan\": \"#008B8B\",\n \"darkgoldenrod\": \"#B8860B\",\n \"darkgray\": \"#A9A9A9\",\n \"darkgreen\": \"#006400\",\n \"darkgrey\": \"#A9A9A9\",\n \"darkkhaki\": \"#BDB76B\",\n \"darkmagenta\": \"#8B008B\",\n \"darkolivegreen\": \"#556B2F\",\n \"darkorange\": \"#FF8C00\",\n \"darkorchid\": \"#9932CC\",\n \"darkred\": \"#8B0000\",\n \"darksalmon\": \"#E9967A\",\n \"darkseagreen\": \"#8FBC8F\",\n \"darkslateblue\": \"#483D8B\",\n \"darkslategray\": \"#2F4F4F\",\n \"darkslategrey\": \"#2F4F4F\",\n \"darkturquoise\": \"#00CED1\",\n \"darkviolet\": \"#9400D3\",\n \"deeppink\": \"#FF1493\",\n \"deepskyblue\": \"#00BFFF\",\n \"dimgray\": \"#696969\",\n \"dimgrey\": \"#696969\",\n \"dodgerblue\": \"#1E90FF\",\n \"firebrick\": \"#B22222\",\n \"floralwhite\": \"#FFFAF0\",\n \"forestgreen\": \"#228B22\",\n \"fuchsia\": \"#FF00FF\",\n \"gainsboro\": \"#DCDCDC\",\n \"ghostwhite\": \"#F8F8FF\",\n \"gold\": \"#FFD700\",\n \"goldenrod\": \"#DAA520\",\n \"gray\": \"#808080\",\n \"green\": \"#008000\",\n \"greenyellow\": \"#ADFF2F\",\n \"grey\": \"#808080\",\n \"honeydew\": \"#F0FFF0\",\n \"hotpink\": \"#FF69B4\",\n \"indianred\": \"#CD5C5C\",\n \"indigo\": \"#4B0082\",\n \"ivory\": \"#FFFFF0\",\n \"khaki\": \"#F0E68C\",\n \"lavender\": \"#E6E6FA\",\n \"lavenderblush\": \"#FFF0F5\",\n \"lawngreen\": \"#7CFC00\",\n \"lemonchiffon\": \"#FFFACD\",\n \"lightblue\": \"#ADD8E6\",\n \"lightcoral\": \"#F08080\",\n \"lightcyan\": \"#E0FFFF\",\n \"lightgoldenrodyellow\": \"#FAFAD2\",\n \"lightgray\": \"#D3D3D3\",\n \"lightgreen\": \"#90EE90\",\n \"lightgrey\": \"#D3D3D3\",\n \"lightpink\": \"#FFB6C1\",\n \"lightsalmon\": \"#FFA07A\",\n \"lightseagreen\": \"#20B2AA\",\n \"lightskyblue\": \"#87CEFA\",\n \"lightslategray\": \"#778899\",\n \"lightslategrey\": \"#778899\",\n \"lightsteelblue\": \"#B0C4DE\",\n \"lightyellow\": \"#FFFFE0\",\n \"lime\": \"#00FF00\",\n \"limegreen\": \"#32CD32\",\n \"linen\": \"#FAF0E6\",\n \"magenta\": \"#FF00FF\",\n \"maroon\": \"#800000\",\n \"mediumaquamarine\": \"#66CDAA\",\n \"mediumblue\": \"#0000CD\",\n \"mediumorchid\": \"#BA55D3\",\n \"mediumpurple\": \"#9370DB\",\n \"mediumseagreen\": \"#3CB371\",\n \"mediumslateblue\": \"#7B68EE\",\n \"mediumspringgreen\": \"#00FA9A\",\n \"mediumturquoise\": \"#48D1CC\",\n \"mediumvioletred\": \"#C71585\",\n \"midnightblue\": \"#191970\",\n \"mintcream\": \"#F5FFFA\",\n \"mistyrose\": \"#FFE4E1\",\n \"moccasin\": \"#FFE4B5\",\n \"navajowhite\": \"#FFDEAD\",\n \"navy\": \"#000080\",\n \"oldlace\": \"#FDF5E6\",\n \"olive\": \"#808000\",\n \"olivedrab\": \"#6B8E23\",\n \"orange\": \"#FFA500\",\n \"orangered\": \"#FF4500\",\n \"orchid\": \"#DA70D6\",\n \"palegoldenrod\": \"#EEE8AA\",\n \"palegreen\": \"#98FB98\",\n \"paleturquoise\": \"#AFEEEE\",\n \"palevioletred\": \"#DB7093\",\n \"papayawhip\": \"#FFEFD5\",\n \"peachpuff\": 
\"#FFDAB9\",\n \"peru\": \"#CD853F\",\n \"pink\": \"#FFC0CB\",\n \"plum\": \"#DDA0DD\",\n \"powderblue\": \"#B0E0E6\",\n \"purple\": \"#800080\",\n \"rebeccapurple\": \"#663399\",\n \"red\": \"#FF0000\",\n \"rosybrown\": \"#BC8F8F\",\n \"royalblue\": \"#4169E1\",\n \"saddlebrown\": \"#8B4513\",\n \"salmon\": \"#FA8072\",\n \"sandybrown\": \"#F4A460\",\n \"seagreen\": \"#2E8B57\",\n \"seashell\": \"#FFF5EE\",\n \"sienna\": \"#A0522D\",\n \"silver\": \"#C0C0C0\",\n \"skyblue\": \"#87CEEB\",\n \"slateblue\": \"#6A5ACD\",\n \"slategray\": \"#708090\",\n \"slategrey\": \"#708090\",\n \"snow\": \"#FFFAFA\",\n \"springgreen\": \"#00FF7F\",\n \"steelblue\": \"#4682B4\",\n \"tan\": \"#D2B48C\",\n \"teal\": \"#008080\",\n \"thistle\": \"#D8BFD8\",\n \"tomato\": \"#FF6347\",\n \"turquoise\": \"#40E0D0\",\n \"violet\": \"#EE82EE\",\n \"wheat\": \"#F5DEB3\",\n \"white\": \"#FFFFFF\",\n \"whitesmoke\": \"#F5F5F5\",\n \"yellow\": \"#FFFF00\",\n \"yellowgreen\": \"#9ACD32\",\n}" }, { "identifier": "Geometry", "path": "langsuite/shapes.py", "snippet": "class Geometry:\n def __init__(self) -> None:\n self.shapey_geo = None\n\n def __repr__(self) -> str:\n return \"\"" }, { "identifier": "Point2D", "path": "langsuite/shapes.py", "snippet": "class Point2D(Geometry):\n def __init__(self, *args) -> None:\n if len(args) > 2:\n raise TypeError(f\"Point2D takes at most 2 arguements ({len(args)} given)\")\n elif len(args) == 2:\n self.x, self.y = float(args[0]), float(args[1])\n elif len(args) == 1:\n if isinstance(args[0], Point2D) or isinstance(args[0], Point):\n self.x, self.y = args[0].x, args[0].y\n elif type(args[0]) in [list, tuple, np.ndarray] and len(args[0]) == 2:\n self.x, self.y = args[0][:2]\n else:\n raise TypeError(\n f\"Unsupport argument type for Point2D ({type(args[0])} given)\"\n )\n else:\n raise TypeError(\"Point2D takes at least 1 argument\")\n self.shapely_geo = Point(self.x, self.y)\n\n @property\n def modulus(self) -> float:\n return math.sqrt(self.x**2 + self.y**2)\n\n def __add__(self, other):\n return Point2D(self.x + other.x, self.y + other.y)\n\n def __sub__(self, other):\n return Point2D(self.x - other.x, self.y - other.y)\n\n def __mul__(self, other: float):\n return Point2D(self.x * other, self.y * other)\n\n def __truediv__(self, other: float):\n if other == 0.0:\n raise RuntimeError(\"Div Zero in Point2D\")\n return Point2D(self.x / other, self.y / other)\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, Point2D):\n return False\n return self.x == other.x and self.y == other.y\n\n def __str__(self) -> str:\n return f\"({self.x}, {self.y})\"\n\n def to_wkt(self) -> str:\n return self.shapely_geo.wkt\n\n def to_numpy(self) -> np.ndarray:\n return np.array([self.x, self.y], dtype=np.float32)\n\n def rotate(self, angle, center, use_radians=False):\n \"\"\"Rotation of Polygon2D geometry\n Refers to https://shapely.readthedocs.io/en/stable/manual.html#shapely.affinity.rotate\n\n Args:\n angle: degrees or radians by setting `use_radians=True`\n origin: (x0, y0)\n\n \"\"\"\n if isinstance(center, Point2D):\n center = (center.x, center.y)\n # TODO\n self.shapely_geo = shapely.affinity.rotate(\n self.shapely_geo, angle, center, use_radians\n )\n self.x = self.shapely_geo.x\n self.y = self.shapely_geo.y" }, { "identifier": "Polygon2D", "path": "langsuite/shapes.py", "snippet": "class Polygon2D(Geometry):\n def __init__(\n self,\n coords: List[Union[Point2D, Tuple[float, float]]],\n holes: Optional[List[Union[Point2D, Tuple[float, float]]]] = None,\n ) -> None:\n 
self.coords = [Point2D(c) for c in coords]\n self.holes = [] if holes is None else [Point2D(c) for c in holes]\n self.shapely_geo = Polygon(\n shell=[c.shapely_geo for c in self.coords],\n holes=[c.shapely_geo for c in self.holes],\n )\n\n def __repr__(self) -> str:\n return \"{\" + \", \".join([str(c) for c in self.coords]) + \"}\"\n\n @property\n def area(self) -> float:\n return self.shapely_geo.area\n\n @property\n def is_closed(self) -> bool:\n return len(self.coords) > 1 and self.coords[-1] == self.coords[0]\n\n @property\n def length(self) -> float:\n return self.shapely_geo.length\n\n @property\n def centroid(self) -> Point2D:\n return Point2D(self.shapely_geo.centroid)\n\n @property\n def x_min(self) -> float:\n return np.min([c.x for c in self.coords])\n\n @property\n def x_max(self) -> float:\n return np.max([c.x for c in self.coords])\n\n @property\n def y_min(self) -> float:\n return np.min([c.y for c in self.coords])\n\n @property\n def y_max(self) -> float:\n return np.max([c.y for c in self.coords])\n\n @property\n def xy(self):\n return self.shapely_geo.exterior.xy\n\n def intersects(self, other) -> bool:\n return self.shapely_geo.intersects(other.shapely_geo)\n\n def rotate(self, angle, origin=\"center\", use_radians=False):\n \"\"\"Rotation of Polygon2D geometry\n Refers to https://shapely.readthedocs.io/en/stable/manual.html#shapely.affinity.rotate\n\n Args:\n angle: degrees or radians by setting `use_radians=True`\n origin: ['center', 'centroid', (x0, y0)]\n\n \"\"\"\n if isinstance(origin, Point2D):\n origin = (origin.x, origin.y)\n self.shapely_geo = shapely.affinity.rotate(\n self.shapely_geo, angle, origin, use_radians\n )\n self.coords = [Point2D(c) for c in self.shapely_geo.exterior.coords]\n\n def to_wkt(self) -> str:\n \"\"\"Well-known text representation of geometry\n https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry\n\n Examples:\n POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))\n POLYGON ((35 10, 45 45, 15 40, 10 20, 35 10), (20 30, 35 35, 30 20, 20 30))\n\n \"\"\"\n return self.shapely_geo.wkt\n\n def to_numpy(self) -> np.array:\n return (\n np.array([p.to_numpy() for p in self.coords[:-1]])\n if self.is_closed\n else np.array([p.to_numpy() for p in self.coords])\n )\n\n def contains(self, other) -> bool:\n \"\"\"Returns True if a Point or a Polygon is contained by the current Polygon\n Args:\n other: Point2D or Polygon2D\n\n Returns:\n a boolean value\n \"\"\"\n if not isinstance(other, Polygon2D) and not isinstance(other, Point2D):\n raise TypeError(\n f\"contains only support Polygon2D or Point2D ({type(other)} given)\"\n )\n return self.shapely_geo.contains(other.shapely_geo)" }, { "identifier": "logger", "path": "langsuite/utils/logging.py", "snippet": "class Logger:\n def __init__(\n self,\n log_level: int = logging.DEBUG,\n log_file: str = \"\",\n use_cmd: bool = False,\n console_logging=True,\n ) -> None:\n def has_cmdline_interface(self):\n def setLevel(self, level):\n def set_cmd_client(self, cmd_cli: CMDClient, disable_console_logging=True):\n def set_log_file(self, log_file):\n def close(self):\n def info(self, msg):\n def debug(self, msg):\n def error(self, msg):\n def warn(self, msg):\n def user_input(self):\n def emit(self, message):\n def robot_emit(self, message_or_streamer, name=\"Robot\", action=\"chat\"):" }, { "identifier": "WORLD_REGISTRY", "path": "langsuite/world.py", "snippet": "WORLD_REGISTRY = Registry(\"world\")" }, { "identifier": "Door", "path": "langsuite/world.py", "snippet": "class Door(Object2D):\n def 
__init__(\n self,\n door_id: str,\n *,\n alias: Optional[str] = None,\n geometry: Optional[Polygon2D] = None,\n asset_id: Optional[str] = None,\n room2room: Tuple[str] = [],\n openable: bool = True,\n is_open: bool = True,\n **kwargs,\n ):\n super().__init__(\n ObjectType.DOOR,\n door_id,\n alias=alias,\n geometry=geometry,\n asset_id=asset_id,\n **kwargs,\n )\n self.room2room = room2room\n self.openable = openable\n self.is_open = is_open\n self.wall = None\n self.chilren_types = []" }, { "identifier": "Object2D", "path": "langsuite/world.py", "snippet": "class Object2D:\n def __init__(\n self,\n obj_type: ObjectType,\n id: str,\n *,\n alias: Optional[str] = None,\n geometry: Optional[Geometry] = None,\n asset_id: Optional[str] = None,\n **kwargs,\n ) -> None:\n self.id = id\n self.asset_id = asset_id\n self.alias = alias\n self.obj_type = obj_type\n self.geometry = geometry\n self.props = dict()\n for k, val in kwargs.items():\n self.props[k] = val\n\n self.walls = defaultdict()\n self.doors = defaultdict()\n self.windows = defaultdict()\n if \"children\" in self.props:\n self.children = self.props[\"children\"]\n else:\n self.children = defaultdict()\n self.chilren_types = [ObjectType.OBJECT]\n\n @classmethod\n def create(cls, obj_data):\n return NotImplementedError()\n\n def __repr__(self) -> str:\n obj_string = f\"asset_id: {self.asset_id}\"\n return obj_string\n\n def contains(self, other) -> bool:\n \"\"\"Returns True is another object is in current object\n\n Args:\n other: Object2D: an object instance\n \"\"\"\n if not isinstance(other, Object2D):\n return ValueError(\n f\"Invalid input: other has to be of type Object ({type(other)} given)\"\n )\n if other.obj_type not in self.chilren_types:\n return False\n if other.obj_type == ObjectType.WALL:\n return other.id in self.walls.keys()\n elif other.obj_type == ObjectType.DOOR:\n return other.id in self.doors.keys()\n elif other.obj_type == ObjectType.WINDOW:\n return other.id in self.windows.keys()\n elif other.obj_type == ObjectType.OBJECT:\n return other.id in self.children.keys()\n else:\n raise ValueError(f\"Invalid input: {type(other)}.\")\n\n def add_wall(self, wall) -> Optional[str]:\n if ObjectType.WALL not in self.chilren_types:\n raise ValueError(f\"Unable to add type {wall.obj_type}\")\n if wall.id in self.wall:\n return wall.id\n self.walls[wall.id] = wall\n return wall.id\n\n def add_door(self, door) -> Optional[str]:\n if ObjectType.DOOR not in self.chilren_types:\n raise ValueError(f\"Unable to add type {door.obj_type}\")\n if door.id in self.doors:\n return door.id\n self.doors[door.id] = door\n return door.id\n\n def add_window(self, window) -> Optional[str]:\n if ObjectType.WINDOW not in self.chilren_types:\n raise ValueError(f\"Unable to add type {window.obj_type}\")\n\n if window.id in self.windows:\n return window.id\n self.windows[window.id] = window\n return window.id\n\n def add_object(self, object) -> Optional[str]:\n if ObjectType.OBJECT not in self.chilren_types:\n raise ValueError(f\"Unable to add type {object.obj_type}\")\n\n if object.id in self.children:\n return object.id\n self.children[object.id] = object\n return object.id\n\n def update_position(self, position):\n diff = position - self.position\n coords = []\n for i in range(len(self.geometry.coords)):\n coords.append(self.geometry.coords[i] + diff)\n self.geometry = Polygon2D(coords)\n self.position = position" }, { "identifier": "ObjectType", "path": "langsuite/world.py", "snippet": "class ObjectType(Enum):\n OBJECT = 1\n ROOM = 2\n WALL = 3\n 
WINDOW = 4\n DOOR = 5" }, { "identifier": "Room", "path": "langsuite/world.py", "snippet": "class Room(Object2D):\n def __init__(\n self,\n room_id: str,\n *,\n alias: Optional[str] = None,\n geometry: Optional[Polygon2D] = None,\n asset_id: Optional[str] = None,\n **kwargs,\n ):\n super().__init__(\n ObjectType.ROOM,\n room_id,\n alias=alias,\n geometry=geometry,\n asset_id=asset_id,\n **kwargs,\n )\n self.chilren_types = [\n ObjectType.OBJECT,\n ObjectType.DOOR,\n ObjectType.WINDOW,\n ObjectType.WALL,\n ]" }, { "identifier": "Wall", "path": "langsuite/world.py", "snippet": "class Wall(Object2D):\n def __init__(\n self,\n wall_id: str,\n *,\n alias: Optional[str],\n geometry: Optional[Geometry],\n asset_id: Optional[str],\n room2room: Union[Tuple[str], str] = [],\n **kwargs,\n ):\n super().__init__(\n ObjectType.WALL,\n wall_id,\n alias=alias,\n geometry=geometry,\n asset_id=asset_id,\n **kwargs,\n )\n self.chilren_types = [ObjectType.OBJECT, ObjectType.DOOR, ObjectType.WINDOW]\n self.room2room = [room2room] if type(room2room) == str else room2room" }, { "identifier": "Window", "path": "langsuite/world.py", "snippet": "class Window(Object2D):\n def __init__(\n self,\n window_id: str,\n *,\n alias: Optional[str] = None,\n geometry: Optional[Polygon2D] = None,\n asset_id: Optional[str] = None,\n room2room: Tuple[str] = [],\n **kwargs,\n ):\n super().__init__(\n ObjectType.WINDOW,\n window_id,\n alias=alias,\n geometry=geometry,\n asset_id=asset_id,\n **kwargs,\n )\n self.room2room = room2room\n self.chilren_types = []" }, { "identifier": "World", "path": "langsuite/world.py", "snippet": "class World:\n def __init__(self, world_id: str):\n self.world_id = world_id\n self.rooms: Dict[str, Room] = dict()\n self.walls: Dict[str, Wall] = dict()\n self.doors: Dict[str, Door] = dict()\n self.windows: Dict[str, Window] = dict()\n self.objects: Dict[str, Object2D] = dict()\n self.grid_size = None\n self.room_polygons = None\n self.id2object = {}\n\n @classmethod\n def create(cls, world_cfg):\n world_type = world_cfg.get(\"type\")\n if world_type is None or len(world_type) == 0:\n raise ValueError(\"World type must be provided to create a world.\")\n\n if WORLD_REGISTRY.hasRegistered(world_type):\n return WORLD_REGISTRY.get(world_type).create(world_cfg)\n else:\n raise NotImplementedError(f\"World type {world_type} not found.\")\n\n def add_room(self, room: Room) -> Optional[str]:\n return NotImplementedError()" } ]
import copy
import math
import random
import numpy as np
import plotly.graph_objects as go
from collections import defaultdict
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
from langsuite.constants import CSS4_COLORS
from langsuite.shapes import Geometry, Point2D, Polygon2D
from langsuite.utils.logging import logger
from langsuite.world import (
    WORLD_REGISTRY,
    Door,
    Object2D,
    ObjectType,
    Room,
    Wall,
    Window,
    World,
)
7,253
alias: Optional[str] = None, geometry: Optional[Polygon2D] = None, class_name: Optional[str] = None, room2room: Tuple[str] = ..., openable: bool = True, is_open: bool = True, walls: Tuple[str] = ..., **kwargs, ): super().__init__( door_id, alias=alias, geometry=geometry, class_name=class_name, room2room=room2room, openable=openable, is_open=is_open, **kwargs, ) self.walls = walls self.class_name = class_name @classmethod def create(cls, door): is_open = door.get("openness", 1) == 1 openable = door.get("openable", False) polys_2d = Polygon2D(door["polygon"]) room2room = [door["room0"], door["room1"]] class_name = door["class_name"] # "wall|3|10.14|3.38|15.21|3.38" return cls( door["id"], room2room=room2room, is_open=is_open, openable=openable, class_name=class_name, geometry=polys_2d, ) def flip(self) -> None: """Flip doors wrt. wall attribute""" if len(self.walls) > 1 and "exterior" not in self.walls[1]: # Do not flip if the door is connected to outside. wall0, wall1 = self.walls self.walls = [wall1, wall0] self.room2room = [self.room2room[1], self.room2room[0]] def set_open(self, open=True): self.is_open = open def plot(self, axes=None): if self.geometry is None: return x, y = self.geometry.shapely_geo.xy axes.plot(x, y, color="green", linewidth=3) def render(self, fig=None): if self.geometry is None: return if not fig: fig = go.Figure() x, y = self.geometry.shapely_geo.exterior.xy fig.add_shape( type="rect", xref="x", yref="y", x0=self.geometry.x_min, y0=self.geometry.y_min, x1=self.geometry.x_max, y1=self.geometry.y_max, opacity=0.2, fillcolor="lightgreen", line=dict(width=0), ) class CwahWindow(Window): def __init__( self, window_id: str, *, alias: Optional[str] = None, geometry: Optional[Geometry] = None, class_name: Optional[str] = None, room2room: Tuple[str] = ..., walls: Tuple[str] = ..., **kwargs, ): super().__init__( window_id, alias=alias, geometry=geometry, class_name=class_name, room2room=room2room, **kwargs, ) self.walls = walls self.class_name = class_name @classmethod def create(cls, window): room2room = [window["room0"], window["room1"]] polys_2d = Polygon2D(window["polygon"]) return cls( window["id"], geometry=polys_2d, room2room=room2room, class_name=window["class_name"], ) def plot(self, axes=None): if self.geometry is None: return x, y = self.geometry.shapely_geo.xy axes.plot(x, y, color="blue", linewidth=5)
# Copyright (c) BIGAI Research. All rights reserved. # Licensed under the MIT license. from __future__ import annotations CwahPath = Path(__file__).parent def ToEulerAngles(q): sinp = 2 * (q[3] * q[1] - q[0] * q[2]) sinp = int(sinp) pitch = math.asin(sinp) return pitch def get_bbox(center, size): minx = center[0] - (1 / 2) * size[0] maxx = center[0] + (1 / 2) * size[0] minz = center[2] - (1 / 2) * size[2] maxz = center[2] + (1 / 2) * size[2] return [[minx, minz], [minx, maxz], [maxx, maxz], [maxx, minz]] class CwahWall(Wall): def __init__( self, wall_id: str, *, alias: Optional[str] = None, geometry: Optional[Geometry] = None, class_name: Optional[str] = None, room2room: Union[Tuple[str], str] = list(), empty: bool, **kwargs, ): super().__init__( wall_id, alias=alias, geometry=geometry, class_name=class_name, asset_id="not_exist", room2room=room2room, **kwargs, ) self.empty = empty self.class_name = class_name @classmethod def create(cls, wall_data): polys_2d = Polygon2D(wall_data["polygon"]) empty = wall_data.get("empty", False) return cls( wall_data["id"], geometry=polys_2d, class_name=wall_data["class_name"], props=wall_data, empty=empty, ) def plot(self, axes=None): if self.geometry is None: return x, y = self.geometry.shapely_geo.exterior.xy if self.empty: axes.plot(x, y, color="black", linestyle="-.", linewidth=0.5) else: axes.plot(x, y, color="black", linewidth=0.5) axes.fill(x, y, color="gray") def render(self, fig=None): if self.geometry is None: return if not fig: fig = go.Figure() x, y = self.geometry.shapely_geo.exterior.xy fig.add_shape( type="rect", xref="x", yref="y", x0=self.geometry.x_min, y0=self.geometry.y_min, x1=self.geometry.x_max, y1=self.geometry.y_max, opacity=0.2, fillcolor="black", line=dict(width=0), ) class CwahDoor(Door): def __init__( self, door_id: str, *, alias: Optional[str] = None, geometry: Optional[Polygon2D] = None, class_name: Optional[str] = None, room2room: Tuple[str] = ..., openable: bool = True, is_open: bool = True, walls: Tuple[str] = ..., **kwargs, ): super().__init__( door_id, alias=alias, geometry=geometry, class_name=class_name, room2room=room2room, openable=openable, is_open=is_open, **kwargs, ) self.walls = walls self.class_name = class_name @classmethod def create(cls, door): is_open = door.get("openness", 1) == 1 openable = door.get("openable", False) polys_2d = Polygon2D(door["polygon"]) room2room = [door["room0"], door["room1"]] class_name = door["class_name"] # "wall|3|10.14|3.38|15.21|3.38" return cls( door["id"], room2room=room2room, is_open=is_open, openable=openable, class_name=class_name, geometry=polys_2d, ) def flip(self) -> None: """Flip doors wrt. wall attribute""" if len(self.walls) > 1 and "exterior" not in self.walls[1]: # Do not flip if the door is connected to outside. 
wall0, wall1 = self.walls self.walls = [wall1, wall0] self.room2room = [self.room2room[1], self.room2room[0]] def set_open(self, open=True): self.is_open = open def plot(self, axes=None): if self.geometry is None: return x, y = self.geometry.shapely_geo.xy axes.plot(x, y, color="green", linewidth=3) def render(self, fig=None): if self.geometry is None: return if not fig: fig = go.Figure() x, y = self.geometry.shapely_geo.exterior.xy fig.add_shape( type="rect", xref="x", yref="y", x0=self.geometry.x_min, y0=self.geometry.y_min, x1=self.geometry.x_max, y1=self.geometry.y_max, opacity=0.2, fillcolor="lightgreen", line=dict(width=0), ) class CwahWindow(Window): def __init__( self, window_id: str, *, alias: Optional[str] = None, geometry: Optional[Geometry] = None, class_name: Optional[str] = None, room2room: Tuple[str] = ..., walls: Tuple[str] = ..., **kwargs, ): super().__init__( window_id, alias=alias, geometry=geometry, class_name=class_name, room2room=room2room, **kwargs, ) self.walls = walls self.class_name = class_name @classmethod def create(cls, window): room2room = [window["room0"], window["room1"]] polys_2d = Polygon2D(window["polygon"]) return cls( window["id"], geometry=polys_2d, room2room=room2room, class_name=window["class_name"], ) def plot(self, axes=None): if self.geometry is None: return x, y = self.geometry.shapely_geo.xy axes.plot(x, y, color="blue", linewidth=5)
class CwahRoom(Room):
9
2023-11-01 01:47:00+00:00
12k
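In the sample above, Point2D and Polygon2D are thin wrappers around shapely: Polygon2D.contains delegates to shapely_geo.contains and Polygon2D.rotate to shapely.affinity.rotate. The snippet below exercises those same shapely calls directly; the square footprint and the test point are invented values used only to illustrate the behaviour the wrappers rely on.

from shapely.affinity import rotate
from shapely.geometry import Point, Polygon

# A 4x4 square footprint and a point that lies inside it.
room = Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
agent = Point(1.0, 1.0)

print(room.contains(agent))   # True, the check that Polygon2D.contains forwards to

# Rotation about the geometry's bounding-box center, matching the
# origin="center" default used by Polygon2D.rotate.
rotated = rotate(room, 45, origin="center", use_radians=False)
print(list(rotated.exterior.coords))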
radekd91/inferno
inferno/datasets/LRS3Pseudo3DDM.py
[ { "identifier": "LRS3DataModule", "path": "inferno/datasets/LRS3DataModule.py", "snippet": "class LRS3DataModule(FaceVideoDataModule):\nclass LRS3Dataset(VideoDatasetBase):\n def __init__(self, root_dir, output_dir, \n processed_subfolder=None, \n face_detector='mediapipe', \n # landmarks_from='sr_res',\n landmarks_from=None,\n face_detector_threshold=0.9, \n image_size=224, scale=1.25, \n batch_size_train=16,\n batch_size_val=16,\n batch_size_test=16,\n sequence_length_train=16,\n sequence_length_val=16,\n sequence_length_test=16,\n # occlusion_length_train=0,\n # occlusion_length_val=0,\n # occlusion_length_test=0,\n occlusion_settings_train=None,\n occlusion_settings_val=None,\n occlusion_settings_test=None,\n split = \"original\",\n num_workers=4,\n device=None,\n augmentation=None,\n drop_last=True,\n include_processed_audio = True,\n include_raw_audio = True,\n preload_videos=False,\n inflate_by_video_size=False,\n landmark_types = None,\n landmark_sources=None,\n segmentation_source=None,\n segmentation_type =None,\n return_mica_images=False,\n ):\n def prepare_data(self):\n def _filename2index(self, filename):\n def _get_landmark_method(self):\n def _get_segmentation_method(self):\n def _detect_faces(self):\n def _gather_data(self, exist_ok=True):\n def _video_supername(self, sequence_id):\n def _video_set(self, sequence_id):\n def _get_path_to_sequence_files(self, sequence_id, file_type, method=\"\", suffix=\"\", assert_=True): \n def _get_path_to_sequence_restored(self, sequence_id, method=\"\", suffix=\"\"):\n def _get_restoration_network(self, method):\n def _get_jpeg_network(self):\n def _get_superres_network(self, method=\"swin_ir\"):\n def _deep_restore_sequence(self, sequence_id, nets, input_videos = None, output_subfolder = None, \n batch_size=16, resize_to_original=True):\n def _deep_restore_sequence_sr_res(self, sequence_id):\n def _get_num_shards(self, videos_per_shard): \n def _process_video(self, idx, extract_audio=True, restore_videos=True, \n detect_landmarks=True, segment_videos=True, reconstruct_faces=False, \n recognize_emotions=False,):\n def _process_shard(self, videos_per_shard, shard_idx, extract_audio=True,\n restore_videos=True, detect_landmarks=True, segment_videos=True, reconstruct_faces=False,\n recognize_emotions=False,\n ):\n def _get_subsets(self, set_type=None):\n def get_single_video_dataset(self, i):\n def setup(self, stage=None):\n def train_sampler(self):\n def train_dataloader(self):\n def val_dataloader(self):\n def test_dataloader(self):\n def __init__(self,\n root_path,\n output_dir,\n video_list, \n video_metas,\n video_indices,\n # audio_paths, \n audio_metas,\n sequence_length,\n audio_noise_prob=0.0,\n stack_order_audio=4,\n audio_normalization=\"layer_norm\",\n landmark_types=None, \n segmentation_type = \"bisenet\",\n landmark_source = \"original\",\n segmentation_source = \"original\",\n occlusion_length=0,\n occlusion_probability_mouth = 0.0,\n occlusion_probability_left_eye = 0.0,\n occlusion_probability_right_eye = 0.0,\n occlusion_probability_face = 0.0,\n image_size=None, \n transforms : imgaug.augmenters.Augmenter = None,\n hack_length=False,\n use_original_video=True,\n include_processed_audio = True,\n include_raw_audio = True,\n temporal_split_start=None,\n temporal_split_end=None,\n preload_videos=False,\n inflate_by_video_size=False,\n include_filename=False, # if True includes the filename of the video in the sample\n read_video=True,\n read_audio=True,\n reconstruction_type=None,\n return_global_pose = False,\n 
return_appearance = False,\n average_shape_decode = True,\n emotion_type=None,\n return_emotion_feature=False,\n return_mica_images=False,\n ) -> None:\ndef to_torch(what):\ndef stacker(feats, stack_order):" }, { "identifier": "bbox2point", "path": "inferno/datasets/ImageDatasetHelpers.py", "snippet": "def bbox2point(left, right, top, bottom, type='bbox'):\n ''' bbox from detector and landmarks are different\n '''\n if type == 'kpt68':\n old_size = (right - left + bottom - top) / 2 * 1.1\n center_x = right - (right - left) / 2.0\n center_y = bottom - (bottom - top) / 2.0\n # center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0])\n elif type == 'bbox':\n old_size = (right - left + bottom - top) / 2\n center_x = right - (right - left) / 2.0 \n center_y = bottom - (bottom - top) / 2.0 + old_size * 0.12\n # center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0 + old_size * 0.12])\n elif type == \"mediapipe\":\n old_size = (right - left + bottom - top) / 2 * 1.1\n center_x = right - (right - left) / 2.0 \n center_y = bottom - (bottom - top) / 2.0\n # center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0])\n else:\n raise NotImplementedError(f\" bbox2point not implemented for {type} \")\n if isinstance(center_x, np.ndarray):\n center = np.stack([center_x, center_y], axis=1)\n else: \n center = np.array([center_x, center_y])\n return old_size, center" }, { "identifier": "bbpoint_warp", "path": "inferno/datasets/ImageDatasetHelpers.py", "snippet": "def bbpoint_warp(image, center, size, target_size_height, target_size_width=None, output_shape=None, inv=True, landmarks=None, \n order=3 # order of interpolation, bicubic by default\n ):\n target_size_width = target_size_width or target_size_height\n tform = point2transform(center, size, target_size_height, target_size_width)\n tf = tform.inverse if inv else tform\n output_shape = output_shape or (target_size_height, target_size_width)\n dst_image = warp(image, tf, output_shape=output_shape, order=order)\n if landmarks is None:\n return dst_image\n # points need the matrix\n if isinstance(landmarks, np.ndarray):\n assert isinstance(landmarks, np.ndarray)\n tf_lmk = tform if inv else tform.inverse\n dst_landmarks = tf_lmk(landmarks[:, :2])\n elif isinstance(landmarks, list): \n tf_lmk = tform if inv else tform.inverse\n dst_landmarks = [] \n for i in range(len(landmarks)):\n dst_landmarks += [tf_lmk(landmarks[i][:, :2])]\n elif isinstance(landmarks, dict): \n tf_lmk = tform if inv else tform.inverse\n dst_landmarks = {}\n for key, value in landmarks.items():\n dst_landmarks[key] = tf_lmk(landmarks[key][:, :2])\n else: \n raise ValueError(\"landmarks must be np.ndarray, list or dict\")\n return dst_image, dst_landmarks" }, { "identifier": "ConditionedVideoTestDatasetWrapper", "path": "inferno/datasets/ConditionedVideoTestDatasetWrapper.py", "snippet": "class ConditionedVideoTestDatasetWrapper(torch.utils.data.Dataset): \n\n def __init__(self,\n dataset : VideoDatasetBase,\n condition_source, \n condition_settings, \n key_prefix = \"\",\n conditioned_intensity = None, # defaults to 3 if needed and not provided\n ): \n self.dataset = dataset\n self.condition_source = condition_source or \"original\"\n self.condition_settings = condition_settings or None\n self.expand_temporal = True\n self.condition_prefix = key_prefix\n \n if self.condition_source == \"original\":\n self.condition_settings = None\n elif self.condition_source == \"expression\":\n if self.condition_settings is None: 
\n self.condition_settings = list(range(8)) \n \n for i, cond in enumerate(self.condition_settings):\n if isinstance(cond, str):\n self.condition_settings[i] = AffectNetExpressions[cond]\n if self.condition_settings[i] is None or self.condition_settings[i] > 7:\n raise ValueError(f\"Invalid basic expression {cond}\")\n assert isinstance(self.condition_settings, list), \"Condition_settings must be a list of integers\"\n\n elif self.condition_source in [\"gt_expression\", \"gt_expression_intensity\", \"gt_expression_intensity_identity\"]:\n if self.condition_settings is None: \n self.condition_settings = list(range(8)) \n \n for i, cond in enumerate(self.condition_settings):\n if isinstance(cond, str):\n self.condition_settings[i] = AffectNetExpressions[cond]\n if self.condition_settings[i] is None or self.condition_settings[i] > 7:\n raise ValueError(f\"Invalid basic expression {cond}\")\n \n self.conditioned_intensity = conditioned_intensity or 3\n assert isinstance(self.condition_settings, list), \"Condition_settings must be a list of integers\"\n\n elif self.condition_source == \"valence_arousal\":\n if isinstance(self.condition_settings, list):\n self.valence = np.array([self.condition_settings[0]])\n self.arousal = np.array([self.condition_settings[1]])\n else:\n if self.condition_settings is None: \n self.va_step_size = 0.25 \n elif isinstance(self.condition_settings, float):\n self.va_step_size = self.condition_settings\n else:\n raise ValueError(\"Condition settings must be a list or a float when using valence_arousal as source.\")\n # create grid of valence and arousal\n self.valence = np.arange(-1, 1+self.va_step_size, self.va_step_size)\n self.arousal = np.arange(-1, 1+self.va_step_size, self.va_step_size)\n \n elif self.condition_source == \"ravdess_expression\":\n if self.condition_settings is None: \n self.condition_settings = list(range(8)) \n assert isinstance(self.condition_settings, list), \"Condition_settings must be a list of integers\"\n\n elif self.condition_source == \"iemocap_expression\":\n if self.condition_settings is None: \n self.condition_settings = list(range(4)) \n \n assert isinstance(self.condition_settings, list), \"Condition_settings must be a list of integers\"\n\n else:\n raise ValueError(\"Condition source must be either original, expression or valence_arousal or original\")\n\n def __len__(self):\n if self.condition_source == \"expression\":\n return len(self.dataset) * len(self.condition_settings)\n if self.condition_source in [\"gt_expression\", \"gt_expression_intensity\", \"gt_expression_intensity_identity\"]:\n return len(self.dataset) * len(self.condition_settings)\n elif self.condition_source == \"valence_arousal\":\n return len(self.dataset) * len(self.valence) * len(self.arousal)\n elif self.condition_source == \"original\":\n return len(self.dataset)\n elif self.condition_source == \"ravdess_expression\": # ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition \n # {'angry': 0, 'calm': 1, 'disgust': 2, 'fearful': 3, 'happy': 4, 'neutral': 5, 'sad': 6, 'surprised': 7} \n return len(self.dataset) * 8\n elif self.condition_source == 'iemocap_expression': # superb/wav2vec2-base-superb-er\n # {0: 'neu', 1: 'hap', 2: 'ang', 3: 'sad'} \n return len(self.dataset) * 4\n raise NotImplementedError(f\"Condition source {self.condition_source} not implemented\")\n\n def __getitem__(self, index):\n if self.condition_source == \"expression\":\n video_index = index // len(self.condition_settings)\n expression_index = index % 
len(self.condition_settings)\n sample = self.dataset[video_index]\n sample[self.condition_prefix + \"expression\"] = torch.nn.functional.one_hot(torch.tensor(expression_index), len(self.condition_settings)).to(torch.float32)\n # hack for when the conditioning comes from a video emotion net during training and hence needs to be inserted for conditioned generation here\n sample[\"gt_emotion_video_logits\"] = {}\n cam = \"front\" # ugly hack\n sample[\"gt_emotion_video_logits\"][cam] = {}\n sample[\"gt_emotion_video_logits\"][cam] = - (1.-sample[self.condition_prefix + \"expression\"].clone()) * 99\n sample[\"condition_name\"] = AffectNetExpressions(expression_index).name\n elif self.condition_source in [\"gt_expression\", \"gt_expression_intensity\", \"gt_expression_intensity_identity\"]:\n video_index = index // len(self.condition_settings)\n expression_index = index % len(self.condition_settings)\n sample = self.dataset[video_index]\n # sample[self.condition_prefix + \"expression_label\"] = torch.nn.functional.one_hot(torch.tensor(expression_index), len(self.condition_settings)).to(torch.float32)\n sample[self.condition_prefix + \"expression_label\"] = torch.tensor(expression_index)\n if self.condition_source == \"gt_expression_intensity\":\n # intensity = 3 # highest intensity (1 gets subtracted and then one-hot encoded but this happens in the styling module)\n intensity = 3 if self.conditioned_intensity is None else self.conditioned_intensity\n sample[self.condition_prefix + \"expression_intensity\"] = torch.nn.functional.one_hot(torch.tensor(intensity), 3).to(torch.float32)\n elif self.condition_source == \"gt_expression_intensity_identity\":\n # sample[self.condition_prefix + \"expression_intensity\"] = torch.nn.functional.one_hot(torch.tensor(intensity), 3).to(torch.float32)\n # intensity = 3 # highest intensity (1 gets subtracted and then one-hot encoded but this happens in the styling module)\n intensity = 3 if self.conditioned_intensity is None else self.conditioned_intensity\n sample[self.condition_prefix + \"expression_intensity\"] = torch.tensor(intensity)\n identity = 0 # just use the style of the first identity\n # n_identities = len(self.dataset.identity_labels)\n # sample[self.condition_prefix + \"expression_identity\"] = torch.nn.functional.one_hot(torch.tensor(identity), n_identities).to(torch.float32)\n sample[self.condition_prefix + \"expression_identity\"] = torch.tensor(identity)\n sample[\"condition_name\"] = AffectNetExpressions(expression_index).name\n # hack for when the conditioning comes from a video emotion net during training and hence needs to be inserted for conditioned generation here\n sample[\"gt_emotion_video_logits\"] = {}\n cam = \"front\" # ugly hack\n sample[\"gt_emotion_video_logits\"][cam] = {}\n sample[\"gt_emotion_video_logits\"] = sample[self.condition_prefix + \"expression_label\"].clone()\n sample[\"gt_expression_label\"] = sample[self.condition_prefix + \"expression_label\"].clone()\n if self.condition_source == \"gt_expression_intensity\":\n sample[\"condition_name\"] += f\"_int_{intensity}\"\n elif self.condition_source == \"gt_expression_intensity_identity\":\n sample[\"condition_name\"] += f\"_int_{intensity}_id_{identity}\"\n elif self.condition_source == \"valence_arousal\":\n video_index = index // (len(self.valence) * len(self.arousal))\n va_index = index % (len(self.valence) * len(self.arousal))\n valence_index = va_index // len(self.arousal)\n arousal_index = va_index % len(self.arousal)\n # sample = 
self.dataset._getitem(video_index)\n sample = self.dataset[video_index]\n sample[self.condition_prefix + \"valence\"] = torch.tensor(self.valence[valence_index], dtype=torch.float32)\n sample[self.condition_prefix + \"arousal\"] = torch.tensor(self.arousal[arousal_index], dtype=torch.float32)\n sample[\"condition_name\"] = f\"valence_{self.valence[valence_index]:0.2f}_arousal_{self.arousal[arousal_index]:0.2f}\"\n \n elif self.condition_source == \"original\":\n video_index = index\n sample = self.dataset[video_index]\n return sample\n elif self.condition_source == \"ravdess_expression\":\n exp_dict = {0: 'angry', 1: 'calm', 2: 'disgust', 3: 'fearful', 4: 'happy', 5:'neutral', 6:'sad', 7: 'surprised'} \n video_index = index // len(exp_dict)\n expression_index = index % len(exp_dict)\n sample = self.dataset[video_index]\n sample[self.condition_prefix + \"expression\"] = torch.nn.functional.one_hot(torch.tensor(expression_index), len(self.exp_dict)).to(torch.float32)\n sample[\"condition_name\"] = exp_dict[expression_index] \n \n elif self.condition_source == \"iemocap_expression\":\n exp_dict = {0: 'neu', 1: 'hap', 2: 'ang', 'sad': 3} \n video_index = index // len(exp_dict)\n expression_index = index % len(exp_dict)\n sample = self.dataset[video_index]\n sample[self.condition_prefix + \"expression\"] = torch.nn.functional.one_hot(torch.tensor(expression_index), len(self.exp_dict)).to(torch.float32)\n sample[\"condition_name\"] = exp_dict[expression_index] \n else:\n raise NotImplementedError(f\"Condition source '{self.condition_source}' not implemented\")\n \n try:\n T = sample[\"raw_audio\"].size(0)\n except KeyError:\n try:\n T = sample[\"video\"].size(0)\n except KeyError:\n T = sample[\"gt_vertices\"].size(0)\n if self.expand_temporal: \n if self.condition_source in [\"expression\", \"iemocap_expression\", \"ravdess_expression\"]:\n if self.condition_prefix + \"expression\" in sample:\n sample[self.condition_prefix + \"expression\"] = sample[self.condition_prefix + \"expression\"][None, ...].repeat(T, 1)\n elif self.condition_source == \"valence_arousal\":\n if self.condition_prefix + \"valence\" in sample:\n sample[self.condition_prefix + \"valence\"] = sample[self.condition_prefix + \"valence\"][None, ...].repeat(T, 1)\n if self.condition_prefix + \"arousal\" in sample:\n sample[self.condition_prefix + \"arousal\"] = sample[self.condition_prefix + \"arousal\"][None, ...].repeat(T, 1)\n # TODO: expression intensity \n elif self.condition_source == \"gt_expression\":\n if self.condition_prefix + \"expression_label\" in sample:\n sample[self.condition_prefix + \"expression_label\"] = sample[self.condition_prefix + \"expression_label\"][None, ...].repeat(T)\n elif self.condition_source == \"gt_expression_intensity\":\n if self.condition_prefix + \"expression_label\" in sample:\n sample[self.condition_prefix + \"expression_label\"] = sample[self.condition_prefix + \"expression_label\"][None, ...].repeat(T)\n if self.condition_prefix + \"expression_intensity\" in sample:\n sample[self.condition_prefix + \"expression_intensity\"] = sample[self.condition_prefix + \"expression_intensity\"][None, ...].repeat(T)\n elif self.condition_source == \"gt_expression_intensity_identity\":\n if self.condition_prefix + \"expression_label\" in sample:\n sample[self.condition_prefix + \"expression_label\"] = sample[self.condition_prefix + \"expression_label\"][None, ...].repeat(T)\n if self.condition_prefix + \"expression_intensity\" in sample:\n sample[self.condition_prefix + \"expression_intensity\"] = 
sample[self.condition_prefix + \"expression_intensity\"][None, ...].repeat(T)\n if self.condition_prefix + \"expression_identity\" in sample:\n sample[self.condition_prefix + \"expression_identity\"] = sample[self.condition_prefix + \"expression_identity\"][None, ...].repeat(T)\n else:\n raise NotImplementedError(f\"Condition source '{self.condition_source}' not implemented\")\n sample[\"condition_name\"] = [sample[\"condition_name\"] ] * T\n # add video name to sample\n # sample[\"video_name\"] = str(self.dataset.video_list[video_index])\n return sample" } ]
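The bbox2point helper in the context above converts a face-detector box into the (old_size, center) pair that bbpoint_warp later turns into a crop transform; for plain 'bbox' detections the center is shifted down by 0.12 * old_size. Below is a tiny worked example of that arithmetic with invented pixel values, kept only to make the formula concrete.

import numpy as np

# Hypothetical detector box (left, right, top, bottom) in pixels.
left, right, top, bottom = 100.0, 200.0, 80.0, 220.0

# Same arithmetic as the type='bbox' branch of bbox2point:
old_size = (right - left + bottom - top) / 2                 # (100 + 140) / 2 = 120.0
center_x = right - (right - left) / 2.0                      # 150.0
center_y = bottom - (bottom - top) / 2.0 + old_size * 0.12   # 150.0 + 14.4 = 164.4
center = np.array([center_x, center_y])

print(old_size, center)  # 120.0 [150.  164.4]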
from inferno.datasets.LRS3DataModule import LRS3DataModule, LRS3Dataset, robust_collate
from inferno.datasets.ImageDatasetHelpers import bbox2point, bbpoint_warp
from inferno.datasets.ConditionedVideoTestDatasetWrapper import ConditionedVideoTestDatasetWrapper
from pathlib import Path
import imgaug
import numpy as np
import torch
import omegaconf
import time
7,705
# landmark_source=self.landmark_sources, # segmentation_source=self.segmentation_source, temporal_split_start=self.temporal_split[0] + self.temporal_split[1] if self.temporal_split is not None else None, temporal_split_end= sum(self.temporal_split) if self.temporal_split is not None else None, # preload_videos=self.preload_videos, # inflate_by_video_size=self.inflate_by_video_size, inflate_by_video_size=False, include_filename=True, read_video=self.read_video, read_audio=self.read_audio, reconstruction_type=self.reconstruction_type, return_global_pose=self.return_global_pose, return_appearance=self.return_appearance, average_shape_decode=self.average_shape_decode, emotion_type=self.emotion_type, return_emotion_feature=self.return_emotion_feature, ) self.test_set_cond = ConditionedVideoTestDatasetWrapper( self.test_set_cond_, self.test_condition_source, self.test_condition_settings, key_prefix="gt_", ) max_training_test_samples = 2 self.test_set_train_cond_ = LRS3Pseudo3dDataset(self.root_dir, self.output_dir, self.video_list, self.video_metas, sorted(train)[:max_training_test_samples], self.audio_metas, # sequence_length=self.sequence_length_test, sequence_length="all", image_size=self.image_size, **self.occlusion_settings_test, hack_length=False, # use_original_video=self.use_original_video, include_processed_audio = self.include_processed_audio, include_raw_audio = self.include_raw_audio, # landmark_types=self.landmark_types, # landmark_source=self.landmark_sources, # segmentation_source=self.segmentation_source, temporal_split_start= 0 if self.temporal_split is not None else None, temporal_split_end=self.temporal_split[0] if self.temporal_split is not None else None, # preload_videos=self.preload_videos, # inflate_by_video_size=self.inflate_by_video_size, inflate_by_video_size=False, include_filename=True, read_video=self.read_video, read_audio=self.read_audio, reconstruction_type=self.reconstruction_type, return_global_pose=self.return_global_pose, return_appearance=self.return_appearance, average_shape_decode=self.average_shape_decode, emotion_type=self.emotion_type, return_emotion_feature=self.return_emotion_feature, ) self.test_set_train_cond = ConditionedVideoTestDatasetWrapper( self.test_set_train_cond_, self.test_condition_source, self.test_condition_settings, key_prefix="gt_", ) max_validation_test_samples = 2 self.test_set_val_cond_ = LRS3Pseudo3dDataset(self.root_dir, self.output_dir, self.video_list, self.video_metas, sorted(val)[:max_validation_test_samples], self.audio_metas, # sequence_length=self.sequence_length_test, sequence_length="all", image_size=self.image_size, **self.occlusion_settings_val, hack_length=False, # use_original_video=self.use_original_video, include_processed_audio = self.include_processed_audio, include_raw_audio = self.include_raw_audio, # landmark_types=self.landmark_types, # landmark_source=self.landmark_sources, # segmentation_source=self.segmentation_source, temporal_split_start=self.temporal_split[0] if self.temporal_split is not None else None, temporal_split_end= self.temporal_split[0] + self.temporal_split[1] if self.temporal_split is not None else None, # preload_videos=self.preload_videos, inflate_by_video_size=False, include_filename=True, read_video=self.read_video, read_audio=self.read_audio, reconstruction_type=self.reconstruction_type, return_global_pose=self.return_global_pose, return_appearance=self.return_appearance, average_shape_decode=self.average_shape_decode, emotion_type=self.emotion_type, 
return_emotion_feature=self.return_emotion_feature, ) self.test_set_val_cond = ConditionedVideoTestDatasetWrapper( self.test_set_val_cond_, self.test_condition_source, self.test_condition_settings, key_prefix="gt_", ) def test_dataloader(self): test_dls = [] test_dl = super().test_dataloader() if test_dl is not None: if not isinstance(test_dl, list): test_dl = [test_dl] test_dls += test_dl self.test_set_names += ["test"] test_dls += [torch.utils.data.DataLoader(self.test_set_train, shuffle=False, # num_workers=self.num_workers, num_workers=0, pin_memory=True, batch_size=self.batch_size_test, drop_last=False, # drop_last=self.drop_last,
class LRS3Pseudo3DDM(LRS3DataModule): def __init__(self, root_dir, output_dir, processed_subfolder=None, face_detector='mediapipe', # landmarks_from='sr_res', landmarks_from=None, face_detector_threshold=0.9, image_size=224, scale=1.25, batch_size_train=16, batch_size_val=16, batch_size_test=16, sequence_length_train=16, sequence_length_val=16, sequence_length_test=16, # occlusion_length_train=0, # occlusion_length_val=0, # occlusion_length_test=0, occlusion_settings_train=None, occlusion_settings_val=None, occlusion_settings_test=None, split = "original", num_workers=4, device=None, augmentation=None, drop_last=True, include_processed_audio = True, include_raw_audio = True, test_condition_source=None, test_condition_settings=None, inflate_by_video_size=False, preload_videos=False, read_video=True, read_audio=True, reconstruction_type=None, return_global_pose= False, return_appearance= False, average_shape_decode= True, emotion_type=None, return_emotion_feature=False, ): super().__init__(root_dir, output_dir, processed_subfolder, face_detector, landmarks_from, face_detector_threshold, image_size, scale, batch_size_train, batch_size_val, batch_size_test, sequence_length_train, sequence_length_val, sequence_length_test, occlusion_settings_train, occlusion_settings_val, occlusion_settings_test, split, num_workers, device, augmentation, drop_last, include_processed_audio=include_processed_audio, include_raw_audio=include_raw_audio, inflate_by_video_size=inflate_by_video_size, preload_videos=preload_videos ) self.test_condition_source = test_condition_source or "original" self.test_condition_settings = test_condition_settings self.read_video = read_video self.read_audio = read_audio self.reconstruction_type = reconstruction_type if self.reconstruction_type is not None: if isinstance(self.reconstruction_type, str): self.reconstruction_type = [self.reconstruction_type] elif isinstance(self.reconstruction_type, omegaconf.listconfig.ListConfig): self.reconstruction_type = list(self.reconstruction_type) assert isinstance(self.reconstruction_type, list), "reconstruction_type must be a list or None" self.return_global_pose = return_global_pose self.return_appearance = return_appearance self.average_shape_decode = average_shape_decode self.emotion_type = emotion_type self.return_emotion_feature = return_emotion_feature def setup(self, stage=None): train, val, test = self._get_subsets(self.split) # training_augmenter = create_image_augmenter(self.image_size, self.augmentation) training_augmenter = None self.training_set = LRS3Pseudo3dDataset(self.root_dir, self.output_dir, self.video_list, self.video_metas, train, self.audio_metas, self.sequence_length_train, image_size=self.image_size, transforms=training_augmenter, **self.occlusion_settings_train, hack_length=False, # use_original_video=self.use_original_video, include_processed_audio = self.include_processed_audio, include_raw_audio = self.include_raw_audio, # landmark_types=self.landmark_types, # landmark_source=self.landmark_sources, # segmentation_source=self.segmentation_source, temporal_split_start= 0 if self.temporal_split is not None else None, temporal_split_end=self.temporal_split[0] if self.temporal_split is not None else None, preload_videos=self.preload_videos, inflate_by_video_size=self.inflate_by_video_size, read_video=self.read_video, read_audio=self.read_audio, reconstruction_type=self.reconstruction_type, return_global_pose=self.return_global_pose, return_appearance=self.return_appearance, 
average_shape_decode=self.average_shape_decode, emotion_type=self.emotion_type, return_emotion_feature=self.return_emotion_feature, ) self.validation_set = LRS3Pseudo3dDataset(self.root_dir, self.output_dir, self.video_list, self.video_metas, val, self.audio_metas, self.sequence_length_val, image_size=self.image_size, **self.occlusion_settings_val, hack_length=False, # use_original_video=self.use_original_video, include_processed_audio = self.include_processed_audio, include_raw_audio = self.include_raw_audio, # landmark_types=self.landmark_types, # landmark_source=self.landmark_sources, # segmentation_source=self.segmentation_source, temporal_split_start=self.temporal_split[0] if self.temporal_split is not None else None, temporal_split_end= self.temporal_split[0] + self.temporal_split[1] if self.temporal_split is not None else None, preload_videos=self.preload_videos, inflate_by_video_size=self.inflate_by_video_size, read_video=self.read_video, read_audio=self.read_audio, reconstruction_type=self.reconstruction_type, return_global_pose=self.return_global_pose, return_appearance=self.return_appearance, average_shape_decode=self.average_shape_decode, emotion_type=self.emotion_type, return_emotion_feature=self.return_emotion_feature, ) self.test_set_names = [] if len(test) > 0: self.test_set_ = LRS3Pseudo3dDataset(self.root_dir, self.output_dir, self.video_list, self.video_metas, test, self.audio_metas, # sequence_length=self.sequence_length_test, sequence_length="all", image_size=self.image_size, **self.occlusion_settings_test, hack_length=False, # use_original_video=self.use_original_video, include_processed_audio = self.include_processed_audio, include_raw_audio = self.include_raw_audio, # landmark_types=self.landmark_types, # landmark_source=self.landmark_sources, # segmentation_source=self.segmentation_source, temporal_split_start=self.temporal_split[0] + self.temporal_split[1] if self.temporal_split is not None else None, temporal_split_end= sum(self.temporal_split) if self.temporal_split is not None else None, # preload_videos=self.preload_videos, # inflate_by_video_size=self.inflate_by_video_size, inflate_by_video_size=False, include_filename=True, read_video=self.read_video, read_audio=self.read_audio, reconstruction_type=self.reconstruction_type, return_global_pose=self.return_global_pose, return_appearance=self.return_appearance, average_shape_decode=self.average_shape_decode, emotion_type=self.emotion_type, return_emotion_feature=self.return_emotion_feature, ) self.test_set = ConditionedVideoTestDatasetWrapper( self.test_set_, None, None, key_prefix="gt_", ) max_training_test_samples = 2 self.test_set_train_ = LRS3Pseudo3dDataset(self.root_dir, self.output_dir, self.video_list, self.video_metas, sorted(train)[:max_training_test_samples], self.audio_metas, # sequence_length=self.sequence_length_test, sequence_length="all", image_size=self.image_size, **self.occlusion_settings_test, hack_length=False, # use_original_video=self.use_original_video, include_processed_audio = self.include_processed_audio, include_raw_audio = self.include_raw_audio, # landmark_types=self.landmark_types, # landmark_source=self.landmark_sources, # segmentation_source=self.segmentation_source, temporal_split_start= 0 if self.temporal_split is not None else None, temporal_split_end=self.temporal_split[0] if self.temporal_split is not None else None, # preload_videos=self.preload_videos, # inflate_by_video_size=self.inflate_by_video_size, inflate_by_video_size=False, include_filename=True, 
read_video=self.read_video, read_audio=self.read_audio, reconstruction_type=self.reconstruction_type, return_global_pose=self.return_global_pose, return_appearance=self.return_appearance, average_shape_decode=self.average_shape_decode, emotion_type=self.emotion_type, return_emotion_feature=self.return_emotion_feature, ) self.test_set_train = ConditionedVideoTestDatasetWrapper( self.test_set_train_, None, None, key_prefix="gt_", ) max_validation_test_samples = 2 self.test_set_val_ = LRS3Pseudo3dDataset(self.root_dir, self.output_dir, self.video_list, self.video_metas, sorted(val)[:max_validation_test_samples], self.audio_metas, # sequence_length=self.sequence_length_test, sequence_length="all", image_size=self.image_size, **self.occlusion_settings_test, hack_length=False, # use_original_video=self.use_original_video, include_processed_audio = self.include_processed_audio, include_raw_audio = self.include_raw_audio, # landmark_types=self.landmark_types, # landmark_source=self.landmark_sources, # segmentation_source=self.segmentation_source, temporal_split_start=self.temporal_split[0] if self.temporal_split is not None else None, temporal_split_end= self.temporal_split[0] + self.temporal_split[1] if self.temporal_split is not None else None, # preload_videos=self.preload_videos, inflate_by_video_size=False, include_filename=True, read_video=self.read_video, read_audio=self.read_audio, reconstruction_type=self.reconstruction_type, return_global_pose=self.return_global_pose, return_appearance=self.return_appearance, average_shape_decode=self.average_shape_decode, emotion_type=self.emotion_type, return_emotion_feature=self.return_emotion_feature, ) self.test_set_val = ConditionedVideoTestDatasetWrapper( self.test_set_val_, None, None, key_prefix="gt_", ) # conditioned test set if self.test_condition_source != "original": if len(test) > 0: self.test_set_cond_ = LRS3Pseudo3dDataset(self.root_dir, self.output_dir, self.video_list, self.video_metas, test, self.audio_metas, # sequence_length=self.sequence_length_test, sequence_length="all", image_size=self.image_size, **self.occlusion_settings_test, hack_length=False, # use_original_video=self.use_original_video, include_processed_audio = self.include_processed_audio, include_raw_audio = self.include_raw_audio, # landmark_types=self.landmark_types, # landmark_source=self.landmark_sources, # segmentation_source=self.segmentation_source, temporal_split_start=self.temporal_split[0] + self.temporal_split[1] if self.temporal_split is not None else None, temporal_split_end= sum(self.temporal_split) if self.temporal_split is not None else None, # preload_videos=self.preload_videos, # inflate_by_video_size=self.inflate_by_video_size, inflate_by_video_size=False, include_filename=True, read_video=self.read_video, read_audio=self.read_audio, reconstruction_type=self.reconstruction_type, return_global_pose=self.return_global_pose, return_appearance=self.return_appearance, average_shape_decode=self.average_shape_decode, emotion_type=self.emotion_type, return_emotion_feature=self.return_emotion_feature, ) self.test_set_cond = ConditionedVideoTestDatasetWrapper( self.test_set_cond_, self.test_condition_source, self.test_condition_settings, key_prefix="gt_", ) max_training_test_samples = 2 self.test_set_train_cond_ = LRS3Pseudo3dDataset(self.root_dir, self.output_dir, self.video_list, self.video_metas, sorted(train)[:max_training_test_samples], self.audio_metas, # sequence_length=self.sequence_length_test, sequence_length="all", image_size=self.image_size, 
**self.occlusion_settings_test, hack_length=False, # use_original_video=self.use_original_video, include_processed_audio = self.include_processed_audio, include_raw_audio = self.include_raw_audio, # landmark_types=self.landmark_types, # landmark_source=self.landmark_sources, # segmentation_source=self.segmentation_source, temporal_split_start= 0 if self.temporal_split is not None else None, temporal_split_end=self.temporal_split[0] if self.temporal_split is not None else None, # preload_videos=self.preload_videos, # inflate_by_video_size=self.inflate_by_video_size, inflate_by_video_size=False, include_filename=True, read_video=self.read_video, read_audio=self.read_audio, reconstruction_type=self.reconstruction_type, return_global_pose=self.return_global_pose, return_appearance=self.return_appearance, average_shape_decode=self.average_shape_decode, emotion_type=self.emotion_type, return_emotion_feature=self.return_emotion_feature, ) self.test_set_train_cond = ConditionedVideoTestDatasetWrapper( self.test_set_train_cond_, self.test_condition_source, self.test_condition_settings, key_prefix="gt_", ) max_validation_test_samples = 2 self.test_set_val_cond_ = LRS3Pseudo3dDataset(self.root_dir, self.output_dir, self.video_list, self.video_metas, sorted(val)[:max_validation_test_samples], self.audio_metas, # sequence_length=self.sequence_length_test, sequence_length="all", image_size=self.image_size, **self.occlusion_settings_val, hack_length=False, # use_original_video=self.use_original_video, include_processed_audio = self.include_processed_audio, include_raw_audio = self.include_raw_audio, # landmark_types=self.landmark_types, # landmark_source=self.landmark_sources, # segmentation_source=self.segmentation_source, temporal_split_start=self.temporal_split[0] if self.temporal_split is not None else None, temporal_split_end= self.temporal_split[0] + self.temporal_split[1] if self.temporal_split is not None else None, # preload_videos=self.preload_videos, inflate_by_video_size=False, include_filename=True, read_video=self.read_video, read_audio=self.read_audio, reconstruction_type=self.reconstruction_type, return_global_pose=self.return_global_pose, return_appearance=self.return_appearance, average_shape_decode=self.average_shape_decode, emotion_type=self.emotion_type, return_emotion_feature=self.return_emotion_feature, ) self.test_set_val_cond = ConditionedVideoTestDatasetWrapper( self.test_set_val_cond_, self.test_condition_source, self.test_condition_settings, key_prefix="gt_", ) def test_dataloader(self): test_dls = [] test_dl = super().test_dataloader() if test_dl is not None: if not isinstance(test_dl, list): test_dl = [test_dl] test_dls += test_dl self.test_set_names += ["test"] test_dls += [torch.utils.data.DataLoader(self.test_set_train, shuffle=False, # num_workers=self.num_workers, num_workers=0, pin_memory=True, batch_size=self.batch_size_test, drop_last=False, # drop_last=self.drop_last,
collate_fn=robust_collate
0
2023-11-07 20:13:32+00:00
12k
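The dataset-module row above repeatedly computes temporal_split_start / temporal_split_end from a three-element self.temporal_split list: the training window starts at 0, validation starts where training ends, and the test window runs to sum(self.temporal_split). The short sketch below restates that boundary arithmetic as a standalone helper so the pattern is easier to check in isolation; the function name compute_temporal_boundaries and the assumption that temporal_split holds three non-negative lengths (e.g. fractions of the video) are illustrative and not part of the original module.

# Minimal sketch (not from the original repo): the start/end arithmetic
# used above for the train / val / test temporal windows.
from typing import Optional, Sequence, Tuple

def compute_temporal_boundaries(
    temporal_split: Optional[Sequence[float]],
) -> Tuple[Tuple[Optional[float], Optional[float]], ...]:
    # When temporal_split is None every boundary is None, mirroring the
    # "... if self.temporal_split is not None else None" guards above.
    if temporal_split is None:
        return ((None, None), (None, None), (None, None))
    t_train, t_val, t_test = temporal_split
    train = (0, t_train)                                # training window
    val = (t_train, t_train + t_val)                    # validation window
    test = (t_train + t_val, t_train + t_val + t_test)  # ends at sum(temporal_split)
    return (train, val, test)

# Example: an 80/10/10 split expressed as fractions of the video length.
print(compute_temporal_boundaries([0.8, 0.1, 0.1]))
# -> ((0, 0.8), (0.8, 0.9), (0.9, 1.0))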
hxz393/ConfigCenterComparer
ui/table_main.py
[ { "identifier": "COL_INFO", "path": "config/settings.py", "snippet": "COL_INFO = {\n \"name\": {\"col\": 0},\n \"group\": {\"col\": 1},\n \"key\": {\"col\": 2},\n \"pro_value\": {\"col\": 3},\n \"pro_time\": {\"col\": 4},\n \"pre_value\": {\"col\": 5},\n \"pre_time\": {\"col\": 6},\n \"test_value\": {\"col\": 7},\n \"test_time\": {\"col\": 8},\n \"dev_value\": {\"col\": 9},\n \"dev_time\": {\"col\": 10},\n \"consistency\": {\"col\": 11},\n \"skip\": {\"col\": 12},\n\n}" }, { "identifier": "COLOR_SKIP", "path": "config/settings.py", "snippet": "COLOR_SKIP = '#e0e0e0'" }, { "identifier": "COLOR_CONSISTENCY_FULLY", "path": "config/settings.py", "snippet": "COLOR_CONSISTENCY_FULLY = '#ccffcc'" }, { "identifier": "COLOR_CONSISTENCY_PARTIALLY", "path": "config/settings.py", "snippet": "COLOR_CONSISTENCY_PARTIALLY = '#bbddff'" }, { "identifier": "COLOR_EMPTY", "path": "config/settings.py", "snippet": "COLOR_EMPTY = '#ffdbcd'" }, { "identifier": "COLOR_DEFAULT", "path": "config/settings.py", "snippet": "COLOR_DEFAULT = '#ffffff'" }, { "identifier": "log_time", "path": "lib/log_time.py", "snippet": "def log_time(func: Callable) -> Callable:\n \"\"\"\n 一个装饰器,用于记录被装饰函数的运行时间。\n\n 此装饰器在函数执行前后记录时间,计算并记录函数的运行时间。如果函数执行期间出现异常,将记录异常并返回 None。\n\n :param func: 被装饰的函数。\n :type func: Callable\n :return: 包装后的函数。\n :rtype: Callable\n\n :example:\n >>> @log_time\n ... def test_function():\n ... time.sleep(1)\n ...\n >>> test_function() # 这将记录 test_function 的运行时间\n \"\"\"\n\n @wraps(func)\n def wrapper(*args, **kwargs) -> Any:\n \"\"\"\n 包装函数,用于实际执行被装饰的函数并计算其运行时间。\n\n 此函数首先记录开始时间,然后尝试执行原始函数,最后记录结束时间并计算运行时长。如果在执行过程中出现异常,会记录异常信息。\n\n :param args: 原始函数的位置参数。\n :param kwargs: 原始函数的关键字参数。\n :return: 原始函数的返回值,如果出现异常则返回 None。\n :rtype: Any\n \"\"\"\n start_time = time.time()\n try:\n result = func(*args, **kwargs)\n except Exception as e:\n logger.exception(f\"Exception occurred in {func.__name__}: {e}\")\n return None\n else:\n end_time = time.time()\n logger.debug(f\"{func.__name__} executed in {end_time - start_time:.2f} seconds.\")\n return result\n\n return wrapper" }, { "identifier": "ActionCopy", "path": "ui/action_copy.py", "snippet": "class ActionCopy(QObject):\n \"\"\"\n 实现表格数据的复制功能。\n\n 此类用于在表格界面中提供复制操作,允许用户复制选中的表格数据。\n\n :param lang_manager: 用于管理语言设置的对象。\n :type lang_manager: LangManager\n :param table: 表格对象,用于操作表格数据。\n :type table: QTableWidget\n \"\"\"\n status_updated = pyqtSignal(str)\n\n def __init__(self,\n lang_manager: LangManager,\n table: QTableWidget):\n super().__init__()\n self.lang_manager = lang_manager\n self.lang_manager.lang_updated.connect(self.update_lang)\n self.table = table\n self.initUI()\n\n def initUI(self) -> None:\n \"\"\"\n 初始化用户界面组件。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.action_copy = QAction(QIcon(get_resource_path('media/icons8-copy-26.png')), 'Copy')\n self.action_copy.setShortcut('Ctrl+C')\n self.action_copy.triggered.connect(self.copy_selected)\n self.update_lang()\n\n def update_lang(self) -> None:\n \"\"\"\n 更新界面语言设置。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.lang = self.lang_manager.get_lang()\n self.action_copy.setText(self.lang['ui.action_copy_1'])\n self.action_copy.setStatusTip(self.lang['ui.action_copy_2'])\n\n def copy_selected(self) -> Optional[str]:\n \"\"\"\n 执行复制选中的表格数据。\n\n 获取选中的表格范围,并将其中的数据格式化后复制到剪贴板。\n\n :rtype: Optional[str]\n :return: 复制的数据字符串,如果没有选中任何内容,则返回 None。\n \"\"\"\n try:\n selected_ranges = self.table.selectedRanges()\n if not selected_ranges:\n return None\n\n clipboard_data = self._format_selected_data(selected_ranges)\n 
QApplication.clipboard().setText(clipboard_data)\n logger.info(f\"Data copied, size: {len(clipboard_data)}\")\n return clipboard_data\n except Exception:\n logger.exception(\"Error during copying\")\n self.status_updated.emit(self.lang['label_status_error'])\n return None\n\n def _format_selected_data(self, selected_ranges: List[QTableWidgetSelectionRange]) -> str:\n \"\"\"\n 格式化选中的数据为字符串。\n\n 遍历选中的每个区域,提取并格式化数据。\n\n :param selected_ranges: 选中的表格区域列表。\n :type selected_ranges: List[QTableWidgetSelectionRange]\n :rtype: str\n :return: 格式化后的数据字符串。\n \"\"\"\n return '\\n'.join(\n data for selected_range in selected_ranges\n for data in self._extract_range_data(selected_range)\n ).strip()\n\n def _extract_range_data(self, selected_range: QTableWidgetSelectionRange) -> List[str]:\n \"\"\"\n 提取选中区域的数据。\n\n 对给定的表格区域,按行提取数据。\n\n :param selected_range: 选中的表格区域。\n :type selected_range: QTableWidgetSelectionRange\n :rtype: List[str]\n :return: 提取的行数据列表。\n \"\"\"\n return [\n '\\t'.join(self._extract_row_data(row, selected_range))\n for row in range(selected_range.topRow(), selected_range.bottomRow() + 1)\n if not self.table.isRowHidden(row)\n ]\n\n def _extract_row_data(self, row: int, selected_range: QTableWidgetSelectionRange) -> List[str]:\n \"\"\"\n 提取指定行的数据。\n\n 对给定行和列范围,提取每个单元格的文本。\n\n :param row: 行号。\n :type row: int\n :param selected_range: 选中的表格区域。\n :type selected_range: QTableWidgetSelectionRange\n :rtype: List[str]\n :return: 提取的单元格数据列表。\n \"\"\"\n return [\n self.table.item(row, col).text() if self.table.item(row, col) else ''\n for col in range(selected_range.leftColumn(), selected_range.rightColumn() + 1)\n if not self.table.isColumnHidden(col)\n ]" }, { "identifier": "ActionSave", "path": "ui/action_save.py", "snippet": "class ActionSave(QObject):\n \"\"\"\n 实现表格数据的保存功能。\n\n 此类用于在表格界面中提供保存操作,允许用户将表格数据保存到文件。\n\n :param lang_manager: 用于管理语言设置的对象。\n :type lang_manager: LangManager\n :param table: 表格对象,用于操作表格数据。\n :type table: QTableWidget\n \"\"\"\n status_updated = pyqtSignal(str)\n\n def __init__(self,\n lang_manager: LangManager,\n table: QTableWidget):\n super().__init__()\n self.lang_manager = lang_manager\n self.lang_manager.lang_updated.connect(self.update_lang)\n self.table = table\n self.initUI()\n\n def initUI(self) -> None:\n \"\"\"\n 初始化用户界面组件。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.action_save = QAction(QIcon(get_resource_path('media/icons8-save-26.png')), 'Save')\n self.action_save.setShortcut('Ctrl+S')\n self.action_save.triggered.connect(self.save_file)\n self.update_lang()\n\n def update_lang(self) -> None:\n \"\"\"\n 更新界面语言设置。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.lang = self.lang_manager.get_lang()\n self.action_save.setText(self.lang['ui.action_save_1'])\n self.action_save.setStatusTip(self.lang['ui.action_save_2'])\n\n def save_file(self) -> None:\n \"\"\"\n 触发保存文件的操作。\n\n 此方法弹出文件保存对话框,允许用户选择保存格式和位置,并执行保存操作。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n table_data = self._extract_table_data()\n if table_data is None:\n message_show('Critical', self.lang['ui.action_save_8'])\n return None\n\n file_name, file_type = QFileDialog.getSaveFileName(None, self.lang['ui.action_save_3'], \"\", \"CSV Files (*.csv);;JSON Files (*.json)\", options=QFileDialog.Options())\n if not file_name or not file_type:\n return None\n\n save_result = save_data_to_file(file_name, file_type, table_data)\n if save_result:\n self.status_updated.emit(self.lang['ui.action_save_5'])\n logger.info(f\"File saved to: '{file_name}', File size: {os.path.getsize(file_name):,} 
Bytes\")\n else:\n message_show('Critical', self.lang['ui.action_save_7'])\n except Exception:\n logger.exception(\"Error saving file\")\n self.status_updated.emit(self.lang['label_status_error'])\n\n def _extract_table_data(self) -> Optional[Dict[int, Dict[str, str]]]:\n \"\"\"\n 从表格中提取数据。\n\n 此方法遍历配置中心比较器的表格,提取不隐藏的行和列的数据。\n\n :return: 表格数据的字典,键为行号,值为该行的数据字典;如果提取失败,则返回None。\n :rtype: Optional[Dict[int, Dict[str, str]]]\n \"\"\"\n return {\n row: {\n self.table.horizontalHeaderItem(col).text(): self.table.item(row, col).text()\n for col in range(self.table.columnCount()) if not self.table.isColumnHidden(col)\n }\n for row in range(self.table.rowCount()) if not self.table.isRowHidden(row)\n }" }, { "identifier": "ActionSkip", "path": "ui/action_skip.py", "snippet": "class ActionSkip(QObject):\n \"\"\"\n 处理用户界面中忽略操作的类。\n\n :param lang_manager: 语言管理器,用于处理界面语言设置。\n :type lang_manager: LangManager\n :param config_manager: 配置管理器,用于管理应用配置。\n :type config_manager: ConfigManager\n :param table: 主表格界面对象。\n :type table: QTableWidget\n \"\"\"\n status_updated = pyqtSignal(str)\n filter_updated = pyqtSignal(list)\n\n def __init__(self,\n lang_manager: LangManager,\n config_manager: ConfigManager,\n table: QTableWidget):\n super().__init__()\n self.lang_manager = lang_manager\n self.lang_manager.lang_updated.connect(self.update_lang)\n self.config_manager = config_manager\n self.table = table\n self.initUI()\n\n def initUI(self) -> None:\n \"\"\"\n 初始化用户界面组件。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.action_skip = QAction(QIcon(get_resource_path('media/icons8-do-not-disturb-26.png')), 'Skip')\n self.action_skip.setShortcut('F4')\n self.action_skip.triggered.connect(self.skip_items)\n self.update_lang()\n\n def update_lang(self) -> None:\n \"\"\"\n 更新界面语言设置。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.lang = self.lang_manager.get_lang()\n self.action_skip.setText(self.lang['ui.action_skip_1'])\n self.action_skip.setStatusTip(self.lang['ui.action_skip_2'])\n\n def skip_items(self) -> None:\n \"\"\"\n 执行忽略选中项目的操作。\n\n 此方法负责更新忽略列表,并将其写入配置文件。同时更新配置管理器中的配置,并重新应用过滤器。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n updated_skip_list = self.update_skip_list()\n # 更新配置管理器中的配置\n self.config_manager.update_skip_list(updated_skip_list)\n # 重新应用过略器\n self.filter_updated.emit([item.row() for item in self.table.selectedItems()])\n # 发送到状态栏\n self.status_updated.emit(self.lang['ui.action_skip_3'])\n logger.info(f\"Items skipped. 
Skip list length: {len(updated_skip_list)}\")\n except Exception:\n logger.exception(\"Error occurred while skipping items\")\n self.status_updated.emit(self.lang['label_status_error'])\n\n def update_skip_list(self) -> List[str]:\n \"\"\"\n 更新忽略列表并应用颜色。\n\n 此方法遍历选中的项目,将它们添加到忽略列表。\n\n :rtype: List[str]\n :return: 更新后的忽略列表。\n \"\"\"\n # 获取配置\n skip_list = self.config_manager.get_skip_list()\n\n for item in self.table.selectedItems():\n row = item.row()\n self.update_table_item(row)\n skip_list.append(f\"{self.table.item(row, COL_INFO['name']['col']).text()}+{self.table.item(row, COL_INFO['group']['col']).text()}+{self.table.item(row, COL_INFO['key']['col']).text()}\")\n\n return list(set(skip_list))\n\n def update_table_item(self, row: int) -> None:\n \"\"\"\n 更新表格中指定行的项目。\n\n 此方法设置指定行的项目为“已忽略”。\n\n :param row: 要更新的行。\n :type row: int\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.table.item(row, COL_INFO['skip']['col']).setData(Qt.UserRole, \"yes\")\n self.table.item(row, COL_INFO['skip']['col']).setData(Qt.DisplayRole, self.lang['ui.action_start_12'])" }, { "identifier": "ActionUnskip", "path": "ui/action_unskip.py", "snippet": "class ActionUnskip(QObject):\n \"\"\"\n 处理用户界面中取消忽略操作的类。\n\n :param lang_manager: 语言管理器,用于处理界面语言设置。\n :type lang_manager: LangManager\n :param config_manager: 配置管理器,用于管理应用配置。\n :type config_manager: ConfigManager\n :param table: 主表格界面对象。\n :type table: QTableWidget\n \"\"\"\n status_updated = pyqtSignal(str)\n filter_updated = pyqtSignal(list)\n\n def __init__(self,\n lang_manager: LangManager,\n config_manager: ConfigManager,\n table: QTableWidget):\n super().__init__()\n self.lang_manager = lang_manager\n self.lang_manager.lang_updated.connect(self.update_lang)\n self.config_manager = config_manager\n self.table = table\n self.initUI()\n\n def initUI(self) -> None:\n \"\"\"\n 初始化用户界面组件。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.action_unskip = QAction(QIcon(get_resource_path('media/icons8-ok-26.png')), 'UnSkip')\n self.action_unskip.setShortcut('F5')\n self.action_unskip.triggered.connect(self.unskip_items)\n self.update_lang()\n\n def update_lang(self) -> None:\n \"\"\"\n 更新界面语言设置。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.lang = self.lang_manager.get_lang()\n self.action_unskip.setText(self.lang['ui.action_unskip_1'])\n self.action_unskip.setStatusTip(self.lang['ui.action_unskip_2'])\n\n def unskip_items(self) -> None:\n \"\"\"\n 执行取消忽略选中项目的操作。\n\n 此方法负责更新忽略列表,并将其写入配置文件。同时更新配置管理器中的配置,并重新应用过滤器。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n updated_skip_list = self.update_skip_list()\n # 更新配置管理器中的配置\n self.config_manager.update_skip_list(updated_skip_list)\n # 重新应用过略器\n self.filter_updated.emit([item.row() for item in self.table.selectedItems()])\n # 发送到状态栏\n self.status_updated.emit(self.lang['ui.action_unskip_3'])\n logger.info(f\"Items unskipped. 
Skip list length: {len(updated_skip_list)}\")\n except Exception:\n logger.exception(\"Error occurred while unskipping items\")\n self.status_updated.emit(self.lang['label_status_error'])\n\n def update_skip_list(self) -> list:\n \"\"\"\n 更新忽略列表并应用颜色。\n\n 此方法遍历选中的项目,将它们从忽略列表去除。\n\n :rtype: List[str]\n :return: 更新后的忽略列表。\n \"\"\"\n # 获取配置\n skip_list = self.config_manager.get_skip_list()\n selected_keys = []\n\n for item in self.table.selectedItems():\n row = item.row()\n self.update_table_item(row)\n selected_keys.append(f\"{self.table.item(row, COL_INFO['name']['col']).text()}+{self.table.item(row, COL_INFO['group']['col']).text()}+{self.table.item(row, COL_INFO['key']['col']).text()}\")\n\n return list(set([f for f in skip_list if f not in selected_keys]))\n\n def update_table_item(self, row: int) -> None:\n \"\"\"\n 更新表格中指定行的项目。\n\n 此方法设置指定行的项目为“不忽略”。\n\n :param row: 要更新的行。\n :type row: int\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.table.item(row, COL_INFO['skip']['col']).setData(Qt.UserRole, \"no\")\n self.table.item(row, COL_INFO['skip']['col']).setData(Qt.DisplayRole, self.lang['ui.action_start_11'])" }, { "identifier": "ConfigManager", "path": "ui/config_manager.py", "snippet": "class ConfigManager(QObject):\n \"\"\"\n 配置管理器类,负责管理和更新应用程序的配置信息。\n\n 该类包括获取和设置主配置、连接配置和跳过列表的方法,同时提供信号以通知配置更新。\n\n :ivar config_main_updated: 当主配置更新时发出的信号。\n :ivar config_connection_updated: 当连接配置更新时发出的信号。\n :ivar skip_list_updated: 当跳过列表更新时发出的信号。\n \"\"\"\n config_main_updated = pyqtSignal()\n config_connection_updated = pyqtSignal()\n skip_list_updated = pyqtSignal()\n\n def __init__(self):\n super().__init__()\n self._config_main, self._config_apollo, self._config_nacos = read_config_all()\n self._skip_list = read_file_to_list(CONFIG_SKIP_PATH) or []\n\n def get_config_main(self) -> Optional[Dict[str, str]]:\n \"\"\"\n 获取主配置的副本。\n\n :return: 包含主配置的字典,如果出现错误则返回 None。\n :rtype: Optional[Dict[str, str]]\n \"\"\"\n try:\n return copy.deepcopy(self._config_main)\n except Exception:\n logger.exception(\"Failed to get config_main.\")\n return None\n\n def get_config_connection(self) -> Optional[Dict[str, Dict[str, Union[Dict[str, str], bool]]]]:\n \"\"\"\n 根据当前配置中心获取连接配置的副本。\n\n :return: 包含连接配置的字典,如果出现错误则返回 None。\n :rtype: Optional[Dict[str, Dict[str, Union[Dict[str, str], bool]]]]\n \"\"\"\n try:\n if self._config_main['config_center'] == 'Apollo':\n return copy.deepcopy(self._config_apollo)\n else:\n return copy.deepcopy(self._config_nacos)\n except Exception:\n logger.exception(\"Failed to get config_connection.\")\n return None\n\n def get_skip_list(self) -> Optional[List[str]]:\n \"\"\"\n 获取忽略列表的副本。\n\n :return: 包含跳过项的列表,如果出现错误则返回 None。\n :rtype: Optional[List[str]]\n \"\"\"\n try:\n return copy.deepcopy(self._skip_list)\n except Exception:\n logger.exception(\"Failed to get skip_list.\")\n return None\n\n def update_config_main(self, new_config: Dict[str, str]) -> None:\n \"\"\"\n 更新主配置。\n\n :param new_config: 新的主配置。\n :type new_config: Dict[str, str]\n \"\"\"\n try:\n self._config_main = new_config\n self.config_main_updated.emit()\n write_dict_to_json(CONFIG_MAIN_PATH, new_config)\n logger.info(\"Config updated: config_main\")\n except Exception:\n logger.exception(\"Failed to update config: config_main\")\n\n def update_config_connection(self, new_config: Dict[str, Dict[str, Union[Dict[str, str], bool]]]) -> None:\n \"\"\"\n 更新连接配置。\n\n :param new_config: 新的连接配置。\n :type new_config: Dict[str, Dict[str, Union[Dict[str, str], bool]]]\n \"\"\"\n try:\n if self._config_main['config_center'] == 
'Apollo':\n self._config_apollo = new_config\n write_dict_to_json(CONFIG_APOLLO_PATH, new_config)\n else:\n self._config_nacos = new_config\n write_dict_to_json(CONFIG_NACOS_PATH, new_config)\n self.config_connection_updated.emit()\n logger.info(\"Config updated: config_connection\")\n except Exception:\n logger.exception(\"Failed to update config: config_connection\")\n\n def update_skip_list(self, new_config: List[str]) -> None:\n \"\"\"\n 更新忽略列表。\n\n :param new_config: 新忽略列表。\n :type new_config: List[str]\n \"\"\"\n try:\n self._skip_list = new_config\n # 写入到配置文件\n self.skip_list_updated.emit()\n write_list_to_file(CONFIG_SKIP_PATH, new_config)\n logger.info(\"Config updated: skip_list\")\n except Exception:\n logger.exception(\"Failed to update config: skip_list\")" }, { "identifier": "LangManager", "path": "ui/lang_manager.py", "snippet": "class LangManager(QObject):\n \"\"\"\n 语言管理类,用于管理和更新应用程序的语言字典。\n\n 此类继承自 QObject,可发出语言更新的信号。它通过 `get_lang_dict` 函数获取当前语言字典,并提供了更新语言的功能。\n\n :ivar _lang_dict: 当前使用的语言字典。\n :vartype _lang_dict: dict\n \"\"\"\n lang_updated = pyqtSignal()\n\n def __init__(self):\n super().__init__()\n self._lang_dict = get_lang_dict()\n\n def get_lang(self) -> Optional[Dict[str, str]]:\n \"\"\"\n 获取当前使用的语言字典的副本。\n\n :return: 当前语言字典的深拷贝。\n :rtype: Optional[Dict[str, str]]\n \"\"\"\n try:\n return copy.deepcopy(self._lang_dict)\n except Exception:\n logger.exception(\"Failed to retrieve language dictionary.\")\n return None\n\n def update_lang(self, new_lang: str) -> None:\n \"\"\"\n 更新当前使用的语言字典。\n\n :param new_lang: 新语言的标识符。\n :type new_lang: str\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n self._lang_dict = LANG_DICTS.get(new_lang, \"English\")\n self.lang_updated.emit()\n logger.info(f\"Language changed to {new_lang}\")\n except Exception:\n logger.exception(f\"Failed to changed language to {new_lang}\")" } ]
import logging from typing import List, Optional, Dict from PyQt5.QtCore import Qt, QPoint, pyqtSignal from PyQt5.QtGui import QBrush, QColor, QKeyEvent from PyQt5.QtWidgets import QTableWidget, QTableWidgetItem, QMenu, QAction, QHeaderView from config.settings import COL_INFO, COLOR_SKIP, COLOR_CONSISTENCY_FULLY, COLOR_CONSISTENCY_PARTIALLY, COLOR_EMPTY, COLOR_DEFAULT from lib.log_time import log_time from ui.action_copy import ActionCopy from ui.action_save import ActionSave from ui.action_skip import ActionSkip from ui.action_unskip import ActionUnskip from ui.config_manager import ConfigManager from ui.lang_manager import LangManager
9,203
action.setData(index) action.triggered.connect(self._toggle_column_visibility) # 在鼠标右键点击位置显示菜单 menu.exec_(self.horizontalHeader().viewport().mapToGlobal(pos)) def _toggle_column_visibility(self) -> None: """ 根据用户选择,切换列的可见性。 此方法用于根据用户在上下文菜单中的选择,显示或隐藏特定的列。 :rtype: None :return: 无返回值。 """ action = self.sender() if isinstance(action, QAction): column_index = action.data() if action.isChecked(): self.showColumn(column_index) else: self.hideColumn(column_index) def add_row(self, data: List[List[str]]) -> None: """ 向表格中添加一行数据。 :param data: 要添加的数据列表,每个元素是一个列表,第一个元素代表显示的字符串,第二个元素代表附加数据。 :type data: List[List[str]] :rtype: None :return: 无返回值。 """ row_position = 0 try: # 获取最后行数 row_position = self.rowCount() # 插入最后一行 self.insertRow(row_position) # 插入单元格数据 self._fill_row_data(row_position, data) except Exception: logger.exception(f"Error occurred while adding a new row at position {row_position}") self.removeRow(row_position) def _fill_row_data(self, row_position: int, data: List[List[str]]) -> None: """ 填充指定行的数据。 :param row_position: 行位置 :param data: 行数据 :type row_position: int :type data: List[List[str]] :rtype: None :return: 无返回值。 """ for column, (display_text, user_data) in enumerate(data): # 默认设置显示字符串,也叫 Qt.DisplayRole。获取方法item.text() 或 item.data(Qt.DisplayRole) item = QTableWidgetItem(str(display_text)) # 设置实际数据,也叫 Qt.UserRole。获取方法 item.data(Qt.UserRole) item.setData(Qt.UserRole, user_data) # 设置单元格不可编辑状态 item.setFlags(item.flags() & ~Qt.ItemIsEditable) # 正常表格插入方法 self.setItem(row_position, column, item) @log_time def apply_color_to_table(self, rows: List[int] = None) -> None: """ 对整个表格进行着色。通常只有初始化时才不带rows参数,以应用到整表。 :param rows: 可选,要应用颜色的行号列表。 :type rows: List[int], optional :rtype: None :return: 无返回值。 """ color_switch = self.config_manager.get_config_main().get('color_set', 'ON') if color_switch == 'OFF': return if rows is None or not isinstance(rows, list): rows = range(self.rowCount()) try: for row in rows: # 不给隐藏行设置颜色 if self.isRowHidden(row): continue self._process_row_for_color(row) except Exception: logger.exception("Exception in apply_color_to_table method") self.status_updated.emit(self.lang['label_status_error']) def _process_row_for_color(self, row: int) -> None: """ 根据一致性、跳过状态和是否为空值给单行应用颜色。 :param row: 行号,对每行进行颜色处理。 :type row: int :rtype: None :return: 无返回值。 """ consistency_data = self.item(row, COL_INFO['consistency']['col']).data(Qt.UserRole) skip_data = self.item(row, COL_INFO['skip']['col']).data(Qt.UserRole) # 忽略状态为是时设置颜色 if skip_data == 'yes': self.apply_color(row, COLOR_SKIP) return # 根据一致性值设置颜色 if consistency_data == 'fully': self.apply_color(row, COLOR_CONSISTENCY_FULLY) elif consistency_data == 'partially': self.apply_color(row, COLOR_CONSISTENCY_PARTIALLY) else:
""" 此文件定义了 TableMain 类,一个基于 PyQt5 的 QTableWidget 的高级实现。 TableMain 类主要用于显示和管理表格数据,提供了多种扩展功能,包括语言国际化支持、动态配置管理、右键菜单操作等。 该类与多个辅助类(如 LangManager 和 ConfigManager)集成,实现了复杂的功能逻辑。 :author: assassing :contact: https://github.com/hxz393 :copyright: Copyright 2023, hxz393. 保留所有权利。 """ logger = logging.getLogger(__name__) class TableMain(QTableWidget): """ 主表格类,用于展示和管理数据行。 此类继承自 PyQt5 的 QTableWidget,提供了丰富的数据展示和管理功能。包括但不限于数据的展示、行的颜色标记、右键菜单功能以及快捷键支持。 通过与 LangManager 和 ConfigManager 的集成,支持动态语言切换和配置管理。 :param lang_manager: 用于管理界面语言的 LangManager 实例。 :type lang_manager: LangManager :param config_manager: 用于管理配置的 ConfigManager 实例。 :type config_manager: ConfigManager :author: assassing :contact: https://github.com/hxz393 :copyright: Copyright 2023, hxz393. 保留所有权利。 """ status_updated = pyqtSignal(str) filter_updated = pyqtSignal(list) def __init__(self, lang_manager: LangManager, config_manager: ConfigManager): super().__init__() self.lang_manager = lang_manager self.lang_manager.lang_updated.connect(self.update_lang) self.config_manager = config_manager # 实例化用到的组件 self.actionCopy = ActionCopy(self.lang_manager, self) self.actionSave = ActionSave(self.lang_manager, self) self.actionSkip = ActionSkip(self.lang_manager, self.config_manager, self) self.actionUnskip = ActionUnskip(self.lang_manager, self.config_manager, self) # 手动连接实例化的组件信号到转发函数 self.actionCopy.status_updated.connect(self.forward_status) self.actionSave.status_updated.connect(self.forward_status) self.actionSkip.status_updated.connect(self.forward_status) self.actionSkip.filter_updated.connect(self.forward_filter) self.actionUnskip.status_updated.connect(self.forward_status) self.actionUnskip.filter_updated.connect(self.forward_filter) self.initUI() def initUI(self) -> None: """ 初始化用户界面。 此方法负责设置表格的基本属性,如列数、表头标签、选择行为等。还包括对特定列的隐藏和宽度调整策略的设置。 :rtype: None :return: 无返回值。 """ # 先运行语言更新,里面有表头定义 self.update_lang() self.hidden_cols = ["pro_time", "pre_time", "test_time", "dev_time"] self.resize_cols = ["name", "group", "consistency", "skip"] # 配置表格基本属性 self.setColumnCount(len(self.column_headers)) self.setHorizontalHeaderLabels(self.column_headers) self.setEditTriggers(QTableWidget.NoEditTriggers) self.setSelectionBehavior(QTableWidget.SelectItems) # 隐藏垂直表头 self.verticalHeader().setVisible(False) # 启用自动换行,没生效 self.setWordWrap(True) self.setTextElideMode(Qt.ElideNone) # 为表头视图设置上下文菜单事件 self.horizontalHeader().setContextMenuPolicy(Qt.CustomContextMenu) self.horizontalHeader().customContextMenuRequested.connect(self._header_context_menu) # 为表单设置上下文菜单事件 self.setContextMenuPolicy(Qt.CustomContextMenu) self.customContextMenuRequested.connect(self._cell_context_menu) # 隐藏指定列 [self.hideColumn(COL_INFO[i]['col']) for i in self.hidden_cols] # 设置表宽度策略 self.set_header_resize() def set_header_resize(self): """ 设置表头的列宽度和调整策略。 此方法负责定义表头列的宽度调整策略和其他相关属性。它设置了表头列的默认宽度、是否可拖动以及列的自动调整策略。 例如,某些列被设置为根据内容自动调整宽度,而其他列则被设置为可伸缩以适应表格的大小。 :rtype: None :return: 无返回值。 """ # 设置默认列宽度,列宽调整策略,列可拖动 self.horizontalHeader().setSectionsMovable(True) self.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch) self.horizontalHeader().setMinimumSectionSize(100) # 设置要自动调整宽度的列 [self.horizontalHeader().setSectionResizeMode(COL_INFO[i]['col'], QHeaderView.ResizeToContents) for i in self.resize_cols] def update_lang(self) -> None: """ 更新界面语言设置。 :rtype: None :return: 无返回值。 """ self.lang = self.lang_manager.get_lang() self.column_headers = [ self.lang['ui.table_main_1'], self.lang['ui.table_main_2'], self.lang['ui.table_main_3'], self.lang['ui.dialog_settings_connection_2'], 
f"{self.lang['ui.dialog_settings_connection_2']} {self.lang['ui.table_main_4']}", self.lang['ui.dialog_settings_connection_3'], f"{self.lang['ui.dialog_settings_connection_3']} {self.lang['ui.table_main_4']}", self.lang['ui.dialog_settings_connection_4'], f"{self.lang['ui.dialog_settings_connection_4']} {self.lang['ui.table_main_4']}", self.lang['ui.dialog_settings_connection_5'], f"{self.lang['ui.dialog_settings_connection_5']} {self.lang['ui.table_main_4']}", self.lang['ui.table_main_5'], self.lang['ui.table_main_6'], ] # 重新应用到表头 self.setHorizontalHeaderLabels(self.column_headers) # 定义数据和显示映射的字典 consistency_status_mapping = { "inconsistent": self.lang['ui.action_start_8'], "fully": self.lang['ui.action_start_9'], "partially": self.lang['ui.action_start_10'], "unknown": self.lang['ui.action_start_13'], } skip_status_mapping = { "no": self.lang['ui.action_start_11'], "yes": self.lang['ui.action_start_12'], "unknown": self.lang['ui.action_start_13'], } for row in range(self.rowCount()): # 更新忽略状态文字 self._update_item_text(row, "skip", skip_status_mapping) # 更新一致性状态文字 self._update_item_text(row, "consistency", consistency_status_mapping) def _update_item_text(self, row: int, user_data_key: str, text_mapping: Dict[str, str]) -> None: """ 根据提供的文本映射更新指定行的项文本。 此方法用于更新表格或列表中特定行的文本。它根据用户数据键(user_data_key)获取对应行的项,然后根据提供的文本映射(text_mapping)更新该项的文本。 :param row: 要更新的行索引。 :type row: int :param user_data_key: 用于获取项的用户数据键。 :type user_data_key: str :param text_mapping: 用户数据到文本的映射字典。 :type text_mapping: Dict[str, str] :return: 无返回值。 :rtype: None """ item = self.item(row, COL_INFO[user_data_key]['col']) if item is not None: user_data = item.data(Qt.UserRole) if user_data in text_mapping: item.setText(text_mapping[user_data]) def keyPressEvent(self, event: QKeyEvent) -> None: """ 处理键盘事件。 此方法用于处理键盘事件,特别是复制功能的快捷键。如果按下 Ctrl+C,则复制选中的单元格内容。 :param event: 键盘事件对象。 :type event: QKeyEvent :rtype: None :return: 无返回值。 """ if event.key() == Qt.Key_C and (event.modifiers() & Qt.ControlModifier): self.actionCopy.action_copy() else: super().keyPressEvent(event) def _cell_context_menu(self, pos: QPoint) -> None: """ 实现表格单元格的右键菜单功能。 :param pos: 右键点击的位置。 :type pos: QPoint :rtype: None :return: 无返回值。 """ menu = QMenu(self) menu.addAction(self.actionCopy.action_copy) separator = QAction(menu) separator.setSeparator(True) menu.addAction(separator) menu.addAction(self.actionSkip.action_skip) menu.addAction(self.actionUnskip.action_unskip) sep = QAction(menu) sep.setSeparator(True) menu.addAction(sep) menu.addAction(self.actionSave.action_save) menu.exec_(self.viewport().mapToGlobal(pos)) def _header_context_menu(self, pos: QPoint) -> None: """ 实现表头的右键菜单功能。 :param pos: 右键点击的位置。 :type pos: QPoint :rtype: None :return: 无返回值。 """ menu = QMenu(self) # 动态创建一个菜单项,用于隐藏/显示列 for index in range(self.columnCount()): column_name = self.horizontalHeaderItem(index).text() action = menu.addAction(f"{column_name}") action.setCheckable(True) action.setChecked(not self.isColumnHidden(index)) action.setData(index) action.triggered.connect(self._toggle_column_visibility) # 在鼠标右键点击位置显示菜单 menu.exec_(self.horizontalHeader().viewport().mapToGlobal(pos)) def _toggle_column_visibility(self) -> None: """ 根据用户选择,切换列的可见性。 此方法用于根据用户在上下文菜单中的选择,显示或隐藏特定的列。 :rtype: None :return: 无返回值。 """ action = self.sender() if isinstance(action, QAction): column_index = action.data() if action.isChecked(): self.showColumn(column_index) else: self.hideColumn(column_index) def add_row(self, data: List[List[str]]) -> None: """ 向表格中添加一行数据。 :param data: 
要添加的数据列表,每个元素是一个列表,第一个元素代表显示的字符串,第二个元素代表附加数据。 :type data: List[List[str]] :rtype: None :return: 无返回值。 """ row_position = 0 try: # 获取最后行数 row_position = self.rowCount() # 插入最后一行 self.insertRow(row_position) # 插入单元格数据 self._fill_row_data(row_position, data) except Exception: logger.exception(f"Error occurred while adding a new row at position {row_position}") self.removeRow(row_position) def _fill_row_data(self, row_position: int, data: List[List[str]]) -> None: """ 填充指定行的数据。 :param row_position: 行位置 :param data: 行数据 :type row_position: int :type data: List[List[str]] :rtype: None :return: 无返回值。 """ for column, (display_text, user_data) in enumerate(data): # 默认设置显示字符串,也叫 Qt.DisplayRole。获取方法item.text() 或 item.data(Qt.DisplayRole) item = QTableWidgetItem(str(display_text)) # 设置实际数据,也叫 Qt.UserRole。获取方法 item.data(Qt.UserRole) item.setData(Qt.UserRole, user_data) # 设置单元格不可编辑状态 item.setFlags(item.flags() & ~Qt.ItemIsEditable) # 正常表格插入方法 self.setItem(row_position, column, item) @log_time def apply_color_to_table(self, rows: List[int] = None) -> None: """ 对整个表格进行着色。通常只有初始化时才不带rows参数,以应用到整表。 :param rows: 可选,要应用颜色的行号列表。 :type rows: List[int], optional :rtype: None :return: 无返回值。 """ color_switch = self.config_manager.get_config_main().get('color_set', 'ON') if color_switch == 'OFF': return if rows is None or not isinstance(rows, list): rows = range(self.rowCount()) try: for row in rows: # 不给隐藏行设置颜色 if self.isRowHidden(row): continue self._process_row_for_color(row) except Exception: logger.exception("Exception in apply_color_to_table method") self.status_updated.emit(self.lang['label_status_error']) def _process_row_for_color(self, row: int) -> None: """ 根据一致性、跳过状态和是否为空值给单行应用颜色。 :param row: 行号,对每行进行颜色处理。 :type row: int :rtype: None :return: 无返回值。 """ consistency_data = self.item(row, COL_INFO['consistency']['col']).data(Qt.UserRole) skip_data = self.item(row, COL_INFO['skip']['col']).data(Qt.UserRole) # 忽略状态为是时设置颜色 if skip_data == 'yes': self.apply_color(row, COLOR_SKIP) return # 根据一致性值设置颜色 if consistency_data == 'fully': self.apply_color(row, COLOR_CONSISTENCY_FULLY) elif consistency_data == 'partially': self.apply_color(row, COLOR_CONSISTENCY_PARTIALLY) else:
self.apply_color(row, COLOR_DEFAULT)
5
2023-11-07 01:02:38+00:00
12k
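The ActionCopy snippets in the row above describe how the clipboard text is built: visible cells of a row are joined with tabs, visible rows (and the rows of further selection ranges) are joined with newlines, and hidden rows or columns are skipped. The standalone sketch below reproduces that formatting rule on plain Python lists so it can be exercised without a running QTableWidget; the function name format_selection and the (top, left, bottom, right) range tuples are illustrative assumptions, not part of the original class.

# Minimal sketch (assumptions: selection ranges as (top, left, bottom, right)
# tuples, hidden rows/columns as collections of indices) of the tab/newline
# layout that ActionCopy._format_selected_data produces for the clipboard.
from typing import Collection, Iterable, List, Sequence, Tuple

def format_selection(
    grid: Sequence[Sequence[str]],
    ranges: Iterable[Tuple[int, int, int, int]],
    hidden_rows: Collection[int] = (),
    hidden_cols: Collection[int] = (),
) -> str:
    blocks: List[str] = []
    for top, left, bottom, right in ranges:
        for row in range(top, bottom + 1):
            if row in hidden_rows:           # hidden rows are skipped entirely
                continue
            cells = [
                grid[row][col]
                for col in range(left, right + 1)
                if col not in hidden_cols    # hidden columns are skipped too
            ]
            blocks.append("\t".join(cells))  # cells within a row: tab-separated
    return "\n".join(blocks).strip()         # rows and ranges: newline-separated

grid = [["app", "group", "key", "v1"],
        ["app", "group", "key2", "v2"]]
print(format_selection(grid, [(0, 0, 1, 3)], hidden_cols={1}))
# prints (tabs shown as \t):
# app\tkey\tv1
# app\tkey2\tv2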
pytorch-labs/ao
test/test.py
[ { "identifier": "DynamicallyPerAxisQuantizedLinear", "path": "torchao/quantization/dynamic_quant.py", "snippet": "class DynamicallyPerAxisQuantizedLinear(torch.nn.Linear):\n \"\"\"\n This class is a replacement for `torch.nn.Linear`. It implements a\n quantized matmul using int8 dynamic symmetric per-token activation,\n and int8 symmetric per-channel weight quantization\n \"\"\"\n\n def __init__(\n self,\n in_features: int,\n out_features: int,\n bias: bool = True,\n ) -> None:\n super().__init__(in_features, out_features, bias)\n\n def forward(self, X: torch.Tensor, *args, **kwargs) -> torch.Tensor:\n \"\"\"\n Performs the forward pass of the quantized linear layer which consists\n of int8 dynamic symmetric per-token activation and int8 symmetric per-channel weight\n quantization\n\n Args:\n X (torch.Tensor): The input floating point tensor to the quantized linear layer.\n\n Returns:\n torch.Tensor: The output floating point tensor after the quantized matmul and rescale.\n\n \"\"\"\n\n Y = quant_int8_dynamic_per_token_linear(\n X, self.W_int_repr_t, self.W_scales, self.bias, X.dtype\n )\n return Y\n\n @classmethod\n def from_float(\n cls, mod: torch.nn.Linear\n ) -> \"DynamicallyPerAxisQuantizedLinear\":\n \"\"\"\n Converts a `mod` of class `torch.nn.Linear` to the\n `DynamicallyPerAxisQuantizedLinear` class\n\n Args:\n mod (torch.nn.Linear): The original `torch.nn.Linear` module to convert.\n\n Returns:\n DynamicallyPerAxisQuantizedLinear: The converted quantized linear module.\n\n \"\"\"\n\n # create the new module with a toy size to ensure initialization is fast\n fake_in_features, fake_out_features = 8, 8\n new_mod = cls(\n fake_in_features,\n fake_out_features,\n bias=mod.bias is not None,\n )\n new_mod.in_features = mod.in_features\n new_mod.out_features = mod.out_features\n W_int_repr, W_scales, _W_zps = dynamically_quantize_per_channel(\n mod.weight, -128, 127, torch.int8\n )\n new_mod.register_buffer(\"W_int_repr_t\", W_int_repr.contiguous().t())\n new_mod.W_scales = nn.Parameter(W_scales)\n new_mod.bias = mod.bias\n del new_mod.weight\n\n device_to_use = next(mod.parameters()).device\n new_mod.to(device_to_use)\n return new_mod" }, { "identifier": "apply_dynamic_quant", "path": "torchao/quantization/quant_api.py", "snippet": "def apply_dynamic_quant(model, filter_fn=None):\n \"\"\"\n Applies dynamic symmetric per-token activation and per-channel weight\n quantization to all linear layers in the given model using\n module swaps.\n \"\"\"\n _replace_with_custom_fn_if_matches_filter(\n model,\n lambda mod: DynamicallyPerAxisQuantizedLinear.from_float(mod),\n _is_linear if filter_fn is None else filter_fn,\n )" }, { "identifier": "apply_weight_only_int8_quant", "path": "torchao/quantization/quant_api.py", "snippet": "def apply_weight_only_int8_quant(model, filter_fn=None):\n \"\"\"\n Applies weight-only symmetric per-channel int8 quantization to all linear layers\n in the given model using module swaps.\n \"\"\"\n _replace_with_custom_fn_if_matches_filter(\n model,\n WeightOnlyInt8QuantLinear.from_float,\n _is_linear if filter_fn is None else filter_fn,\n )" }, { "identifier": "change_linear_weights_to_int8_dqtensors", "path": "torchao/quantization/quant_api.py", "snippet": "def change_linear_weights_to_int8_dqtensors(model, filter_fn=None):\n \"\"\"\n Converts all linear weight tensors to the `Int8DynamicallyQuantizedLinearWeight`\n Tensor subclass, effectively applying the same form of quantization\n as apply_dynamic_quant while not modifying the linear modules.\n \"\"\"\n if 
filter_fn is None:\n filter_fn = (\n lambda *args:\n _is_linear(*args) and\n _in_features_greater_than_16(*args)\n )\n\n _replace_with_custom_fn_if_matches_filter(\n model,\n _get_subclass_inserter(Int8DynamicallyQuantizedLinearWeight),\n filter_fn\n )" }, { "identifier": "change_linear_weights_to_int8_woqtensors", "path": "torchao/quantization/quant_api.py", "snippet": "def change_linear_weights_to_int8_woqtensors(model, filter_fn=None):\n \"\"\"\n Converts all linear weight tensors to the\n `Int8WeightOnlyQuantizedLinearWeight` tensor subclass,\n effectively applying the same form of quantization\n as apply_dynamic_quant while not modifying the linear modules.\n \"\"\"\n _replace_with_custom_fn_if_matches_filter(\n model,\n _get_subclass_inserter(Int8WeightOnlyQuantizedLinearWeight),\n _is_linear if filter_fn is None else filter_fn,\n )" }, { "identifier": "change_linear_weights_to_int4_woqtensors", "path": "torchao/quantization/quant_api.py", "snippet": "def change_linear_weights_to_int4_woqtensors(model, **kwargs):\n \"\"\"\n Converts all linear weight tensors to the\n `Int4WeightOnlyQuantizedLinearWeight` tensor subclass,\n effectively applying the same form of quantization\n as apply_dynamic_quant while not modifying the linear modules.\n \"\"\"\n filter_fn = kwargs.pop(\"filter_fn\", _is_linear)\n\n _replace_with_custom_fn_if_matches_filter(\n model,\n _get_subclass_inserter(Int4WeightOnlyQuantizedLinearWeight, **kwargs),\n filter_fn,\n )" }, { "identifier": "_replace_with_custom_fn_if_matches_filter", "path": "torchao/quantization/quant_api.py", "snippet": "def _replace_with_custom_fn_if_matches_filter(\n model, replacement_fn, filter_fn, cur_fqn=\"\"\n) -> None:\n \"\"\"\n For each `child` in `model`, replaces it with `replacement_fn(child)`\n if `filter_fn(child)` is `True`\n \"\"\"\n if filter_fn(model, cur_fqn[:-1]):\n model = replacement_fn(model)\n return model\n else:\n for name, child in model.named_children():\n new_child = _replace_with_custom_fn_if_matches_filter(\n child, replacement_fn, filter_fn, f\"{cur_fqn}{name}.\"\n )\n if new_child is not child:\n setattr(model, name, new_child)\n return model" }, { "identifier": "dequantize_per_channel", "path": "torchao/quantization/quant_primitives.py", "snippet": "def dequantize_per_channel(int_repr, scales, zero_points, out_dtype=torch.float32):\n # assumes axis is 0\n y = int_repr.transpose(0, 1)\n y = y.to(out_dtype)\n y = y - zero_points\n y = y * scales\n y = y.transpose(0, 1)\n return y" }, { "identifier": "dequantize_per_tensor", "path": "torchao/quantization/quant_primitives.py", "snippet": "def dequantize_per_tensor(int_repr, scale, zero_point, out_dtype=torch.float32):\n y = int_repr.to(out_dtype)\n if zero_point is not None:\n y -= zero_point\n return y * scale" }, { "identifier": "dynamically_quantize_per_channel", "path": "torchao/quantization/quant_primitives.py", "snippet": "def dynamically_quantize_per_channel(x, quant_min, quant_max, target_dtype):\n # assumes symmetric quantization\n # assumes axis == 0\n # assumes dense memory format\n # TODO(future): relax ^ as needed\n\n # default setup for affine quantization of activations\n eps = torch.finfo(torch.float32).eps\n\n # get min and max\n min_val, max_val = torch.aminmax(x, dim=1)\n\n # calculate scale and zero point based on min and max\n # reference: https://fburl.com/code/srbiybme\n min_val_neg = torch.min(min_val, torch.zeros_like(min_val))\n max_val_pos = torch.max(max_val, torch.zeros_like(max_val))\n device = min_val_neg.device\n\n # reference: 
https://fburl.com/code/4wll53rk\n max_val_pos = torch.max(-min_val_neg, max_val_pos)\n scale = max_val_pos / (float(quant_max - quant_min) / 2)\n # ensure scale is the same dtype as the original tensor\n scale = torch.clamp(scale, min=eps).to(x.dtype)\n zero_point = torch.zeros(min_val_neg.size(), dtype=torch.int64, device=device)\n\n # quantize based on qmin/qmax/scale/zp\n # reference: torch/ao/quantization/fx/_decomposed.py?lines=63\n x_div = x.transpose(0, 1) / scale\n x_round = torch.round(x_div)\n x_zp = x_round + zero_point\n x_zp = x_zp.transpose(0, 1)\n quant = torch.clamp(x_zp, quant_min, quant_max).to(target_dtype)\n\n return quant, scale, zero_point" }, { "identifier": "dynamically_quantize_per_tensor", "path": "torchao/quantization/quant_primitives.py", "snippet": "def dynamically_quantize_per_tensor(\n x,\n quant_min,\n quant_max,\n target_dtype,\n qscheme=torch.per_tensor_affine, # for now, reuse existing qscheme enum\n):\n # assumes affine quantization\n\n # default setup for affine quantization of activations\n eps = torch.finfo(torch.float32).eps\n\n if qscheme == torch.per_tensor_affine:\n # get min and max\n # TODO(future): make torch.aminmax work on cpu-half\n # min_val, max_val = torch.aminmax(x)\n min_val = torch.min(x)\n max_val = torch.max(x)\n\n # calculate scale and zero point based on min and max\n # reference: https://fburl.com/code/srbiybme\n min_val_neg = torch.min(min_val, torch.zeros_like(min_val))\n max_val_pos = torch.max(max_val, torch.zeros_like(max_val))\n device = min_val_neg.device\n\n scale = (max_val_pos - min_val_neg) / float(quant_max - quant_min)\n # TODO(future): make torch.clamp with scalar work on cpu-half\n scale = torch.clamp(scale, min=eps).reshape(1)\n zero_point = quant_min - torch.round(min_val_neg / scale).to(torch.int)\n zero_point = torch.clamp(zero_point, quant_min, quant_max)\n\n # quantize based on qmin/qmax/scale/zp\n # reference: torch/ao/quantization/fx/_decomposed.py?lines=63\n quant = torch.clamp(\n torch.round(x / scale) + zero_point, quant_min, quant_max\n ).to(target_dtype)\n\n else:\n assert qscheme == torch.per_tensor_symmetric, f\"unsupported qscheme {qscheme}\"\n # assert quant_min == -1 * quant_max, \"unsupported quant_min/quant_max\"\n amax = torch.max(torch.abs(x))\n scale = amax / (float(quant_max - quant_min) / 2)\n scale = torch.clamp(scale, min=eps).reshape(1)\n quant = torch.clamp(torch.round(x / scale), quant_min, quant_max).to(\n target_dtype\n )\n # do not create a tensor for zero_point as this is expensive\n zero_point = None\n\n return quant, scale, zero_point" }, { "identifier": "quant_int8_dynamic_linear", "path": "torchao/quantization/quant_primitives.py", "snippet": "def quant_int8_dynamic_linear(\n x,\n x_quant_min,\n x_quant_max,\n x_q_dtype,\n w_vals_int8_t,\n w_scales,\n w_vals_int8_t_sums_int64,\n bias,\n out_dtype=torch.float32,\n):\n # like F.linear, but with int8 dynamic quantization of activation,\n # and a quantized weight\n x_vals_int8, x_scale, x_zp = dynamically_quantize_per_tensor(\n x, x_quant_min, x_quant_max, x_q_dtype\n )\n # w_vals_int8_t_sums_int64 = w_vals_int8_t.sum(dim=0)\n mm_out = quant_int8_matmul(\n x_vals_int8,\n x_scale,\n x_zp,\n w_vals_int8_t,\n w_vals_int8_t_sums_int64,\n w_scales,\n out_dtype,\n )\n if bias is not None:\n mm_out += bias\n return mm_out" }, { "identifier": "quant_int8_dynamic_per_token_linear", "path": "torchao/quantization/quant_primitives.py", "snippet": "def quant_int8_dynamic_per_token_linear(\n x,\n w_vals_int8_t,\n w_scales,\n bias,\n 
out_dtype,\n):\n # like F.linear, but with int8 dynamic quantization of activation,\n # and a quantized weight\n x_vals_int8, x_scales = quantize_activation_per_token_absmax(x)\n mm_out = quant_int8_per_token_matmul(\n x_vals_int8, x_scales, w_vals_int8_t, w_scales, out_dtype\n )\n if bias is not None:\n mm_out += bias\n return mm_out" }, { "identifier": "quantize_activation_per_token_absmax", "path": "torchao/quantization/quant_primitives.py", "snippet": "def quantize_activation_per_token_absmax(t):\n n_bits = 8\n # if the shape of t is [B, N, K], the shape of scales will be [B, N, 1]\n\n scales = t.abs().amax(dim=-1, keepdim=True)\n if scales.dtype == torch.float16:\n scales = (\n scales.float()\n ) # want float scales to avoid overflows for fp16, (bf16 has wide enough range)\n q_max = 2 ** (n_bits - 1) - 1\n scales = scales.clamp(min=1e-5).div(q_max)\n # Note: the original smoothquant does not clamp to qmin/qmax here,\n # but some of the tests with bfloat16 ended up with a flipped sign\n # if we don't clamp. TODO(future) look into this further.\n t = torch.round(t / scales).clamp(-127, 127).to(torch.int8)\n return t, scales" }, { "identifier": "safe_int_mm", "path": "torchao/quantization/quant_primitives.py", "snippet": "def safe_int_mm(input: torch.Tensor, mat2: torch.Tensor) -> torch.Tensor:\n r\"\"\"\n This function wraps torch._int_mm and avoids several undesirable behaviors of the function for certain inputs while still\n returning correct results and being torch.compiled in a performant way.\n\n Assumes both tensors have dimension of 2.\n\n Note: no error checking for torch.compiled path, if input.shape = [i, j] and j<=16 then the triton kernel\n will error.\n\n Args:\n input (Tensor, int8): the first tensor to be multiplied\n mat2 (Tensor, int8): the second tensor to be multiplied\n\n Return:\n out (Tensor, int32): the result of the matmul with device matching that of the inputs\n \"\"\"\n\n # torch.compile path\n if dynamo_is_compiling() or \"FakeTensor\" in input.__repr__():\n return out_dtype(torch.ops.aten.mm.default, torch.int32, input, mat2)\n\n # error checking for cublas path\n assert (\n mat2.device == input.device\n ), f\"need both tensors to be on the same device but got {mat2.device} and {input.device}\"\n device_cpu = \"cpu\" in [mat2.device.type, input.device.type]\n # with input.shape = [i,j] and mat2.shape = [j,k]\n i_is_strictly_greater_than_16 = input.shape[0] > 16\n j_is_nonzero_multiple_of_8 = (input.shape[1] % 8 == 0) and (input.shape[1] > 0)\n k_is_nonzero_multiple_of_8 = (mat2.shape[1] % 8 == 0) and (mat2.shape[1] > 0)\n bad_dimensions_for_cublas = not (\n i_is_strictly_greater_than_16\n and j_is_nonzero_multiple_of_8\n and k_is_nonzero_multiple_of_8\n )\n\n if device_cpu or bad_dimensions_for_cublas:\n # fallback path\n return torch.matmul(input.cpu().to(torch.int32), mat2.cpu().to(torch.int32)).to(\n input.device.type\n )\n\n # cublas paths\n if not mat2.is_contiguous(): # silently gives incorrect result without this\n mat2 = mat2.contiguous()\n if (not input.is_contiguous()) and (\n input.shape[0] % 8 != 0\n ): # gives cryptic error without this\n input = (\n input.contiguous()\n ) # (it seems the transpose makes cublas check the above j constraint on i)\n return out_dtype(torch.ops.aten.mm.default, torch.int32, input, mat2)" }, { "identifier": "get_scale", "path": "torchao/quantization/smoothquant.py", "snippet": "def get_scale(X_absmax, W_absmax, alpha=0.5):\n \"\"\"\n Calculate the scale based on abs(max(X)), abs(max(W)) and alpha\n If X is of 
dimension `b*n*k` and W is dimension `k*m`, the returned\n scale is of dimension `k`.\n Note: X_absmax is calculated outside of this function because we\n need to keep a running version of it during calibration. W_absmax\n is calculated outside of this function for consistency with X_absmax.\n \"\"\"\n X_pow = torch.pow(X_absmax, alpha)\n W_pow = torch.pow(W_absmax, 1.0 - alpha)\n div = X_pow / W_pow\n return div.reshape(-1)" }, { "identifier": "smooth_fq_linear_to_inference", "path": "torchao/quantization/smoothquant.py", "snippet": "def smooth_fq_linear_to_inference(model, debug_skip_calibration=False) -> None:\n for _, mod in model.named_modules():\n if isinstance(mod, tuple(source_cls_to_target_cls.values())):\n if debug_skip_calibration:\n mod.set_debug_x_absmax()\n mod.to_inference()" }, { "identifier": "SmoothFakeDynamicallyQuantizedLinear", "path": "torchao/quantization/smoothquant.py", "snippet": "class SmoothFakeDynamicallyQuantizedLinear(SmoothFakeDynQuantMixin, torch.nn.Linear):\n \"\"\"\n This is a replacement for `torch.nn.Linear` which implements dynamic per-token\n activation quantization and dynamic per-channel weight quantization based on\n Smoothquant scaling.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n alpha = kwargs.pop(\"alpha\")\n super().__init__(*args, **kwargs)\n self.init_smoothquant_variables(alpha)\n\n def forward(self, X, *args, **kwargs):\n if self.calibrating:\n self.update_x_running_abs_max(X)\n Y = F.linear(X, self.weight, self.bias)\n else:\n if not self.debug_skip_scaling:\n # Ideally this would be fused into preceding layers\n # but in practice torch.compile fuses it with other\n # ops so the slowdown is minimal\n X = X / self.smooth_scale\n W_int_repr_t = (\n self.W_int_repr if self.store_w_int_repr_t else self.W_int_repr.t()\n )\n Y = quant_int8_dynamic_per_token_linear(\n X, W_int_repr_t, self.W_scales, self.bias, X.dtype\n )\n return Y\n\n @classmethod\n def from_float(cls, mod, alpha=0.5):\n \"\"\"\n Converts a `mod` of class `torch.nn.Linear` to the smooth fake quantized\n version of it. 
Note: requires calibration.\n \"\"\"\n # create the new module with a toy size to ensure initialization is fast\n fake_in_features, fake_out_features = 8, 8\n new_mod = cls(\n fake_in_features, fake_out_features, bias=mod.bias is not None, alpha=alpha\n )\n new_mod.in_features = mod.in_features\n new_mod.out_features = mod.out_features\n new_mod.weight = mod.weight\n new_mod.bias = mod.bias\n # TODO: test when creation is on cuda\n device_to_use = next(mod.parameters()).device\n new_mod.to(device_to_use)\n return new_mod\n\n def to_inference(self):\n \"\"\"\n Calculates the smoothquant scale based on calibration\n in preparation for inference\n \"\"\"\n assert self.x_running_abs_max is not None, \"no calibration data found\"\n self.calibrating = False\n self.smooth_scale = get_scale(\n self.x_running_abs_max,\n torch.max(torch.abs(self.weight.transpose(0, 1)), dim=1).values,\n alpha=self.alpha,\n )\n self.fold_weight()\n\n def set_debug_x_absmax(self):\n w_absmax = torch.max(torch.abs(self.weight.transpose(0, 1)), dim=1).values\n self.x_running_abs_max = w_absmax" }, { "identifier": "swap_linear_with_smooth_fq_linear", "path": "torchao/quantization/smoothquant.py", "snippet": "def swap_linear_with_smooth_fq_linear(\n model, skip_fqn_list=None, cur_fqn=\"\", alpha=0.5\n) -> None:\n\n name_to_child = dict(model.named_children())\n for name, child in name_to_child.items():\n if cur_fqn == \"\":\n new_fqn = name\n else:\n new_fqn = f\"{cur_fqn}.{name}\"\n if ((skip_fqn_list is None) or (new_fqn not in skip_fqn_list)) and (\n type(child) in source_cls_to_target_cls.keys()\n ):\n target_cls = source_cls_to_target_cls[type(child)]\n new_child = target_cls.from_float(child, alpha=alpha)\n setattr(model, name, new_child)\n else:\n swap_linear_with_smooth_fq_linear(child, skip_fqn_list, new_fqn, alpha)" }, { "identifier": "Int8DynamicallyQuantizedLinearWeight", "path": "torchao/quantization/subclass.py", "snippet": "class Int8DynamicallyQuantizedLinearWeight(QuantizedLinearWeightBase):\n \"\"\"\n A Tensor subclass that when applied to a weight used in a linear op/module, changes the\n linear op to a dynamically quantized linear op with symmetric per-token and per-channel\n quantization on the activation and weight respectively.\n \"\"\"\n\n @staticmethod\n def __new__(cls, int_data, q_scales, transposed, shape, **kwargs):\n kwargs[\"dtype\"] = kwargs.get(\"dtype\", q_scales.dtype)\n return super().__new__(cls, int_data, transposed, shape, **kwargs) # type: ignore[attr-defined]\n\n def __init__(self, int_data, q_scales, transposed, shape, **kwargs):\n self.q_scales = q_scales\n super().__init__(int_data, transposed)\n\n @staticmethod\n def _quantized_op(act_mat, w_qtensor, bias):\n return quant_int8_dynamic_per_token_linear(\n act_mat, w_qtensor.int_data, w_qtensor.q_scales, bias, act_mat.dtype\n )\n\n def dequantize(self, dtype=None):\n \"\"\"\n Obtain the dequantized version of the quantized tensor subclass\n \"\"\"\n dq_t = dequantize_per_channel(\n self.int_data.t(), self.q_scales, 0, self.dtype if dtype is None else dtype\n ).to(self.dtype)\n # data was transposed to dequantize so make sure shape is correct\n return dq_t if not self.transposed else dq_t.t()\n\n def int_repr(self):\n \"\"\"\n Get the internal integer representation of the quantized tensor\n \"\"\"\n return self.int_data if self.transposed else self.int_data.t()\n\n def q_params(self):\n \"\"\"\n Get the quantization scales for the quantized tensor\n \"\"\"\n return {\"q_scales\": self.q_scales}\n\n def to(self, *args, 
**kwargs):\n kwargs = self._get_to_kwargs(*args, **kwargs)\n return self.__class__(\n self.int_data.to(kwargs[\"device\"]),\n self.q_scales.to(kwargs[\"device\"]),\n self.transposed,\n self.shape,\n **kwargs,\n )\n\n def _apply_fn_to_data(self, fn):\n return self.__class__(\n fn(self.int_data), fn(self.q_scales), self.transposed, self.shape, dtype=self.dtype\n )\n\n def _change_shape(self, shape):\n return self.__class__(\n self.int_data, self.q_scales, self.transposed, shape, dtype=self.dtype\n )\n\n def __tensor_flatten__(self):\n return [\"int_data\", \"q_scales\"], [self.transposed, self.dtype, self.shape]\n\n @classmethod\n def __tensor_unflatten__(cls, tensor_data_dict, tensor_attributes, outer_size=None, outer_stride=None):\n int_data, q_scales = tensor_data_dict[\"int_data\"], tensor_data_dict[\"q_scales\"]\n transposed, dtype, shape = tensor_attributes\n return cls(int_data, q_scales, transposed, shape if outer_size is None else outer_size, dtype=dtype, strides=outer_stride)\n\n @classmethod\n def from_float(cls, input_float, qmin=-128, qmax=127):\n \"\"\"\n Method used to convert a linear weight tensor to an instance of the\n Int8DynamicallyQuantizedLinearWeight subclass.\n\n Example usage::\n\n model.lin_mod.weight = (\n Int8DynamicallyQuantizedLinearWeight.from_float(model.lin_mod.weight)\n )\n \"\"\"\n w_int_repr, w_scales, _ = dynamically_quantize_per_channel(\n input_float, qmin, qmax, torch.int8\n )\n # the desired representation shape for fast quantized matmul is\n # transposed compared to how it's stored as a linear weight,\n # i.e. we want in_channels as dim=0 and out_channels (and quantized axis) as dim=1\n # however the external representation of our tensor will maintain the correct\n # shape attribute which needs to be tracked directly.\n int_data = w_int_repr.contiguous().t()\n if cls is not Int8DynamicallyQuantizedLinearWeight:\n int_data = int_data.contiguous()\n return cls(\n int_data, w_scales, False, input_float.shape, dtype=input_float.dtype\n )" }, { "identifier": "Int8WeightOnlyQuantizedLinearWeight", "path": "torchao/quantization/subclass.py", "snippet": "class Int8WeightOnlyQuantizedLinearWeight(Int8DynamicallyQuantizedLinearWeight):\n \"\"\"\n A Tensor subclass that when applied to a weight used in a linear op/module,\n changes the linear op to a weight-only quantized linear op with symmetric\n per-channel quantization on the weight.\n \"\"\"\n\n @staticmethod\n def _quantized_op(act_mat, w_qtensor, bias):\n orig_dtype = act_mat.dtype\n y = torch.mm(act_mat.reshape(-1, act_mat.shape[-1]), w_qtensor.int_data.to(act_mat.dtype)) * w_qtensor.q_scales\n y = y.reshape(*act_mat.shape[:-1], y.shape[-1])\n if bias is not None:\n y += bias\n return y.to(orig_dtype)" }, { "identifier": "Int4WeightOnlyQuantizedLinearWeight", "path": "torchao/quantization/subclass.py", "snippet": "class Int4WeightOnlyQuantizedLinearWeight(QuantizedLinearWeightBase):\n \"\"\"\n A Tensor subclass that when applied to a weight used in a linear op/module,\n changes that linear op to a weight-only int4 quantized linear op with groupwise\n affine quantization on the weight.\n \"\"\"\n\n @staticmethod\n def __new__(\n cls,\n int_data,\n scales_and_zeros,\n transposed,\n shape,\n groupsize=128,\n inner_k_tiles=8,\n **kwargs,\n ):\n kwargs[\"dtype\"] = kwargs.get(\"dtype\", scales_and_zeros.dtype)\n return super().__new__(cls, int_data, transposed, shape, **kwargs) # type: ignore[attr-defined]\n\n def __init__(\n self,\n int_data,\n scales_and_zeros,\n transposed,\n shape,\n groupsize,\n 
inner_k_tiles,\n **kwargs,\n ):\n # the transposed flag tracks whether the tensor subclass has been transposed relative\n # to how a weight is normally stored in a linear i.e. [out_features, in_features].\n # tracking both transposed and shape is slightly redundant but corner cases like\n # square matrices can cause issues otherwise\n self.scales_and_zeros = scales_and_zeros\n self.groupsize = groupsize\n self.inner_k_tiles = inner_k_tiles\n super().__init__(int_data, transposed)\n\n @staticmethod\n def _quantized_op(act_mat, w_qtensor, bias):\n orig_act_size = act_mat.size()\n orig_dtype = act_mat.dtype\n\n # reshape and pad activation\n act_mat = act_mat.reshape(-1, act_mat.shape[-1]).to(torch.bfloat16)\n pad_size = find_multiple(act_mat.shape[-1], 1024)\n act_mat = torch.nn.functional.pad(act_mat, (0, pad_size - act_mat.shape[-1]))\n\n # matmul\n y = aten._weight_int4pack_mm(\n act_mat.contiguous(), w_qtensor.int_data, w_qtensor.groupsize, w_qtensor.scales_and_zeros\n )\n\n # remove out_feature padding\n orig_out_features = w_qtensor.shape[-1] if w_qtensor.transposed else w_qtensor.shape[-2]\n y = y[:, :orig_out_features]\n\n y = y.reshape(*orig_act_size[:-1], orig_out_features)\n if bias is not None:\n y += bias\n return y.to(orig_dtype)\n\n def dequantize(self):\n eye_shape = self.shape[1] if not self.transposed else self.shape[0]\n w_dq = self._quantized_op(\n torch.eye(eye_shape, device=self.device, dtype=self.dtype), self, None\n )\n # we dequantized using linear with the identity matrix, output has shape [in_channels, out_channels]\n # so we need to transpose back to get the original shape unless self.transposed is set.\n w_dq = w_dq if self.transposed else w_dq.t()\n return w_dq.to(self.dtype)\n\n def int_repr(self):\n return self.int_data\n\n def q_params(self):\n scales, zero_points = unpack_tinygemm_scales_and_zeros(\n self.scales_and_zeros,\n )\n return {\"q_scales\": scales, \"q_zero_points\": zero_points}\n\n def to(self, *args, **kwargs):\n kwargs = self._get_to_kwargs(*args, **kwargs)\n return self.__class__(\n self.int_data.to(kwargs[\"device\"]),\n self.scales_and_zeros.to(kwargs[\"device\"]),\n self.transposed,\n self.shape,\n self.groupsize,\n self.inner_k_tiles,\n **kwargs,\n )\n\n def _apply_fn_to_data(self, fn):\n return self.__class__(\n fn(self.int_data),\n fn(self.scales_and_zeros),\n self.transposed,\n self.shape,\n self.groupsize,\n self.inner_k_tiles,\n dtype=self.dtype,\n )\n\n def _change_shape(self, shape):\n return self.__class__(\n self.int_data,\n self.scales_and_zeros,\n self.transposed,\n shape,\n self.groupsize,\n self.inner_k_tiles,\n dtype=self.dtype\n )\n\n def __tensor_flatten__(self):\n return [\"int_data\", \"scales_and_zeros\"], (\n self.transposed,\n self.groupsize,\n self.inner_k_tiles,\n self.dtype,\n self.shape\n )\n\n @classmethod\n def __tensor_unflatten__(cls, tensor_data_dict, attributes, outer_size=None, outer_stride=None):\n int_data, scales_and_zeros = (\n tensor_data_dict[\"int_data\"],\n tensor_data_dict[\"scales_and_zeros\"],\n )\n transposed, groupsize, inner_k_tiles, dtype, shape = attributes\n return cls(\n int_data,\n scales_and_zeros,\n transposed,\n shape if outer_size is None else outer_size,\n groupsize,\n inner_k_tiles,\n dtype=dtype,\n strides=outer_stride,\n )\n\n @classmethod\n def from_float(cls, input_float, groupsize=128, inner_k_tiles=8):\n \"\"\"\n Method used to convert a linear weight tensor to an instance of the\n Int4WeightOnlyQuantizedLinearWeight subclass.\n\n Example usage::\n\n model.lin_mod.weight = (\n 
Int4WeightOnlyQuantizedLinearWeight.from_float(model.lin_mod.weight)\n )\n \"\"\"\n assert groupsize in [256, 128, 64, 32]\n assert inner_k_tiles in [8, 4, 2]\n orig_shape = input_float.shape\n orig_out_features, orig_in_features = input_float.shape\n\n # padding\n in_features = find_multiple(orig_in_features, 1024)\n out_features = find_multiple(orig_out_features, 8)\n input_float = torch.nn.functional.pad(\n input_float, (0, in_features - orig_in_features, 0, out_features - orig_out_features)\n )\n\n # quantization and packing\n input_int4x8, scales_and_zeros = groupwise_affine_quantize_tensor(\n input_float, 4, groupsize\n )\n int_data = aten._convert_weight_to_int4pack(\n input_int4x8, inner_k_tiles\n )\n\n return cls(\n int_data,\n scales_and_zeros,\n False,\n orig_shape,\n groupsize,\n inner_k_tiles,\n dtype=input_float.dtype,\n )" }, { "identifier": "_apply_logging_hook", "path": "torchao/quantization/utils.py", "snippet": "def find_multiple(n: int, k: int) -> int:\ndef compute_error(x, y):\ndef _get_logging_hook(fqn):\n def forward_hook(module, input):\ndef _apply_logging_hook(model):\n def __torch_dispatch__(self, func, types, args=(), kwargs=None):\ndef get_model_size_in_bytes(model):\nclass LoggingTensorMode(TorchDispatchMode):" } ]
import copy
import unittest
import torch
import torch.nn as nn
import os
from torch._inductor.utils import run_and_get_code
from torch._dynamo import config
from torch.ao.quantization import MinMaxObserver, QConfigMapping
from torchao.quantization.dynamic_quant import (
    DynamicallyPerAxisQuantizedLinear,
)
from torchao.quantization.quant_api import (
    apply_dynamic_quant,
    apply_weight_only_int8_quant,
    change_linear_weights_to_int8_dqtensors,
    change_linear_weights_to_int8_woqtensors,
    change_linear_weights_to_int4_woqtensors,
    _replace_with_custom_fn_if_matches_filter,
)
from torchao.quantization.quant_primitives import (
    dequantize_per_channel,
    dequantize_per_tensor,
    dynamically_quantize_per_channel,
    dynamically_quantize_per_tensor,
    quant_int8_dynamic_linear,
    quant_int8_dynamic_per_token_linear,
    quantize_activation_per_token_absmax,
    safe_int_mm,
)
from torchao.quantization.smoothquant import (
    get_scale,
    smooth_fq_linear_to_inference,
    SmoothFakeDynamicallyQuantizedLinear,
    swap_linear_with_smooth_fq_linear,
)
from torchao.quantization.subclass import (
    Int8DynamicallyQuantizedLinearWeight,
    Int8WeightOnlyQuantizedLinearWeight,
    Int4WeightOnlyQuantizedLinearWeight
)
from torchao.quantization.utils import (
    _apply_logging_hook,
    compute_error,
    compute_error as SQNR,
    _fqn_to_op_to_shape_to_count,
    LoggingTensorMode,
)
from torch.ao.quantization.quantize_fx import convert_to_reference_fx, prepare_fx
from transformers import (  # type: ignore[import-untyped]
    DistilBertModel,
    DistilBertTokenizer,
)
8,662
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

# mypy: ignore-errors
torch.manual_seed(0)
config.cache_size_limit = 100


class SmoothquantUnitTest(unittest.TestCase):
    # first, let's reproduce the graphic from the paper, Figure 4, to ensure
    # we are calculating the scales correctly
    def test_figure_4(self):
        X = torch.FloatTensor([1, -16, 2, 6, -2, 8, -1, -9]).reshape(1, 2, 4)
        W = torch.FloatTensor([2, 1, -2, 1, -1, -1, 2, -1, -2, -1, -1, 1]).reshape(4, 3)
        X_mul_W = torch.matmul(X, W)
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

# mypy: ignore-errors
torch.manual_seed(0)
config.cache_size_limit = 100


class SmoothquantUnitTest(unittest.TestCase):
    # first, let's reproduce the graphic from the paper, Figure 4, to ensure
    # we are calculating the scales correctly
    def test_figure_4(self):
        X = torch.FloatTensor([1, -16, 2, 6, -2, 8, -1, -9]).reshape(1, 2, 4)
        W = torch.FloatTensor([2, 1, -2, 1, -1, -1, 2, -1, -2, -1, -1, 1]).reshape(4, 3)
        X_mul_W = torch.matmul(X, W)
smoothquant_scale = get_scale(
15
2023-11-03 21:27:36+00:00
12k
google-research/semivl
third_party/unimatch/supervised.py
[ { "identifier": "__version__", "path": "version.py", "snippet": "" }, { "identifier": "gen_code_archive", "path": "utils/gen_code_archive.py", "snippet": "def gen_code_archive(out_dir, file='code.tar.gz'):\n archive = os.path.join(out_dir, file)\n os.makedirs(os.path.dirname(archive), exist_ok=True)\n with tarfile.open(archive, mode='w:gz') as tar:\n tar.add('.', filter=is_source_file)\n return archive" }, { "identifier": "SemiDataset", "path": "third_party/unimatch/dataset/semi.py", "snippet": "class SemiDataset(Dataset):\n def __init__(self, cfg, mode, id_path=None, nsample=None):\n self.name = cfg['dataset']\n self.root = os.path.expandvars(os.path.expanduser(cfg['data_root']))\n self.mode = mode\n self.size = cfg['crop_size']\n self.img_scale = cfg['img_scale']\n self.scale_ratio_range = cfg.get('scale_ratio_range', (0.5, 2.0))\n self.reduce_zero_label = cfg.get('reduce_zero_label', False)\n\n if isinstance(self.img_scale, list):\n self.img_scale = tuple(self.img_scale)\n self.labeled_photometric_distortion = cfg['labeled_photometric_distortion']\n\n if mode == 'train_l' or mode == 'train_u':\n with open(id_path, 'r') as f:\n self.ids = f.read().splitlines()\n if mode == 'train_l' and nsample is not None:\n self.ids *= math.ceil(nsample / len(self.ids))\n self.ids = self.ids[:nsample]\n else:\n if id_path is None:\n id_path = 'splits/%s/val.txt' % self.name\n with open(id_path, 'r') as f:\n self.ids = f.read().splitlines()\n\n def __getitem__(self, item):\n id = self.ids[item]\n img = Image.open(os.path.join(self.root, id.split(' ')[0])).convert('RGB')\n mask = Image.fromarray(np.array(Image.open(os.path.join(self.root, id.split(' ')[1]))))\n if self.reduce_zero_label:\n mask = np.array(mask)\n mask[mask == 0] = 255\n mask = mask - 1\n mask[mask == 254] = 255\n mask = Image.fromarray(mask)\n\n if self.mode == 'val':\n if self.img_scale is not None:\n res = Resize(img_scale=self.img_scale, min_size=512)(dict(\n img=np.array(img),\n ))\n img = Image.fromarray(res['img'])\n img, mask = normalize(img, mask)\n return img, mask, id\n\n if self.img_scale is not None:\n # print('Size before', img.size)\n res = Resize(img_scale=self.img_scale, ratio_range=self.scale_ratio_range)(dict(\n img=np.array(img),\n mask=np.array(mask),\n seg_fields=['mask']\n ))\n img = Image.fromarray(res['img'])\n mask = Image.fromarray(res['mask'])\n # print('Size after', mask.size)\n else:\n img, mask = resize(img, mask, self.scale_ratio_range)\n ignore_value = 254 if self.mode == 'train_u' else 255\n img, mask = crop(img, mask, self.size, ignore_value)\n img, mask = hflip(img, mask, p=0.5)\n\n if self.mode == 'train_l':\n if self.labeled_photometric_distortion:\n img = Image.fromarray(\n PhotoMetricDistortion()({'img': np.array(img)[..., ::-1]})['img'][..., ::-1]\n )\n return normalize(img, mask)\n\n img_w, img_s1, img_s2 = deepcopy(img), deepcopy(img), deepcopy(img)\n\n if random.random() < 0.8:\n img_s1 = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(img_s1)\n img_s1 = transforms.RandomGrayscale(p=0.2)(img_s1)\n img_s1 = blur(img_s1, p=0.5)\n cutmix_box1 = obtain_cutmix_box(img_s1.size[0], p=0.5)\n\n if random.random() < 0.8:\n img_s2 = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(img_s2)\n img_s2 = transforms.RandomGrayscale(p=0.2)(img_s2)\n img_s2 = blur(img_s2, p=0.5)\n cutmix_box2 = obtain_cutmix_box(img_s2.size[0], p=0.5)\n\n ignore_mask = Image.fromarray(np.zeros((mask.size[1], mask.size[0])))\n\n img_s1, ignore_mask = normalize(img_s1, ignore_mask)\n img_s2 = normalize(img_s2)\n\n mask = 
torch.from_numpy(np.array(mask)).long()\n ignore_mask[mask == 254] = 255\n\n return normalize(img_w), img_s1, img_s2, ignore_mask, cutmix_box1, cutmix_box2\n\n def __len__(self):\n return len(self.ids)" }, { "identifier": "build_model", "path": "model/builder.py", "snippet": "def build_model(cfg):\n model_type = cfg['model']\n if model_type == 'deeplabv3plus':\n model = DeepLabV3Plus(cfg)\n elif 'mmseg.' in model_type:\n model_type = model_type.replace('mmseg.', '')\n model_cfg_file = f'configs/_base_/models/{model_type}.py'\n mmseg_cfg = Config.fromfile(model_cfg_file)\n mmseg_cfg['model']['decode_head']['num_classes'] = cfg['nclass']\n if 'zegclip' in model_type or 'vlm' in model_type:\n if mmseg_cfg['img_size'] != cfg['crop_size']:\n print('Modify model image_size to match crop_size', cfg['crop_size'])\n nested_set(mmseg_cfg, 'img_size', cfg['crop_size'])\n nested_set(mmseg_cfg, 'model.backbone.img_size', (cfg['crop_size'], cfg['crop_size']))\n nested_set(mmseg_cfg, 'model.decode_head.img_size', cfg['crop_size'])\n emb_dataset_prefix = {\n 'pascal': 'voc12_wbg',\n 'cityscapes': 'cityscapes',\n 'coco': 'coco',\n 'ade': 'ade',\n }[cfg['dataset']]\n text_embedding_variant = cfg['text_embedding_variant']\n text_embedding = f'configs/_base_/datasets/text_embedding/{emb_dataset_prefix}_{text_embedding_variant}.npy'\n nested_set(mmseg_cfg, 'model.load_text_embedding', text_embedding)\n mcc_text_embedding_variant = cfg['mcc_text']\n mcc_text_embedding = f'configs/_base_/datasets/text_embedding/{emb_dataset_prefix}_{mcc_text_embedding_variant}.npy'\n nested_set(mmseg_cfg, 'model.load_mcc_text_embedding', mcc_text_embedding)\n pl_text_embedding_variant = cfg['pl_text']\n pl_text_embedding = f'configs/_base_/datasets/text_embedding/{emb_dataset_prefix}_{pl_text_embedding_variant}.npy'\n nested_set(mmseg_cfg, 'model.load_pl_text_embedding', pl_text_embedding)\n if mmseg_cfg['model']['decode_head']['type'] == 'ATMSingleHeadSeg':\n mmseg_cfg['model']['decode_head']['seen_idx'] = list(range(cfg['nclass']))\n mmseg_cfg['model']['decode_head']['all_idx'] = list(range(cfg['nclass']))\n if mmseg_cfg['model']['decode_head'].get('loss_decode') is not None and \\\n mmseg_cfg['model']['decode_head']['loss_decode']['type'] == 'SegLossPlus':\n mmseg_cfg['model']['decode_head']['loss_decode']['num_classes'] = cfg['nclass']\n if cfg['clip_encoder'] is not None:\n clip_encoder_cfg = Config.fromfile(f'configs/_base_/models/{cfg[\"clip_encoder\"]}.py')\n clip_encoder_cfg['img_size'] = mmseg_cfg['img_size']\n if cfg.get('mcc_fix_resize_pos'):\n clip_encoder_cfg['backbone']['img_size'] = mmseg_cfg['img_size']\n mmseg_cfg['model']['clip_encoder'] = clip_encoder_cfg['backbone']\n if 'model_args' in cfg:\n mmseg_cfg['model'].update(cfg['model_args'])\n model = build_segmentor(\n mmseg_cfg.model,\n train_cfg=mmseg_cfg.get('train_cfg'),\n test_cfg=mmseg_cfg.get('test_cfg'))\n model.disable_dropout = cfg['disable_dropout']\n model.fp_rate = cfg['fp_rate']\n model.forward = types.MethodType(forward_wrapper, model)\n model.init_weights()\n else:\n raise ValueError(model_type)\n \n return model" }, { "identifier": "get_git_revision", "path": "experiments.py", "snippet": "def get_git_revision() -> str:\n try:\n return subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip()\n except subprocess.CalledProcessError:\n return ''" }, { "identifier": "CLASSES", "path": "datasets/classes.py", "snippet": "CLASSES = {'pascal': ['background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', \n 'car', 'cat', 
'chair', 'cow', 'dining table', 'dog', 'horse', 'motorbike', \n 'person', 'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor'],\n \n 'cityscapes': ['road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light',\n 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car',\n 'truck', 'bus', 'train', 'motorcycle', 'bicycle'],\n \n 'coco': ['void', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', \n 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', \n 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',\n 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', \n 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',\n 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', \n 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', \n 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', \n 'teddy bear', 'hair drier', 'toothbrush', 'banner', 'blanket', 'branch', 'bridge', \n 'building-other', 'bush', 'cabinet', 'cage', 'cardboard', 'carpet', 'ceiling-other', \n 'ceiling-tile', 'cloth', 'clothes', 'clouds', 'counter', 'cupboard', 'curtain',\n 'desk-stuff', 'dirt', 'door-stuff', 'fence', 'floor-marble', 'floor-other', 'floor-stone', \n 'floor-tile', 'floor-wood', 'flower', 'fog', 'food-other', 'fruit', 'furniture-other', \n 'grass', 'gravel', 'ground-other', 'hill', 'house', 'leaves', 'light', 'mat', 'metal', \n 'mirror-stuff', 'moss', 'mountain', 'mud', 'napkin', 'net', 'paper', 'pavement', 'pillow', \n 'plant-other', 'plastic', 'platform', 'playingfield', 'railing', 'railroad', 'river', \n 'road', 'rock', 'roof', 'rug', 'salad', 'sand', 'sea', 'shelf', 'sky-other', 'skyscraper',\n 'snow', 'solid-other', 'stairs', 'stone', 'straw', 'structural-other', 'table', 'tent',\n 'textile-other', 'towel', 'tree', 'vegetable', 'wall-brick', 'wall-concrete', 'wall-other', \n 'wall-panel', 'wall-stone', 'wall-tile', 'wall-wood', 'water-other', 'waterdrops',\n 'window-blind', 'window-other', 'wood'],\n\n 'ade': ['wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', 'windowpane', 'grass', 'cabinet',\n 'sidewalk', 'person', 'earth', 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', 'water',\n 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', 'field', 'armchair', 'seat', 'fence', 'desk',\n 'rock', 'wardrobe', 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', 'signboard',\n 'chest of drawers','counter', 'sand', 'sink', 'skyscraper', 'fireplace', 'refrigerator', 'grandstand',\n 'path', 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', 'stairway', 'river', 'bridge',\n 'bookcase', 'blind', 'coffee table', 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove',\n 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', 'arcade machine', 'hovel', 'bus',\n 'towel', 'light', 'truck', 'tower', 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver',\n 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', 'escalator', 'ottoman', 'bottle',\n 'buffet', 'poster', 'stage', 'van', 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything',\n 'swimming pool', 'stool', 'barrel', 
'basket', 'waterfall', 'tent', 'bag', 'minibike', 'cradle', 'oven',\n 'ball', 'food', 'step', 'tank', 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', 'dishwasher',\n 'screen', 'blanket', 'sculpture', 'hood', 'sconce', 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier',\n 'crt screen', 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', 'clock', 'flag'],\n }" }, { "identifier": "ProbOhemCrossEntropy2d", "path": "third_party/unimatch/util/ohem.py", "snippet": "class ProbOhemCrossEntropy2d(nn.Module):\n def __init__(self, ignore_index, reduction='mean', thresh=0.7, min_kept=256,\n down_ratio=1, use_weight=False):\n super(ProbOhemCrossEntropy2d, self).__init__()\n self.ignore_index = ignore_index\n self.thresh = float(thresh)\n self.min_kept = int(min_kept)\n self.down_ratio = down_ratio\n if use_weight:\n weight = torch.FloatTensor(\n [0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489,\n 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955,\n 1.0865, 1.1529, 1.0507])\n self.criterion = torch.nn.CrossEntropyLoss(reduction=reduction,\n weight=weight,\n ignore_index=ignore_index)\n else:\n self.criterion = torch.nn.CrossEntropyLoss(reduction=reduction,\n ignore_index=ignore_index)\n\n def forward(self, pred, target):\n b, c, h, w = pred.size()\n target = target.view(-1)\n valid_mask = target.ne(self.ignore_index)\n target = target * valid_mask.long()\n num_valid = valid_mask.sum()\n\n prob = F.softmax(pred, dim=1)\n prob = (prob.transpose(0, 1)).reshape(c, -1)\n\n if self.min_kept > num_valid:\n pass\n elif num_valid > 0:\n prob = prob.masked_fill_(~valid_mask, 1)\n mask_prob = prob[\n target, torch.arange(len(target), dtype=torch.long)]\n threshold = self.thresh\n if self.min_kept > 0:\n index = mask_prob.argsort()\n threshold_index = index[min(len(index), self.min_kept) - 1]\n if mask_prob[threshold_index] > self.thresh:\n threshold = mask_prob[threshold_index]\n kept_mask = mask_prob.le(threshold)\n target = target * kept_mask.long()\n valid_mask = valid_mask * kept_mask\n\n target = target.masked_fill_(~valid_mask, self.ignore_index)\n target = target.view(b, h, w)\n\n return self.criterion(pred, target)" }, { "identifier": "count_params", "path": "third_party/unimatch/util/utils.py", "snippet": "def count_params(model):\n param_num = sum(p.numel() for p in model.parameters())\n return param_num / 1e6" }, { "identifier": "AverageMeter", "path": "third_party/unimatch/util/utils.py", "snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self, length=0):\n self.length = length\n self.reset()\n\n def reset(self):\n if self.length > 0:\n self.history = []\n else:\n self.count = 0\n self.sum = 0.0\n self.val = 0.0\n self.avg = 0.0\n\n def update(self, val, num=1):\n if self.length > 0:\n # currently assert num==1 to avoid bad usage, refine when there are some explict requirements\n assert num == 1\n self.history.append(val)\n if len(self.history) > self.length:\n del self.history[0]\n\n self.val = self.history[-1]\n self.avg = np.mean(self.history)\n else:\n self.val = val\n self.sum += val * num\n self.count += num\n self.avg = self.sum / self.count" }, { "identifier": "intersectionAndUnion", "path": "third_party/unimatch/util/utils.py", "snippet": "def intersectionAndUnion(output, target, K, ignore_index=255):\n # 'K' classes, output and target sizes are N or N * L or N * H * W, each value in range 0 to K - 1.\n assert output.ndim in [1, 2, 3]\n assert output.shape == 
target.shape, f'{output.shape} != {target.shape}'\n output = output.reshape(output.size).copy()\n target = target.reshape(target.size)\n output[np.where(target == ignore_index)[0]] = ignore_index\n intersection = output[np.where(output == target)[0]]\n area_intersection, _ = np.histogram(intersection, bins=np.arange(K + 1))\n area_output, _ = np.histogram(output, bins=np.arange(K + 1))\n area_target, _ = np.histogram(target, bins=np.arange(K + 1))\n area_union = area_output + area_target - area_intersection\n return area_intersection, area_union, area_target" }, { "identifier": "init_log", "path": "third_party/unimatch/util/utils.py", "snippet": "def init_log(name, level=logging.INFO):\n if (name, level) in logs:\n return\n logs.add((name, level))\n logger = logging.getLogger(name)\n logger.setLevel(level)\n ch = logging.StreamHandler()\n ch.setLevel(level)\n if \"SLURM_PROCID\" in os.environ:\n rank = int(os.environ[\"SLURM_PROCID\"])\n logger.addFilter(lambda record: rank == 0)\n else:\n rank = 0\n format_str = \"[%(asctime)s][%(levelname)8s] %(message)s\"\n formatter = logging.Formatter(format_str)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger" }, { "identifier": "setup_distributed", "path": "third_party/unimatch/util/dist_helper.py", "snippet": "def setup_distributed(backend=\"nccl\", port=None):\n \"\"\"AdaHessian Optimizer\n Lifted from https://github.com/BIGBALLON/distribuuuu/blob/master/distribuuuu/utils.py\n Originally licensed MIT, Copyright (c) 2020 Wei Li\n \"\"\"\n num_gpus = torch.cuda.device_count()\n\n rank = int(os.environ[\"RANK\"])\n world_size = int(os.environ[\"WORLD_SIZE\"])\n\n torch.cuda.set_device(rank % num_gpus)\n\n dist.init_process_group(\n backend=backend,\n world_size=world_size,\n rank=rank,\n )\n return rank, world_size" } ]
import argparse
import logging
import os
import pprint
import shutil
import uuid
import torch
import numpy as np
import torch.distributed as dist
import torch.backends.cudnn as cudnn
import yaml
import mmseg

from version import __version__
from datetime import datetime
from utils.gen_code_archive import gen_code_archive
from tqdm import tqdm
from torch import nn
from torch.nn import functional as F
from torch.optim import SGD
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from third_party.unimatch.dataset.semi import SemiDataset
from model.builder import build_model
from mmseg.core import build_optimizer
from experiments import get_git_revision
from datasets.classes import CLASSES
from third_party.unimatch.util.ohem import ProbOhemCrossEntropy2d
from third_party.unimatch.util.utils import count_params, AverageMeter, intersectionAndUnion, init_log
from third_party.unimatch.util.dist_helper import setup_distributed
7,448
    else:
        if mode == 'center_crop':
            h, w = img.shape[-2:]
            start_h, start_w = (h - cfg['crop_size']) // 2, (w - cfg['crop_size']) // 2
            img = img[:, :, start_h:start_h + cfg['crop_size'], start_w:start_w + cfg['crop_size']]
            mask = mask[:, start_h:start_h + cfg['crop_size'], start_w:start_w + cfg['crop_size']]
        final = model(img)
        pred = final.argmax(dim=1)

    if return_logits:
        return pred, final
    else:
        return pred


def evaluate(model, loader, mode, cfg):
    model.eval()
    assert mode in ['original', 'center_crop', 'padded_sliding_window', 'zegclip_sliding_window', 'sliding_window']

    intersection_meter = AverageMeter()
    union_meter = AverageMeter()

    with torch.no_grad():
        for img, mask, id in tqdm(loader, total=len(loader)):
            img = img.cuda()
            pred = predict(model, img, mask, mode, cfg)

            intersection, union, target = \
                intersectionAndUnion(pred.cpu().numpy(), mask.numpy(), cfg['nclass'], 255)

            reduced_intersection = torch.from_numpy(intersection).cuda()
            reduced_union = torch.from_numpy(union).cuda()
            reduced_target = torch.from_numpy(target).cuda()

            dist.all_reduce(reduced_intersection)
            dist.all_reduce(reduced_union)
            dist.all_reduce(reduced_target)

            intersection_meter.update(reduced_intersection.cpu().numpy())
            union_meter.update(reduced_union.cpu().numpy())

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10) * 100.0
    mIOU = np.mean(iou_class)

    return mIOU, iou_class


def main():
    args = parser.parse_args()

    with open(args.config, "r") as fp:
        cfg = yaml.load(fp, Loader=yaml.Loader)

    labeled_id_path = f'splits/{cfg["dataset"]}/{cfg["split"]}/labeled.txt'
    unlabeled_id_path = f'splits/{cfg["dataset"]}/{cfg["split"]}/unlabeled.txt'

    logger = init_log('global', logging.INFO)
    logger.propagate = 0

    rank, world_size = setup_distributed(port=args.port)

    if rank == 0:
        timestr = datetime.now().strftime("%y%m%d-%H%M")
        uid = str(uuid.uuid4())[:5]
        run_name = f'{timestr}_{cfg["name"]}_v{__version__}_{uid}'.replace('.', '-')
        save_path = f'exp/exp-{cfg["exp"]}/{run_name}'
        os.makedirs(save_path, exist_ok=True)

        formatter = logging.Formatter(fmt='[%(asctime)s] [%(levelname)-8s] %(message)s')
        fileHandler = logging.FileHandler(f'{save_path}/debug.log')
        fileHandler.setFormatter(formatter)
        logger.addHandler(fileHandler)

        all_args = {**cfg, **vars(args), 'labeled_id_path': labeled_id_path,
                    'unlabeled_id_path': unlabeled_id_path, 'ngpus': world_size,
                    'run_name': run_name, 'save_path': save_path,
                    'exec_git_rev': get_git_revision(), 'exec_version': __version__}
        logger.info('{}\n'.format(pprint.pformat(all_args)))

        writer = SummaryWriter(save_path)

        shutil.copyfile(args.config, os.path.join(save_path, 'config.yaml'))
        with open(os.path.join(save_path, 'all_args.yaml'), 'w') as f:
            yaml.dump(all_args, f, default_flow_style=None, sort_keys=False, indent=2)
        gen_code_archive(save_path)

    cudnn.enabled = True
    cudnn.benchmark = True

    model = build_model(cfg)
    if rank == 0:
        logger.info(model)
        logger.info('Total params: {:.1f}M\n'.format(count_params(model)))

    if 'optimizer' not in cfg:
        optimizer = SGD([{'params': model.backbone.parameters(), 'lr': cfg['lr']},
                         {'params': [param for name, param in model.named_parameters() if 'backbone' not in name],
                          'lr': cfg['lr'] * cfg['lr_multi']}],
                        lr=cfg['lr'], momentum=0.9, weight_decay=1e-4)
    else:
        optimizer = build_optimizer(model, cfg['optimizer'])
    # print(len(optimizer.param_groups), 'param groups')
    for group in optimizer.param_groups:
        group.setdefault('initial_lr', group['lr'])
        # print(group['initial_lr'], group['lr'], group['weight_decay'])

    local_rank = int(os.environ["LOCAL_RANK"])
    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda(local_rank)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], broadcast_buffers=False,
                                                      output_device=local_rank,
                                                      find_unused_parameters=('zegclip' in cfg['model']))

    if cfg['criterion']['name'] == 'CELoss':
        criterion = nn.CrossEntropyLoss(**cfg['criterion']['kwargs']).cuda(local_rank)
    elif cfg['criterion']['name'] == 'OHEM':
        criterion = ProbOhemCrossEntropy2d(**cfg['criterion']['kwargs']).cuda(local_rank)
    elif cfg['criterion']['name'] == 'mmseg':
        criterion = None
    else:
        raise NotImplementedError('%s criterion is not implemented' % cfg['criterion']['name'])
parser = argparse.ArgumentParser(description='Revisiting Weak-to-Strong Consistency in Semi-Supervised Semantic Segmentation')
parser.add_argument('--config', type=str, required=True)
parser.add_argument('--local_rank', default=0, type=int)
parser.add_argument('--port', default=None, type=int)


def predict(model, img, mask, mode, cfg, return_logits=False):
    if mode == 'padded_sliding_window':
        grid = cfg['crop_size']
        stride = cfg['stride']
        if stride < 1:
            stride = int(grid * stride)
        b, _, h, w = img.shape
        final = torch.zeros(b, cfg['nclass'], h, w).cuda()
        row = 0
        while row < h:
            col = 0
            while col < w:
                y1 = row
                y2 = min(h, row + grid)
                x1 = col
                x2 = min(w, col + grid)
                crop_h = y2 - y1
                crop_w = x2 - x1
                # print(y1, y2, x1, x2, crop_h, crop_w)
                cropped_img = torch.zeros((b, 3, grid, grid), device=img.device)
                cropped_img[:, :, :crop_h, :crop_w] = img[:, :, y1: y2, x1: x2]
                pred = model(cropped_img)
                final[:, :, y1: y2, x1: x2] += pred.softmax(dim=1)[:, :, :crop_h, :crop_w]
                col += stride
            row += stride
        pred = final.argmax(dim=1)
    elif mode == 'zegclip_sliding_window':
        h_stride, w_stride = cfg['stride'], cfg['stride']
        h_crop, w_crop = cfg['crop_size'], cfg['crop_size']
        batch_size, _, h_img, w_img = img.size()
        num_classes = cfg['nclass']
        h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1
        w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1
        preds = img.new_zeros((batch_size, num_classes, h_img, w_img))
        count_mat = img.new_zeros((batch_size, 1, h_img, w_img))
        for h_idx in range(h_grids):
            for w_idx in range(w_grids):
                y1 = h_idx * h_stride
                x1 = w_idx * w_stride
                y2 = min(y1 + h_crop, h_img)
                x2 = min(x1 + w_crop, w_img)
                y1 = max(y2 - h_crop, 0)
                x1 = max(x2 - w_crop, 0)
                crop_img = img[:, :, y1:y2, x1:x2]
                crop_seg_logit = model(crop_img)
                preds += F.pad(crop_seg_logit,
                               (int(x1), int(preds.shape[3] - x2), int(y1), int(preds.shape[2] - y2)))
                count_mat[:, :, y1:y2, x1:x2] += 1
        assert (count_mat == 0).sum() == 0
        preds = preds / count_mat
        final = mmseg.ops.resize(
            preds,
            size=mask.shape[-2:],
            mode='bilinear',
            align_corners=True,
            warning=False)
        pred = final.argmax(dim=1)
    elif mode == 'sliding_window':
        grid = cfg['crop_size']
        b, _, h, w = img.shape
        final = torch.zeros(b, cfg['nclass'], h, w).cuda()
        row = 0
        while row < h:
            col = 0
            while col < w:
                pred = model(img[:, :, row: min(h, row + grid), col: min(w, col + grid)])
                final[:, :, row: min(h, row + grid), col: min(w, col + grid)] += pred.softmax(dim=1)
                col += int(grid * 2 / 3)
            row += int(grid * 2 / 3)
        pred = final.argmax(dim=1)
    else:
        if mode == 'center_crop':
            h, w = img.shape[-2:]
            start_h, start_w = (h - cfg['crop_size']) // 2, (w - cfg['crop_size']) // 2
            img = img[:, :, start_h:start_h + cfg['crop_size'], start_w:start_w + cfg['crop_size']]
            mask = mask[:, start_h:start_h + cfg['crop_size'], start_w:start_w + cfg['crop_size']]
        final = model(img)
        pred = final.argmax(dim=1)

    if return_logits:
        return pred, final
    else:
        return pred


def evaluate(model, loader, mode, cfg):
    model.eval()
    assert mode in ['original', 'center_crop', 'padded_sliding_window', 'zegclip_sliding_window', 'sliding_window']

    intersection_meter = AverageMeter()
    union_meter = AverageMeter()

    with torch.no_grad():
        for img, mask, id in tqdm(loader, total=len(loader)):
            img = img.cuda()
            pred = predict(model, img, mask, mode, cfg)

            intersection, union, target = \
                intersectionAndUnion(pred.cpu().numpy(), mask.numpy(), cfg['nclass'], 255)

            reduced_intersection = torch.from_numpy(intersection).cuda()
            reduced_union = torch.from_numpy(union).cuda()
            reduced_target = torch.from_numpy(target).cuda()

            dist.all_reduce(reduced_intersection)
            dist.all_reduce(reduced_union)
            dist.all_reduce(reduced_target)

            intersection_meter.update(reduced_intersection.cpu().numpy())
            union_meter.update(reduced_union.cpu().numpy())

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10) * 100.0
    mIOU = np.mean(iou_class)

    return mIOU, iou_class


def main():
    args = parser.parse_args()

    with open(args.config, "r") as fp:
        cfg = yaml.load(fp, Loader=yaml.Loader)

    labeled_id_path = f'splits/{cfg["dataset"]}/{cfg["split"]}/labeled.txt'
    unlabeled_id_path = f'splits/{cfg["dataset"]}/{cfg["split"]}/unlabeled.txt'

    logger = init_log('global', logging.INFO)
    logger.propagate = 0

    rank, world_size = setup_distributed(port=args.port)

    if rank == 0:
        timestr = datetime.now().strftime("%y%m%d-%H%M")
        uid = str(uuid.uuid4())[:5]
        run_name = f'{timestr}_{cfg["name"]}_v{__version__}_{uid}'.replace('.', '-')
        save_path = f'exp/exp-{cfg["exp"]}/{run_name}'
        os.makedirs(save_path, exist_ok=True)

        formatter = logging.Formatter(fmt='[%(asctime)s] [%(levelname)-8s] %(message)s')
        fileHandler = logging.FileHandler(f'{save_path}/debug.log')
        fileHandler.setFormatter(formatter)
        logger.addHandler(fileHandler)

        all_args = {**cfg, **vars(args), 'labeled_id_path': labeled_id_path,
                    'unlabeled_id_path': unlabeled_id_path, 'ngpus': world_size,
                    'run_name': run_name, 'save_path': save_path,
                    'exec_git_rev': get_git_revision(), 'exec_version': __version__}
        logger.info('{}\n'.format(pprint.pformat(all_args)))

        writer = SummaryWriter(save_path)

        shutil.copyfile(args.config, os.path.join(save_path, 'config.yaml'))
        with open(os.path.join(save_path, 'all_args.yaml'), 'w') as f:
            yaml.dump(all_args, f, default_flow_style=None, sort_keys=False, indent=2)
        gen_code_archive(save_path)

    cudnn.enabled = True
    cudnn.benchmark = True

    model = build_model(cfg)
    if rank == 0:
        logger.info(model)
        logger.info('Total params: {:.1f}M\n'.format(count_params(model)))

    if 'optimizer' not in cfg:
        optimizer = SGD([{'params': model.backbone.parameters(), 'lr': cfg['lr']},
                         {'params': [param for name, param in model.named_parameters() if 'backbone' not in name],
                          'lr': cfg['lr'] * cfg['lr_multi']}],
                        lr=cfg['lr'], momentum=0.9, weight_decay=1e-4)
    else:
        optimizer = build_optimizer(model, cfg['optimizer'])
    # print(len(optimizer.param_groups), 'param groups')
    for group in optimizer.param_groups:
        group.setdefault('initial_lr', group['lr'])
        # print(group['initial_lr'], group['lr'], group['weight_decay'])

    local_rank = int(os.environ["LOCAL_RANK"])
    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda(local_rank)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], broadcast_buffers=False,
                                                      output_device=local_rank,
                                                      find_unused_parameters=('zegclip' in cfg['model']))

    if cfg['criterion']['name'] == 'CELoss':
        criterion = nn.CrossEntropyLoss(**cfg['criterion']['kwargs']).cuda(local_rank)
    elif cfg['criterion']['name'] == 'OHEM':
        criterion = ProbOhemCrossEntropy2d(**cfg['criterion']['kwargs']).cuda(local_rank)
    elif cfg['criterion']['name'] == 'mmseg':
        criterion = None
    else:
        raise NotImplementedError('%s criterion is not implemented' % cfg['criterion']['name'])
trainset = SemiDataset(cfg, 'train_l', id_path=labeled_id_path)
2
2023-11-02 14:49:38+00:00
12k
intellerce/controlanimate
animatediff/models/unet.py
[ { "identifier": "CrossAttnDownBlock3D", "path": "animatediff/models/unet_blocks.py", "snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n output_states = ()\n\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = 
torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "CrossAttnUpBlock3D", "path": "animatediff/models/unet_blocks.py", "snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n res_hidden_states_tuple,\n temb=None,\n encoder_hidden_states=None,\n 
upsample_size=None,\n attention_mask=None,\n ):\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states" }, { "identifier": "DownBlock3D", "path": "animatediff/models/unet_blocks.py", "snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None):\n output_states = ()\n\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n 
return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n\n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "UNetMidBlock3DCrossAttn", "path": "animatediff/models/unet_blocks.py", "snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n\n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n ]\n attentions = []\n motion_modules = []\n\n for _ in range(num_layers):\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n in_channels // attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=in_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n def 
forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet, motion_module in zip(self.attentions, self.resnets[1:], self.motion_modules):\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n hidden_states = resnet(hidden_states, temb)\n\n return hidden_states" }, { "identifier": "UpBlock3D", "path": "animatediff/models/unet_blocks.py", "snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n\n use_inflated_groupnorm=None,\n\n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, encoder_hidden_states=None,):\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states" }, { "identifier": 
"get_down_block", "path": "animatediff/models/unet_blocks.py", "snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n \n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n\n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n):\n down_block_type = down_block_type[7:] if down_block_type.startswith(\"UNetRes\") else down_block_type\n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")" }, { "identifier": "get_up_block", "path": "animatediff/models/unet_blocks.py", "snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n use_inflated_groupnorm=None,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n):\n up_block_type = up_block_type[7:] if up_block_type.startswith(\"UNetRes\") else up_block_type\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n 
resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{up_block_type} does not exist.\")" }, { "identifier": "InflatedConv3d", "path": "animatediff/models/resnet.py", "snippet": "class InflatedConv3d(LoRACompatibleConv):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x" }, { "identifier": "InflatedGroupNorm", "path": "animatediff/models/resnet.py", "snippet": "class InflatedGroupNorm(nn.GroupNorm):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x" } ]
from dataclasses import dataclass from typing import List, Optional, Tuple, Union from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.models.modeling_utils import ModelMixin from diffusers.loaders import UNet2DConditionLoadersMixin from diffusers.utils import BaseOutput, logging from diffusers.models.embeddings import TimestepEmbedding, Timesteps from .unet_blocks import ( CrossAttnDownBlock3D, CrossAttnUpBlock3D, DownBlock3D, UNetMidBlock3DCrossAttn, UpBlock3D, get_down_block, get_up_block, ) from .resnet import InflatedConv3d, InflatedGroupNorm from typing import Any, Dict, List, Optional, Tuple, Union from diffusers.models.attention_processor import ( ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, ) from diffusers.utils import WEIGHTS_NAME, SAFETENSORS_WEIGHTS_NAME import os import json import pdb import torch import torch.nn as nn import torch.utils.checkpoint import safetensors
8,195
cross_attention_dim: int = 1280, attention_head_dim: Union[int, Tuple[int]] = 8, dual_cross_attention: bool = False, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", use_inflated_groupnorm=False, addition_embed_type: Optional[str] = None, addition_time_embed_dim: Optional[int] = None, dropout: float = 0.0, encoder_hid_dim: Optional[int] = None, encoder_hid_dim_type: Optional[str] = None, conv_in_kernel: int = 3, conv_out_kernel: int = 3, attention_type: str = "default", class_embeddings_concat: bool = False, mid_block_only_cross_attention: Optional[bool] = None, cross_attention_norm: Optional[str] = None, addition_embed_type_num_heads=64, transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1, time_embedding_type: str = "positional", time_embedding_dim: Optional[int] = None, time_embedding_act_fn: Optional[str] = None, timestep_post_act: Optional[str] = None, time_cond_proj_dim: Optional[int] = None, # Additional use_motion_module = False, motion_module_resolutions = ( 1,2,4,8 ), motion_module_mid_block = False, motion_module_decoder_only = False, motion_module_type = None, motion_module_kwargs = {}, unet_use_cross_frame_attention = None, unet_use_temporal_attention = None, ): super().__init__() self.sample_size = sample_size time_embed_dim = block_out_channels[0] * 4 # input self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1)) # time self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] # self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, post_act_fn=timestep_post_act, cond_proj_dim=time_cond_proj_dim, ) # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) else: self.class_embedding = None self.down_blocks = nn.ModuleList([]) self.mid_block = None self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): only_cross_attention = [only_cross_attention] * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if class_embeddings_concat: # The time embeddings are concatenated with the class embeddings. The dimension of the # time embeddings passed to the down, middle, and up blocks is twice the dimension of the # regular time embeddings blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim # self.time_embedding = TimestepEmbedding( # timestep_input_dim, # time_embed_dim, # act_fn=act_fn, # post_act_fn=timestep_post_act, # cond_proj_dim=time_cond_proj_dim, # ) # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): res = 2 ** i input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1
# Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class UNet3DConditionOutput(BaseOutput): sample: torch.FloatTensor class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 4, out_channels: int = 4, center_input_sample: bool = False, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str] = ( "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D", ), mid_block_type: str = "UNetMidBlock3DCrossAttn", up_block_types: Tuple[str] = ( "UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D" ), only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int] = (320, 640, 1280, 1280), layers_per_block: int = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: int = 32, norm_eps: float = 1e-5, cross_attention_dim: int = 1280, attention_head_dim: Union[int, Tuple[int]] = 8, dual_cross_attention: bool = False, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", use_inflated_groupnorm=False, addition_embed_type: Optional[str] = None, addition_time_embed_dim: Optional[int] = None, dropout: float = 0.0, encoder_hid_dim: Optional[int] = None, encoder_hid_dim_type: Optional[str] = None, conv_in_kernel: int = 3, conv_out_kernel: int = 3, attention_type: str = "default", class_embeddings_concat: bool = False, mid_block_only_cross_attention: Optional[bool] = None, cross_attention_norm: Optional[str] = None, addition_embed_type_num_heads=64, transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1, time_embedding_type: str = "positional", time_embedding_dim: Optional[int] = None, time_embedding_act_fn: Optional[str] = None, timestep_post_act: Optional[str] = None, time_cond_proj_dim: Optional[int] = None, # Additional use_motion_module = False, motion_module_resolutions = ( 1,2,4,8 ), motion_module_mid_block = False, motion_module_decoder_only = False, motion_module_type = None, motion_module_kwargs = {}, unet_use_cross_frame_attention = None, unet_use_temporal_attention = None, ): super().__init__() self.sample_size = sample_size time_embed_dim = block_out_channels[0] * 4 # input self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1)) # time self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] # self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, post_act_fn=timestep_post_act, cond_proj_dim=time_cond_proj_dim, ) # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) else: self.class_embedding = None self.down_blocks = nn.ModuleList([]) self.mid_block = None self.up_blocks = 
nn.ModuleList([]) if isinstance(only_cross_attention, bool): only_cross_attention = [only_cross_attention] * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if class_embeddings_concat: # The time embeddings are concatenated with the class embeddings. The dimension of the # time embeddings passed to the down, middle, and up blocks is twice the dimension of the # regular time embeddings blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim # self.time_embedding = TimestepEmbedding( # timestep_input_dim, # time_embed_dim, # act_fn=act_fn, # post_act_fn=timestep_post_act, # cond_proj_dim=time_cond_proj_dim, # ) # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): res = 2 ** i input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
5
2023-11-04 01:35:44+00:00
12k
Zaczero/openstreetmap-ng
scripts/load_osm.py
[ { "identifier": "Format06", "path": "src/lib/format/format06.py", "snippet": "class Format06:\n @staticmethod\n def _encode_tags(tags: dict) -> Sequence[dict] | dict:\n if format_is_json():\n return tags\n else:\n return tuple({'@k': k, '@v': v} for k, v in tags.items())\n\n @staticmethod\n def _decode_tags_unsafe(tags: Sequence[dict]) -> dict:\n \"\"\"\n This method does not validate the input data.\n\n >>> _decode_tags_unsafe([\n ... {'@k': 'a', '@v': '1'},\n ... {'@k': 'b', '@v': '2'},\n ... ])\n {'a': '1', 'b': '2'}\n \"\"\"\n\n items = tuple((tag['@k'], tag['@v']) for tag in tags)\n result = dict(items)\n\n if len(items) != len(result):\n raise ValueError('Duplicate tags keys')\n\n return result\n\n @staticmethod\n def decode_tags_and_validate(tags: Sequence[dict]) -> dict:\n \"\"\"\n >>> decode_tags_and_validate([\n ... {'@k': 'a', '@v': '1'},\n ... {'@k': 'b', '@v': '2'},\n ... ])\n {'a': '1', 'b': '2'}\n \"\"\"\n\n return TagsValidating(tags=Format06._decode_tags_unsafe(tags)).tags\n\n @staticmethod\n def _encode_point(point: Point | None) -> dict:\n \"\"\"\n >>> _encode_point(Point(1, 2))\n {'@lon': 1, '@lat': 2}\n \"\"\"\n\n if not point:\n return {}\n\n return {\n XAttr('lon'): point.x,\n XAttr('lat'): point.y,\n }\n\n @staticmethod\n def _decode_point_unsafe(data: dict) -> Point | None:\n \"\"\"\n This method does not validate the input data.\n\n >>> _decode_point_unsafe({'@lon': '1', '@lat': '2'})\n POINT (1 2)\n \"\"\"\n\n if (lon := data.get('@lon')) is None or (lat := data.get('@lat')) is None:\n return None\n\n return Point(\n float(lon),\n float(lat),\n )\n\n @staticmethod\n def _encode_nodes(nodes: Sequence[ElementMemberRef]) -> Sequence[dict] | Sequence[int]:\n \"\"\"\n >>> _encode_nodes([\n ... ElementMember(type=ElementType.node, typed_id=1, role=''),\n ... ElementMember(type=ElementType.node, typed_id=2, role=''),\n ... ])\n ({'@ref': 1}, {'@ref': 2})\n \"\"\"\n\n if format_is_json():\n return tuple(node.typed_id for node in nodes)\n else:\n return tuple({'@ref': node.typed_id} for node in nodes)\n\n @staticmethod\n def _decode_nodes_unsafe(nodes: Sequence[dict]) -> Sequence[ElementMemberRef]:\n \"\"\"\n This method does not validate the input data.\n\n >>> _decode_nodes_unsafe([{'@ref': '1'}])\n [ElementMember(type=ElementType.node, typed_id=1, role='')]\n \"\"\"\n\n return tuple(\n ElementMemberRef(\n type=ElementType.node,\n typed_id=int(node['@ref']),\n role='',\n )\n for node in nodes\n )\n\n @staticmethod\n def _encode_members(members: Sequence[ElementMemberRef]) -> Sequence[dict]:\n \"\"\"\n >>> _encode_members([\n ... ElementMember(type=ElementType.node, typed_id=1, role='a'),\n ... ElementMember(type=ElementType.way, typed_id=2, role='b'),\n ... ])\n (\n {'@type': 'node', '@ref': 1, '@role': 'a'},\n {'@type': 'way', '@ref': 2, '@role': 'b'},\n )\n \"\"\"\n\n return tuple(\n {\n XAttr('type'): member.type.value,\n XAttr('ref'): member.typed_id,\n XAttr('role'): member.role,\n }\n for member in members\n )\n\n @staticmethod\n def _decode_members_unsafe(members: Sequence[dict]) -> Sequence[ElementMemberRef]:\n \"\"\"\n This method does not validate the input data.\n\n >>> _decode_members_unsafe([\n ... {'@type': 'node', '@ref': '1', '@role': 'a'},\n ... 
])\n [ElementMember(type=ElementType.node, typed_id=1, role='a')]\n \"\"\"\n\n return tuple(\n ElementMemberRef(\n type=ElementType.from_str(member['@type']),\n typed_id=int(member['@ref']),\n role=member['@role'],\n )\n for member in members\n )\n\n @staticmethod\n def encode_element(element: Element) -> dict:\n \"\"\"\n >>> encode_element(Element(type=ElementType.node, typed_id=1, version=1, ...))\n {'node': {'@id': 1, '@version': 1, ...}}\n \"\"\"\n\n if format_is_json():\n return {\n 'type': element.type.value,\n 'id': element.typed_id,\n **(Format06._encode_point(element.point) if element.type == ElementType.node else {}),\n 'version': element.version,\n 'timestamp': element.created_at,\n 'changeset': element.changeset_id,\n 'uid': element.user_id,\n 'user': element.user.display_name,\n 'visible': element.visible,\n 'tags': element.tags,\n **({'nodes': Format06._encode_nodes(element.members)} if element.type == ElementType.way else {}),\n **(\n {'members': Format06._encode_members(element.members)}\n if element.type == ElementType.relation\n else {}\n ),\n }\n else:\n return {\n element.type.value: {\n '@id': element.typed_id,\n **(Format06._encode_point(element.point) if element.type == ElementType.node else {}),\n '@version': element.version,\n '@timestamp': element.created_at,\n '@changeset': element.changeset_id,\n '@uid': element.user_id,\n '@user': element.user.display_name,\n '@visible': element.visible,\n 'tag': Format06._encode_tags(element.tags),\n **({'nd': Format06._encode_nodes(element.members)} if element.type == ElementType.way else {}),\n **(\n {'member': Format06._encode_members(element.members)}\n if element.type == ElementType.relation\n else {}\n ),\n }\n }\n\n @staticmethod\n def decode_element(element: dict, changeset_id: int | None) -> Element:\n \"\"\"\n If `changeset_id` is `None`, it will be extracted from the element data.\n \"\"\"\n\n if len(element) != 1:\n raise ValueError(f'Expected one root element, got {len(element)}')\n\n type, data = next(iter(element.items()))\n type = ElementType.from_str(type)\n data: dict\n\n # decode members from either nd or member\n if data_nodes := data.get('nd'):\n members = Format06._decode_nodes_unsafe(data_nodes)\n elif data_members := data.get('member'):\n members = Format06._decode_members_unsafe(data_members)\n else:\n members = ()\n\n return Element(\n **ElementValidating(\n user_id=auth_user().id,\n changeset_id=changeset_id or data.get('@changeset'),\n type=type,\n typed_id=data.get('@id'),\n version=data.get('@version', 0) + 1,\n visible=data.get('@visible', True),\n tags=Format06._decode_tags_unsafe(data.get('tag', ())),\n point=Format06._decode_point_unsafe(data),\n members=members,\n ).to_orm_dict()\n )\n\n @staticmethod\n def encode_elements(elements: Sequence[Element]) -> dict[str, Sequence[dict]]:\n \"\"\"\n >>> encode_elements([\n ... Element(type=ElementType.node, typed_id=1, version=1, ...),\n ... Element(type=ElementType.way, typed_id=2, version=1,\n ... 
])\n {'node': [{'@id': 1, '@version': 1, ...}], 'way': [{'@id': 2, '@version': 1, ...}]}\n \"\"\"\n\n if format_is_json():\n return {'elements': tuple(Format06.encode_element(element) for element in elements)}\n else:\n result: dict[str, list[dict]] = defaultdict(list)\n for element in elements:\n result[element.type.value].append(Format06.encode_element(element))\n return result\n\n @staticmethod\n def _encode_changeset_comment(comment: ChangesetComment) -> dict:\n \"\"\"\n >>> _encode_changeset_comment(ChangesetComment(...))\n {'@uid': 1, '@user': ..., '@date': ..., 'text': 'lorem ipsum'}\n \"\"\"\n\n return {\n XAttr('id'): comment.id,\n XAttr('date'): comment.created_at,\n XAttr('uid'): comment.user_id,\n XAttr('user'): comment.user.display_name,\n 'text': comment.body,\n }\n\n @staticmethod\n def encode_changeset(changeset: Changeset, *, add_comments_count: int = 0) -> dict:\n \"\"\"\n >>> encode_changeset(Changeset(...))\n {'changeset': {'@id': 1, '@created_at': ..., ..., 'discussion': {'comment': [...]}}}\n \"\"\"\n\n if changeset.boundary:\n minx, miny, maxx, maxy = changeset.boundary.bounds\n boundary_d = {\n XAttr('minlon', custom_xml='min_lon'): minx,\n XAttr('minlat', custom_xml='min_lat'): miny,\n XAttr('maxlon', custom_xml='max_lon'): maxx,\n XAttr('maxlat', custom_xml='max_lat'): maxy,\n }\n else:\n boundary_d = {}\n\n try:\n _ = changeset.comments\n has_comments = True\n except InvalidRequestError:\n has_comments = False\n\n if format_is_json():\n return {\n 'type': 'changeset',\n 'id': changeset.id,\n 'created_at': changeset.created_at,\n **({'closed_at': changeset.closed_at} if changeset.closed_at else {}),\n 'open': not changeset.closed_at,\n 'uid': changeset.user_id,\n 'user': changeset.user.display_name,\n **boundary_d,\n 'comments_count': len(changeset.comments) + add_comments_count,\n 'changes_count': changeset.size,\n 'tags': changeset.tags,\n **(\n {'discussion': tuple(Format06._encode_changeset_comment(comment) for comment in changeset.comments)}\n if has_comments\n else {}\n ),\n }\n else:\n return {\n 'changeset': {\n '@id': changeset.id,\n '@created_at': changeset.created_at,\n **({'@closed_at': changeset.closed_at} if changeset.closed_at else {}),\n '@open': not changeset.closed_at,\n '@uid': changeset.user_id,\n '@user': changeset.user.display_name,\n **boundary_d,\n '@comments_count': len(changeset.comments) + add_comments_count,\n '@changes_count': changeset.size,\n 'tag': Format06._encode_tags(changeset.tags),\n **(\n {\n 'discussion': {\n 'comment': tuple(\n Format06._encode_changeset_comment(comment) for comment in changeset.comments\n ),\n }\n }\n if has_comments\n else {}\n ),\n }\n }\n\n @staticmethod\n def encode_changesets(changesets: Sequence[Changeset]) -> dict:\n \"\"\"\n >>> encode_changesets([\n ... Changeset(...),\n ... Changeset(...),\n ... ])\n {'changeset': [{'@id': 1, '@created_at': ..., ..., 'discussion': {'comment': [...]}}]}\n \"\"\"\n\n if format_is_json():\n return {'elements': tuple(Format06.encode_changeset(changeset) for changeset in changesets)}\n else:\n return {'changeset': tuple(Format06.encode_changeset(changeset)['changeset'] for changeset in changesets)}\n\n @staticmethod\n def encode_osmchange(elements: Sequence[Element]) -> Sequence[tuple[str, dict]]:\n \"\"\"\n >>> encode_osmchange([\n ... Element(type=ElementType.node, typed_id=1, version=1, ...),\n ... Element(type=ElementType.way, typed_id=2, version=2, ...)\n ... 
])\n [\n ('create', {'node': [{'@id': 1, '@version': 1, ...}]}),\n ('modify', {'way': [{'@id': 2, '@version': 2, ...}]}),\n ]\n \"\"\"\n\n result = [None] * len(elements)\n for i, element in len(elements):\n if element.version == 1:\n action = OSMChangeAction.create.value\n elif element.visible:\n action = OSMChangeAction.modify.value\n else:\n action = OSMChangeAction.delete.value\n result[i] = (action, Format06.encode_element(element))\n return result\n\n @staticmethod\n def decode_osmchange(elements: Sequence[tuple[str, dict]], changeset_id: int | None) -> Sequence[Element]:\n \"\"\"\n If `changeset_id` is `None`, it will be extracted from the element data.\n\n >>> decode_osmchange([\n ... ('create', {'node': [{'@id': 1, '@version': 1, ...}]}),\n ... ('modify', {'way': [{'@id': 2, '@version': 2, ...}]}),\n ... ])\n [Element(type=ElementType, ...), Element(type=ElementType.way, ...)]\n \"\"\"\n\n result = [None] * len(elements)\n\n for i, (action, element_d) in enumerate(elements):\n if len(element_d) != 1:\n raise ValueError(f'Expected one element in {action!r}, got {len(element_d)}')\n\n element = Format06.decode_element(element_d, changeset_id)\n\n if action == OSMChangeAction.create.value:\n if element.id > 0:\n raise_for().diff_create_bad_id(element.versioned_ref)\n if element.version > 1:\n element.version = 1\n elif action == OSMChangeAction.modify.value:\n if element.version < 2:\n raise_for().diff_update_bad_version(element.versioned_ref)\n elif action == OSMChangeAction.delete.value:\n if element.version < 2:\n raise_for().diff_update_bad_version(element.versioned_ref)\n if element.visible:\n element.visible = False\n else:\n raise_for().diff_unsupported_action(action)\n\n result[i] = element\n\n return result\n\n @staticmethod\n def encode_diff_result(assigned_ref_map: dict[TypedElementRef, Sequence[Element]]) -> Sequence[tuple]:\n \"\"\"\n >>> encode_diff_result({\n ... TypedElementRef(type=ElementType.node, typed_id=-1): [\n ... Element(type=ElementType.node, typed_id=1, version=1, ...),\n ... Element(type=ElementType.node, typed_id=1, version=2, ...),\n ... ],\n ... })\n (\n ('node', {'@old_id': -1, '@new_id': 1, '@new_version': 1}),\n ('node', {'@old_id': -1, '@new_id': 1, '@new_version': 2})\n )\n \"\"\"\n\n return tuple(\n (\n typed_ref.type.value,\n {\n '@old_id': typed_ref.typed_id,\n '@new_id': element.typed_id,\n '@new_version': element.version,\n },\n )\n for typed_ref, elements in assigned_ref_map.items()\n for element in elements\n )\n\n @staticmethod\n def encode_tracks(trace_points: Sequence[TracePoint]) -> dict:\n \"\"\"\n >>> encode_tracks([\n ... TracePoint(...),\n ... TracePoint(...),\n ... 
])\n {'trk': [{'trkseg': [{'trkpt': [{'@lon': 1, '@lat': 2}, {'@lon': 3, '@lat': 4}]}]}]}\n \"\"\"\n\n trks = []\n trk_trksegs = []\n trk_trkseg_trkpts = []\n\n last_trk_id = None\n last_trkseg_id = None\n\n for tp in trace_points:\n trace = tp.trace\n\n # if trace is available via api, encode full information\n if trace.timestamps_via_api:\n # handle track change\n if last_trk_id != trace.id:\n if trace.visibility == TraceVisibility.identifiable:\n url = f'/user/permalink/{trace.user_id}/traces/{trace.id}'\n else:\n url = None\n\n trk_trksegs = []\n trks.append(\n {\n 'name': trace.name,\n 'desc': trace.description,\n **({'url': url} if url else {}),\n 'trkseg': trk_trksegs,\n }\n )\n last_trk_id = trace.id\n last_trkseg_id = None\n\n # handle track segment change\n if last_trkseg_id != tp.track_idx:\n trk_trkseg_trkpts = []\n trk_trksegs.append({'trkpt': trk_trkseg_trkpts})\n last_trkseg_id = tp.track_idx\n\n # add point\n trk_trkseg_trkpts.append(\n {\n **Format06._encode_point(tp.point),\n **({'ele': tp.elevation} if tp.elevation is not None else {}),\n 'time': tp.captured_at,\n }\n )\n\n # otherwise, encode only coordinates\n else:\n # handle track and track segment change\n if last_trk_id is not None or last_trkseg_id is not None:\n trk_trksegs = []\n trks.append({'trkseg': trk_trksegs})\n trk_trkseg_trkpts = []\n trk_trksegs.append({'trkpt': trk_trkseg_trkpts})\n last_trk_id = None\n last_trkseg_id = None\n\n trk_trkseg_trkpts.append(Format06._encode_point(tp.point))\n\n return {'trk': trks}\n\n @staticmethod\n def decode_tracks(tracks: Sequence[dict], *, track_idx_start: int = 0) -> Sequence[TracePoint]:\n \"\"\"\n >>> decode_tracks([{'trkseg': [{'trkpt': [{'@lon': 1, '@lat': 2}]}]}])\n [TracePoint(...)]\n \"\"\"\n\n result = []\n\n for trk in tracks:\n trk: dict\n for track_idx, trkseg in enumerate(trk.get('trkseg', []), track_idx_start):\n trkseg: dict\n for trkpt in trkseg.get('trkpt', []):\n trkpt: dict\n\n result.append(\n TracePoint(\n **TracePointValidating(\n track_idx=track_idx,\n captured_at=datetime.fromisoformat(time) if (time := trkpt.get('time')) else None,\n point=Format06._decode_point_unsafe(trkpt),\n elevation=trkpt.get('ele'),\n ).to_orm_dict()\n )\n )\n\n return result\n\n @staticmethod\n def encode_gpx_file(trace: Trace) -> dict:\n \"\"\"\n >>> encode_gpx_file(Trace(...))\n {'gpx_file': {'@id': 1, '@uid': 1234, ...}}\n \"\"\"\n\n return {\n 'gpx_file': {\n '@id': trace.id,\n '@uid': trace.user_id,\n '@user': trace.user.display_name,\n '@timestamp': trace.created_at,\n '@name': trace.name,\n '@lon': trace.start_point.x,\n '@lat': trace.start_point.y,\n '@visibility': trace.visibility.value,\n '@pending': False,\n 'description': trace.description,\n 'tag': trace.tags,\n }\n }\n\n @staticmethod\n def encode_gpx_files(traces: Sequence[Trace]) -> dict:\n \"\"\"\n >>> encode_gpx_files([\n ... Trace(...),\n ... Trace(...),\n ... 
])\n {'gpx_file': [{'@id': 1, '@uid': 1234, ...}, {'@id': 2, '@uid': 1234, ...}]}\n \"\"\"\n\n return {\n 'gpx_file': tuple(Format06.encode_gpx_file(trace) for trace in traces),\n }\n\n @staticmethod\n def decode_gpx_file(gpx_file: dict) -> Trace:\n return Trace(\n **TraceValidating(\n user_id=auth_user().id,\n name=gpx_file.get('@name'),\n description=gpx_file.get('description'),\n visibility=TraceVisibility(gpx_file.get('@visibility')),\n size=1,\n start_point=Point(0, 0),\n tags=gpx_file.get('tag', ()),\n ).to_orm_dict()\n )\n\n @staticmethod\n def _encode_note_comment(comment: NoteComment) -> dict:\n \"\"\"\n >>> _encode_note_comment(NoteComment(...))\n {'date': '2019-06-15 08:26:04 UTC', 'uid': 1234, 'user': 'userName', ...}\n \"\"\"\n\n return {\n 'date': format_sql_date(comment.created_at),\n 'uid': comment.user_id,\n 'user': comment.user.display_name,\n 'user_url': comment.user.permalink,\n 'action': comment.event.value,\n 'text': comment.body,\n 'html': comment.body_rich.value, # a disaster waiting to happen\n }\n\n @staticmethod\n def encode_note(note: Note) -> dict:\n \"\"\"\n >>> encode_note(Note(...))\n {'note': {'@lon': 0.1, '@lat': 51, 'id': 16659, ...}}\n \"\"\"\n\n style = format_style()\n\n if style == FormatStyle.json:\n return {\n 'type': 'Feature',\n 'geometry': mapping(note.point),\n 'properties': {\n 'id': note.id,\n 'url': f'{API_URL}/api/0.6/notes/{note.id}.json',\n **(\n {\n 'reopen_url': f'{API_URL}/api/0.6/notes/{note.id}/reopen.json',\n }\n if note.closed_at\n else {\n 'comment_url': f'{API_URL}/api/0.6/notes/{note.id}/comment.json',\n 'close_url': f'{API_URL}/api/0.6/notes/{note.id}/close.json',\n }\n ),\n 'date_created': format_sql_date(note.created_at),\n **({'closed_at': format_sql_date(note.closed_at)} if note.closed_at else {}),\n 'status': note.status.value,\n 'comments': tuple(Format06._encode_note_comment(comment) for comment in note.comments),\n },\n }\n elif style == FormatStyle.gpx:\n return {\n 'wpt': {\n **Format06._encode_point(note.point),\n 'time': note.created_at,\n 'name': f'Note: {note.id}',\n 'link': {'href': note.permalink},\n 'desc': ET.CDATA(render('api/0.6/note_comments_rss.jinja2', comments=note.comments)),\n 'extensions': {\n 'id': note.id,\n 'url': f'{API_URL}/api/0.6/notes/{note.id}.gpx',\n **(\n {\n 'reopen_url': f'{API_URL}/api/0.6/notes/{note.id}/reopen.gpx',\n }\n if note.closed_at\n else {\n 'comment_url': f'{API_URL}/api/0.6/notes/{note.id}/comment.gpx',\n 'close_url': f'{API_URL}/api/0.6/notes/{note.id}/close.gpx',\n }\n ),\n 'date_created': format_sql_date(note.created_at),\n **({'date_closed': format_sql_date(note.closed_at)} if note.closed_at else {}),\n 'status': note.status.value,\n },\n }\n }\n else:\n return {\n 'note': {\n **Format06._encode_point(note.point),\n 'id': note.id,\n 'url': f'{API_URL}/api/0.6/notes/{note.id}',\n **(\n {\n 'reopen_url': f'{API_URL}/api/0.6/notes/{note.id}/reopen',\n }\n if note.closed_at\n else {\n 'comment_url': f'{API_URL}/api/0.6/notes/{note.id}/comment',\n 'close_url': f'{API_URL}/api/0.6/notes/{note.id}/close',\n }\n ),\n 'date_created': format_sql_date(note.created_at),\n **({'date_closed': format_sql_date(note.closed_at)} if note.closed_at else {}),\n 'status': note.status.value,\n 'comments': {\n 'comment': tuple(Format06._encode_note_comment(comment) for comment in note.comments),\n },\n }\n }\n\n @staticmethod\n def encode_notes(notes: Sequence[Note]) -> dict:\n \"\"\"\n >>> encode_notes([\n ... Note(...),\n ... Note(...),\n ... 
])\n {'note': [{'@lon': 1, '@lat': 2, 'id': 1, ...}]}\n \"\"\"\n\n style = format_style()\n\n if style == FormatStyle.json:\n return {'type': 'FeatureCollection', 'features': tuple(Format06.encode_note(note) for note in notes)}\n elif style == FormatStyle.gpx:\n return {'wpt': tuple(Format06.encode_note(note)['wpt'] for note in notes)}\n else:\n return {'note': tuple(Format06.encode_note(note)['note'] for note in notes)}\n\n @staticmethod\n def _encode_languages(languages: Sequence[str]) -> dict | Sequence[str]:\n \"\"\"\n >>> _encode_languages(['en', 'pl'])\n {'lang': ('en', 'pl')}\n \"\"\"\n\n if format_is_json():\n return tuple(languages)\n else:\n return {'lang': tuple(languages)}\n\n @staticmethod\n async def encode_user(user: User) -> dict:\n \"\"\"\n >>> encode_user(User(...))\n {'user': {'@id': 1234, '@display_name': 'userName', ...}}\n \"\"\"\n\n current_user = auth_user()\n access_private = current_user and current_user.id == user.id\n\n changesets_count = 0\n traces_count = 0\n block_received_count = 0\n block_received_active_count = 0\n block_issued_count = 0\n block_issued_active_count = 0\n messages_received_count = 0\n messages_received_unread_count = 0\n messages_sent_count = 0\n\n async def changesets_count_task() -> None:\n nonlocal changesets_count\n changesets_count = await ChangesetRepository.count_by_user_id(user.id)\n\n async def traces_count_task() -> None:\n nonlocal traces_count\n traces_count = await TraceRepository.count_by_user_id(user.id)\n\n async def block_received_count_task() -> None:\n nonlocal block_received_count, block_received_active_count\n total, active = await UserBlockRepository.count_received_by_user_id(user.id)\n block_received_count = total\n block_received_active_count = active\n\n async def block_issued_count_task() -> None:\n nonlocal block_issued_count, block_issued_active_count\n total, active = await UserBlockRepository.count_given_by_user_id(user.id)\n block_issued_count = total\n block_issued_active_count = active\n\n async def messages_received_count_task() -> None:\n nonlocal messages_received_count, messages_received_unread_count\n total, unread = await MessageRepository.count_received_by_user_id(user.id)\n messages_received_count = total\n messages_received_unread_count = unread\n\n async def messages_sent_count_task() -> None:\n nonlocal messages_sent_count\n messages_sent_count = await MessageRepository.count_sent_by_user_id(user.id)\n\n async with anyio.create_task_group() as tg:\n tg.start_soon(changesets_count_task)\n tg.start_soon(traces_count_task)\n tg.start_soon(block_received_count_task)\n tg.start_soon(block_issued_count_task)\n\n if access_private:\n tg.start_soon(messages_received_count_task)\n tg.start_soon(messages_sent_count_task)\n\n return {\n 'user': {\n XAttr('id'): user.id,\n XAttr('display_name'): user.display_name,\n XAttr('account_created'): user.created_at,\n 'description': user.description,\n ('contributor_terms' if format_is_json() else 'contributor-terms'): {\n XAttr('agreed'): True,\n **({XAttr('pd'): user.consider_public_domain} if access_private else {}),\n },\n 'img': {XAttr('href'): user.avatar_url},\n 'roles': [role.value for role in user.roles],\n 'changesets': {XAttr('count'): changesets_count},\n 'traces': {XAttr('count'): traces_count},\n 'blocks': {\n 'received': {\n XAttr('count'): block_received_count,\n XAttr('active'): block_received_active_count,\n },\n 'issued': {\n XAttr('count'): block_issued_count,\n XAttr('active'): block_issued_active_count,\n },\n },\n # private section\n **(\n {\n 
**(\n {\n 'home': {\n **Format06._encode_point(user.home_point),\n XAttr('zoom'): user.home_zoom,\n }\n }\n if user.home_point\n else {}\n ),\n 'languages': Format06._encode_languages(user.languages),\n 'messages': {\n 'received': {\n XAttr('count'): messages_received_count,\n XAttr('unread'): messages_received_unread_count,\n },\n 'sent': {XAttr('count'): messages_sent_count},\n },\n }\n if access_private\n else {}\n ),\n }\n }\n\n @staticmethod\n async def encode_users(users: Sequence[User]) -> dict:\n \"\"\"\n >>> encode_users([\n ... User(...),\n ... User(...),\n ... ])\n {'user': [{'@id': 1234, '@display_name': 'userName', ...}]}\n \"\"\"\n\n encoded = [None] * len(users)\n\n async def task(i: int, user: User):\n encoded[i] = await Format06.encode_user(user)\n\n async with anyio.create_task_group() as tg:\n for i, user in enumerate(users):\n tg.start_soon(task, i, user)\n\n if format_is_json():\n return {'users': tuple(user for user in encoded)}\n else:\n return {'user': tuple(user['user'] for user in encoded)}\n\n @staticmethod\n def decode_user_preference(pref: dict) -> UserPref:\n \"\"\"\n >>> decode_user_preference({'@k': 'key', '@v': 'value'})\n UserPref(key='key', value='value')\n \"\"\"\n\n return UserPref(\n **UserPrefValidating(\n user_id=auth_user().id,\n app_id=None, # 0.6 api does not support prefs partitioning\n key=pref['@k'],\n value=pref['@v'],\n ).to_orm_dict()\n )\n\n @staticmethod\n def decode_user_preferences(prefs: Sequence[dict]) -> Sequence[UserPref]:\n \"\"\"\n >>> decode_user_preferences([{'@k': 'key', '@v': 'value'}])\n [UserPref(key='key', value='value')]\n \"\"\"\n\n seen_keys = set()\n\n for pref in prefs:\n key = pref['@k']\n if key in seen_keys:\n raise_for().pref_duplicate_key(key)\n seen_keys.add(key)\n\n return tuple(Format06.decode_user_preference(pref) for pref in prefs)\n\n @staticmethod\n def encode_user_preferences(prefs: Sequence[UserPref]) -> dict:\n \"\"\"\n >>> encode_user_preferences([\n ... UserPref(key='key1', value='value1'),\n ... UserPref(key='key2', value='value2'),\n ... 
])\n {'preferences': {'preference': [{'@k': 'key1', '@v': 'value1'}, {'@k': 'key2', '@v': 'value2'}]}}\n \"\"\"\n\n if format_is_json():\n return {\n 'preferences': {pref.key: pref.value for pref in prefs},\n }\n else:\n return {\n 'preferences': {\n 'preference': tuple(\n {\n '@k': pref.key,\n '@v': pref.value,\n }\n for pref in prefs\n )\n }\n }" }, { "identifier": "XMLToDict", "path": "src/lib_cython/xmltodict.py", "snippet": "class XMLToDict:\n \"\"\"\n Uses standard library `xml.etree.ElementTree` for XML parsing, and `lxml.etree` for XML unparsing.\n \"\"\"\n\n force_list = frozenset(\n (\n 'create',\n 'modify',\n 'delete',\n 'node',\n 'way',\n 'relation',\n 'member',\n 'tag',\n 'nd',\n 'trk',\n 'trkseg',\n 'trkpt',\n 'preference',\n )\n )\n\n postprocessor_d = MappingProxyType(\n {\n '@changeset': int,\n '@closed_at': datetime.fromisoformat,\n '@comments_count': int,\n '@created_at': datetime.fromisoformat,\n '@id': int,\n '@lat': float,\n '@lon': float,\n '@max_lat': float,\n '@max_lon': float,\n '@min_lat': float,\n '@min_lon': float,\n '@num_changes': int,\n '@open': lambda x: x == 'true',\n '@ref': int,\n '@timestamp': datetime.fromisoformat,\n '@uid': int,\n '@version': lambda x: int(x) if x.isdigit() else float(x),\n '@visible': lambda x: x == 'true',\n }\n )\n\n @staticmethod\n def parse(xml_b: bytes, *, sequence: cython.char = False) -> dict:\n \"\"\"\n Parse XML string to dict.\n\n If `sequence` is `True`, then the root element is parsed as a sequence.\n \"\"\"\n\n if len(xml_b) > XML_PARSE_MAX_SIZE:\n raise_for().input_too_big(len(xml_b))\n\n logging.debug('Parsing %s XML string', naturalsize(len(xml_b), True))\n root = stdET.fromstring(xml_b) # noqa: S314\n return {_strip_namespace(root.tag): _parse_element(sequence, root, is_root=True)}\n\n @staticmethod\n def unparse(d: Mapping, *, raw: cython.char = False) -> str | bytes:\n \"\"\"\n Unparse dict to XML string.\n\n If `raw` is `True`, then the result is returned as raw bytes.\n \"\"\"\n\n # TODO: ensure valid XML charset (encode if necessary) /user/小智智/traces/10908782\n\n if len(d) != 1:\n raise ValueError(f'Invalid root element count {len(d)}')\n\n root_k, root_v = next(iter(d.items()))\n elements = _unparse_element(root_k, root_v)\n\n # always return root element, even if it's empty\n if not elements:\n elements = (ET.Element(root_k),)\n\n result: bytes = ET.tostring(elements[0], encoding='UTF-8', xml_declaration=True)\n logging.debug('Unparsed %s XML string', naturalsize(len(result), True))\n\n if raw:\n return result\n else:\n return result.decode()" }, { "identifier": "Element", "path": "src/models/db/element.py", "snippet": "class Element(Base.Sequential, CreatedAtMixin):\n __tablename__ = 'element'\n\n user_id: Mapped[int] = mapped_column(ForeignKey(User.id), nullable=False)\n user: Mapped[User] = relationship(lazy='raise')\n changeset_id: Mapped[int] = mapped_column(ForeignKey(Changeset.id), nullable=False)\n changeset: Mapped[Changeset] = relationship(back_populates='elements', lazy='raise')\n type: Mapped[ElementType] = mapped_column(Enum(ElementType), nullable=False)\n typed_id: Mapped[int] = mapped_column(BigInteger, nullable=False)\n version: Mapped[int] = mapped_column(BigInteger, nullable=False)\n visible: Mapped[bool] = mapped_column(Boolean, nullable=False)\n tags: Mapped[dict[str, str]] = mapped_column(JSONB, nullable=False)\n point: Mapped[Point | None] = mapped_column(PointType, nullable=True)\n members: Mapped[list[ElementMemberRef]] = mapped_column(ElementMemberRefType, nullable=False)\n\n # 
defaults\n superseded_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True, default=None)\n\n __table_args__ = (UniqueConstraint(type, typed_id, version),)\n\n @validates('typed_id')\n def validate_typed_id(self, _: str, value: int):\n if value <= 0:\n raise ValueError('typed_id must be positive')\n return value\n\n @validates('members')\n def validate_members(self, _: str, value: Sequence[ElementMemberRef]):\n if any(member.typed_id <= 0 for member in value):\n raise ValueError('members typed_id must be positive')\n return value\n\n @updating_cached_property('typed_id')\n def typed_ref(self) -> TypedElementRef:\n return TypedElementRef(type=self.type, typed_id=self.typed_id)\n\n @updating_cached_property('typed_id')\n def versioned_ref(self) -> VersionedElementRef:\n return VersionedElementRef(type=self.type, typed_id=self.typed_id, version=self.version)" }, { "identifier": "ElementMemberRef", "path": "src/models/element_member.py", "snippet": "class ElementMemberRef(TypedElementRef):\n role: EmptyStr255" }, { "identifier": "ElementType", "path": "src/models/element_type.py", "snippet": "class ElementType(BaseEnum):\n node = 'node'\n way = 'way'\n relation = 'relation'\n\n @classmethod\n def from_str(cls, s: str) -> Self:\n if s.startswith('n'):\n return cls.node\n elif s.startswith('w'):\n return cls.way\n elif s.startswith('r'):\n return cls.relation\n else:\n raise ValueError(f'Unknown element type {s!r}')" }, { "identifier": "TypedElementRef", "path": "src/models/typed_element_ref.py", "snippet": "class TypedElementRef:\n type: ElementType\n typed_id: int\n\n @property\n def typed_ref(self) -> Self:\n return TypedElementRef(\n type=self.type,\n typed_id=self.typed_id,\n )\n\n def __str__(self) -> str:\n \"\"\"\n Produce a string representation of the element reference.\n\n >>> TypedElementRef(ElementType.node, 123)\n 'n123'\n \"\"\"\n\n return f'{self.type.value[0]}{self.typed_id}'\n\n @classmethod\n def from_str(cls, s: str) -> Self:\n \"\"\"\n Parse an element reference from a string representation.\n\n >>> TypedElementRef.from_str('n123')\n TypedElementRef(type=<ElementType.node: 'node'>, id=123)\n \"\"\"\n\n type, id = s[0], s[1:]\n type = ElementType.from_str(type)\n typed_id = int(id)\n\n if typed_id == 0:\n raise ValueError('Element id cannot be 0')\n\n return cls(type, typed_id)" } ]
import sys import anyio import xmltodict from datetime import datetime from pathlib import Path from shapely.geometry import Point from src.lib.format.format06 import Format06 from src.lib_cython.xmltodict import XMLToDict from src.models.db.element import Element from src.models.db.element_node import ElementNode from src.models.db.element_relation import ElementRelation from src.models.db.element_way import ElementWay from src.models.element_member import ElementMemberRef from src.models.element_type import ElementType from src.models.typed_element_ref import TypedElementRef
10,295
async def main(): load_path = Path(sys.argv[1]) print(f'Loading {load_path} into database...') def thread(): batch = [] total = 0 async def process_batch(): nonlocal batch nonlocal total batch_ = batch batch = [] total += len(batch_) print(f'Processing batch of {len(batch_)} elements (total {total})') await Element._collection().bulk_write(batch_, ordered=False) def item_callback(tree, body): if not isinstance(body, dict): body = {} element_type, element = tree[-1] if element_type not in ('node', 'way', 'relation'): return True base = { 'typed_id': int(element['id']), 'changeset_id': int(element['changeset']), 'created_at': datetime.fromisoformat(element['timestamp']), 'version': int(element['version']), 'visible': element.get('visible', True), 'tags': Format06._decode_tags_unsafe(body.get('tag', [])), } if element_type == 'node': if 'lon' not in element: lon = 0 lat = 0 else: lon = float(element['lon']) lat = float(element['lat']) batch.append(ElementNode(**base, point=Point(lon, lat)).create_batch()) elif element_type == 'way': if 'nd' not in body: body['nd'] = [] batch.append(ElementWay(**base, nodes=tuple(n['@ref'] for n in body['nd'])).create_batch()) elif element_type == 'relation': if 'member' not in body: body['member'] = [] batch.append( ElementRelation( **base, members=tuple(
async def main(): load_path = Path(sys.argv[1]) print(f'Loading {load_path} into database...') def thread(): batch = [] total = 0 async def process_batch(): nonlocal batch nonlocal total batch_ = batch batch = [] total += len(batch_) print(f'Processing batch of {len(batch_)} elements (total {total})') await Element._collection().bulk_write(batch_, ordered=False) def item_callback(tree, body): if not isinstance(body, dict): body = {} element_type, element = tree[-1] if element_type not in ('node', 'way', 'relation'): return True base = { 'typed_id': int(element['id']), 'changeset_id': int(element['changeset']), 'created_at': datetime.fromisoformat(element['timestamp']), 'version': int(element['version']), 'visible': element.get('visible', True), 'tags': Format06._decode_tags_unsafe(body.get('tag', [])), } if element_type == 'node': if 'lon' not in element: lon = 0 lat = 0 else: lon = float(element['lon']) lat = float(element['lat']) batch.append(ElementNode(**base, point=Point(lon, lat)).create_batch()) elif element_type == 'way': if 'nd' not in body: body['nd'] = [] batch.append(ElementWay(**base, nodes=tuple(n['@ref'] for n in body['nd'])).create_batch()) elif element_type == 'relation': if 'member' not in body: body['member'] = [] batch.append( ElementRelation( **base, members=tuple(
ElementMemberRef(
3
2023-11-04 01:12:13+00:00
12k
codefuse-ai/Collinear-Constrained-Attention
model/llama/convert_llama_weights_to_hf.py
[ { "identifier": "LlamaConfig", "path": "model/llama/configuration_llama.py", "snippet": "class LlamaConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA\n model according to the specified arguments, defining the model architecture. Instantiating a configuration with the\n defaults will yield a similar configuration to that of the LLaMA-7B.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n\n Args:\n vocab_size (`int`, *optional*, defaults to 32000):\n Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`LlamaModel`]\n hidden_size (`int`, *optional*, defaults to 4096):\n Dimension of the hidden representations.\n intermediate_size (`int`, *optional*, defaults to 11008):\n Dimension of the MLP representations.\n num_hidden_layers (`int`, *optional*, defaults to 32):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 32):\n Number of attention heads for each attention layer in the Transformer encoder.\n num_key_value_heads (`int`, *optional*):\n This is the number of key_value heads that should be used to implement Grouped Query Attention. If\n `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if\n `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When\n converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed\n by meanpooling all the original heads within that group. For more details checkout [this\n paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to\n `num_attention_heads`.\n pretraining_tp (`int`, *optional*, defaults to `1`):\n Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this\n document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is\n necessary to ensure exact reproducibility of the pretraining results. Please refer to [this\n issue](https://github.com/pytorch/pytorch/issues/76232).\n hidden_act (`str` or `function`, *optional*, defaults to `\"silu\"`):\n The non-linear activation function (function or string) in the decoder.\n max_position_embeddings (`int`, *optional*, defaults to 2048):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n rms_norm_eps (`float`, *optional*, defaults to 1e-12):\n The epsilon used by the rms normalization layers.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models). Only\n relevant if `config.is_decoder=True`.\n tie_word_embeddings(`bool`, *optional*, defaults to `False`):\n Whether to tie weight embeddings\n rope_scaling (`Dict`, *optional*):\n Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling\n strategies: linear and dynamic. Their scaling factor must be an float greater than 1. 
The expected format\n is `{\"type\": strategy name, \"factor\": scaling factor}`. When using this flag, don't update\n `max_position_embeddings` to the expected new maximum. See the following thread for more information on how\n these scaling strategies behave:\n https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an\n experimental feature, subject to breaking API changes in future versions.\n\n Example:\n\n ```python\n >>> from transformers import LlamaModel, LlamaConfig\n\n >>> # Initializing a LLaMA llama-7b style configuration\n >>> configuration = LlamaConfig()\n\n >>> # Initializing a model from the llama-7b style configuration\n >>> model = LlamaModel(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n ```\"\"\"\n model_type = \"llama\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n\n def __init__(\n self,\n vocab_size=32000,\n hidden_size=4096,\n intermediate_size=11008,\n num_hidden_layers=32,\n num_attention_heads=32,\n num_key_value_heads=None,\n hidden_act=\"silu\",\n max_position_embeddings=2048,\n initializer_range=0.02,\n rms_norm_eps=1e-6,\n use_cache=True,\n pad_token_id=None,\n bos_token_id=1,\n eos_token_id=2,\n pretraining_tp=1,\n tie_word_embeddings=False,\n rope_scaling=None,\n **kwargs,\n ):\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n\n # for backward compatibility\n if num_key_value_heads is None:\n num_key_value_heads = num_attention_heads\n\n self.num_key_value_heads = num_key_value_heads\n self.hidden_act = hidden_act\n self.initializer_range = initializer_range\n self.rms_norm_eps = rms_norm_eps\n self.pretraining_tp = pretraining_tp\n self.use_cache = use_cache\n self.rope_scaling = rope_scaling\n self._rope_scaling_validation()\n\n super().__init__(\n pad_token_id=pad_token_id,\n bos_token_id=bos_token_id,\n eos_token_id=eos_token_id,\n tie_word_embeddings=tie_word_embeddings,\n **kwargs,\n )\n\n def _rope_scaling_validation(self):\n \"\"\"\n Validate the `rope_scaling` configuration.\n \"\"\"\n if self.rope_scaling is None:\n return\n\n if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:\n raise ValueError(\n \"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, \"\n f\"got {self.rope_scaling}\"\n )\n rope_scaling_type = self.rope_scaling.get(\"type\", None)\n rope_scaling_factor = self.rope_scaling.get(\"factor\", None)\n if rope_scaling_type is None or rope_scaling_type not in [\"linear\", \"dynamic\"]:\n raise ValueError(\n f\"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}\"\n )\n if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:\n raise ValueError(f\"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}\")" }, { "identifier": "LlamaForCausalLM", "path": "model/llama/modeling_llama.py", "snippet": "class LlamaForCausalLM(LlamaPreTrainedModel):\n _tied_weights_keys = [\"lm_head.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.model = LlamaModel(config)\n self.vocab_size = config.vocab_size\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n 
def get_input_embeddings(self):\n return self.model.embed_tokens\n\n def set_input_embeddings(self, value):\n self.model.embed_tokens = value\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def set_decoder(self, decoder):\n self.model = decoder\n\n def get_decoder(self):\n return self.model\n\n @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n r\"\"\"\n Args:\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, LlamaForCausalLM\n\n >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)\n >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)\n\n >>> prompt = \"Hey, are you conscious? Can you talk to me?\"\n >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n \"Hey, are you conscious? 
Can you talk to me?\\nI'm not conscious, but I can talk to you.\"\n ```\"\"\"\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n if self.config.pretraining_tp > 1:\n lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)\n logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]\n logits = torch.cat(logits, dim=-1)\n else:\n logits = self.lm_head(hidden_states)\n logits = logits.float()\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n shift_logits = shift_logits.view(-1, self.config.vocab_size)\n shift_labels = shift_labels.view(-1)\n # Enable model parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels)\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n if past_key_values:\n input_ids = input_ids[:, -1:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"position_ids\": position_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n }\n )\n return model_inputs\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),\n )\n return reordered_past" }, { "identifier": "LlamaTokenizer", "path": "model/llama/tokenization_llama.py", "snippet": "class LlamaTokenizer(PreTrainedTokenizer):\n \"\"\"\n Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. 
The default padding token is unset as there is\n no padding token in the original model.\n\n Args:\n vocab_file (`str`):\n Path to the vocabulary file.\n legacy (`bool`, *optional*, defaults to `True`):\n Whether or not the `legacy` behaviour of the tokenizer should be used. Legacy is before the merge of #24622\n which includes fixes to properly handle tokens that appear after special tokens. A simple example:\n\n - `legacy=True`:\n ```python\n >>> from transformers import T5Tokenizer\n\n >>> tokenizer = T5Tokenizer.from_pretrained(\"t5-base\", legacy=True)\n >>> tokenizer.encode(\"Hello <extra_id_0>.\")\n [8774, 32099, 3, 5, 1]\n ```\n - `legacy=False`:\n ```python\n >>> from transformers import T5Tokenizer\n\n >>> tokenizer = T5Tokenizer.from_pretrained(\"t5-base\", legacy=False)\n >>> tokenizer.encode(\"Hello <extra_id_0>.\") # the extra space `[3]` is no longer here\n [8774, 32099, 5, 1]\n ```\n Checkout the pull request and the issue [here](https://github.com/huggingface/transformers/pull/24565) for\n more details.\n\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n model_input_names = [\"input_ids\", \"attention_mask\"]\n\n def __init__(\n self,\n vocab_file,\n unk_token=\"<unk>\",\n bos_token=\"<s>\",\n eos_token=\"</s>\",\n pad_token=None,\n sp_model_kwargs: Optional[Dict[str, Any]] = None,\n add_bos_token=True,\n add_eos_token=False,\n clean_up_tokenization_spaces=False,\n legacy=None,\n **kwargs,\n ):\n self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs\n bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token\n eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token\n unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token\n pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token\n super().__init__(\n bos_token=bos_token,\n eos_token=eos_token,\n unk_token=unk_token,\n pad_token=pad_token,\n add_bos_token=add_bos_token,\n add_eos_token=add_eos_token,\n sp_model_kwargs=self.sp_model_kwargs,\n clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n legacy=legacy,\n **kwargs,\n )\n if legacy is None:\n logger.warning_once(\n f\"You are using the default legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to\"\n \" read the related pull request available at https://github.com/huggingface/transformers/pull/24565, and set the legacy attribute accordingly.\"\n )\n legacy = True\n\n self.legacy = legacy\n self.vocab_file = vocab_file\n self.add_bos_token = add_bos_token\n self.add_eos_token = add_eos_token\n self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)\n self.sp_model.Load(vocab_file)\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"sp_model\"] = None\n state[\"sp_model_proto\"] = self.sp_model.serialized_model_proto()\n return state\n\n def __setstate__(self, d):\n self.__dict__ = d\n self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)\n self.sp_model.LoadFromSerializedProto(self.sp_model_proto)\n\n @property\n def vocab_size(self):\n \"\"\"Returns vocab size\"\"\"\n return self.sp_model.get_piece_size()\n\n def get_vocab(self):\n \"\"\"Returns vocab as a dict\"\"\"\n vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}\n vocab.update(self.added_tokens_encoder)\n return vocab\n\n # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize\n def tokenize(self, text: \"TextInput\", **kwargs) -> List[str]:\n # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at\n # the beginning of the text\n if not self.legacy:\n text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, \" \")\n return super().tokenize(text, **kwargs)\n\n # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize\n def _tokenize(self, text, **kwargs):\n \"\"\"\n Returns a tokenized string.\n\n Since the sentencepiece internal model always adds a SPIECE_UNDERLINE, at the beginning of the provided text,\n we need to remove it by hand when the current text is a subsequence. This happens whenever the `self.tokenize`\n function is called with specials tokens: the input is split on the special tokens, and each subsequence is\n passed to `_tokenize`. 
Thus if a subsequence did not start with a `\" \"` or SPIECE_UNDERLINE, we have to remove\n the extra `SPIECE_UNDERLINE` prepended.\n \"\"\"\n if not self.legacy:\n is_first = text.startswith(SPIECE_UNDERLINE)\n if is_first:\n text = text[1:]\n\n tokens = self.sp_model.encode(text, out_type=str)\n\n if not self.legacy and not is_first and not text.startswith(\" \") and tokens[0].startswith(SPIECE_UNDERLINE):\n tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]\n return tokens\n\n def _convert_token_to_id(self, token):\n \"\"\"Converts a token (str) in an id using the vocab.\"\"\"\n return self.sp_model.piece_to_id(token)\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n token = self.sp_model.IdToPiece(index)\n return token\n\n def convert_tokens_to_string(self, tokens):\n \"\"\"Converts a sequence of tokens (string) in a single string.\"\"\"\n current_sub_tokens = []\n out_string = \"\"\n prev_is_special = False\n for i, token in enumerate(tokens):\n # make sure that special tokens are not decoded using sentencepiece model\n if token in self.all_special_tokens:\n if not prev_is_special and i != 0:\n out_string += \" \"\n out_string += self.sp_model.decode(current_sub_tokens) + token\n prev_is_special = True\n current_sub_tokens = []\n else:\n current_sub_tokens.append(token)\n prev_is_special = False\n out_string += self.sp_model.decode(current_sub_tokens)\n return out_string\n\n def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:\n \"\"\"\n Save the vocabulary and special tokens file to a directory.\n\n Args:\n save_directory (`str`):\n The directory in which to save the vocabulary.\n\n Returns:\n `Tuple(str)`: Paths to the files saved.\n \"\"\"\n if not os.path.isdir(save_directory):\n logger.error(f\"Vocabulary path ({save_directory}) should be a directory\")\n return\n out_vocab_file = os.path.join(\n save_directory, (filename_prefix + \"-\" if filename_prefix else \"\") + VOCAB_FILES_NAMES[\"vocab_file\"]\n )\n\n if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):\n copyfile(self.vocab_file, out_vocab_file)\n elif not os.path.isfile(self.vocab_file):\n with open(out_vocab_file, \"wb\") as fi:\n content_spiece_model = self.sp_model.serialized_model_proto()\n fi.write(content_spiece_model)\n\n return (out_vocab_file,)\n\n def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n bos_token_id = [self.bos_token_id] if self.add_bos_token else []\n eos_token_id = [self.eos_token_id] if self.add_eos_token else []\n\n output = bos_token_id + token_ids_0 + eos_token_id\n\n if token_ids_1 is not None:\n output = output + bos_token_id + token_ids_1 + eos_token_id\n\n return output\n\n def get_special_tokens_mask(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False\n ) -> List[int]:\n \"\"\"\n Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding\n special tokens using the tokenizer `prepare_for_model` method.\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\n Returns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n if already_has_special_tokens:\n return super().get_special_tokens_mask(\n token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True\n )\n\n bos_token_id = [1] if self.add_bos_token else []\n eos_token_id = [1] if self.add_eos_token else []\n\n if token_ids_1 is None:\n return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id\n return (\n bos_token_id\n + ([0] * len(token_ids_0))\n + eos_token_id\n + bos_token_id\n + ([0] * len(token_ids_1))\n + eos_token_id\n )\n\n def create_token_type_ids_from_sequences(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n \"\"\"\n Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT\n sequence pair mask has the following format:\n\n ```\n 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1\n | first sequence | second sequence |\n ```\n\n if token_ids_1 is None, only returns the first portion of the mask (0s).\n\n Args:\n token_ids_0 (`List[int]`):\n List of ids.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\n Returns:\n `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).\n \"\"\"\n bos_token_id = [self.bos_token_id] if self.add_bos_token else []\n eos_token_id = [self.eos_token_id] if self.add_eos_token else []\n\n output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)\n\n if token_ids_1 is not None:\n output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)\n\n return output\n\n def _build_conversation_input_ids(self, conversation: \"Conversation\") -> List[int]:\n r\"\"\"Builds the input ids for a conversation.\n This is the format used in the provided examples. System prompts should be manually added at the beginning of\n the conversation. If no system prompt is given, the `DEFAULT_SYSTEM_PROMPT` will be used.\n ```\n <bos>[INST] B_SYS SytemPrompt E_SYS Prompt [/INST] Answer <eos>\n <bos>[INST] Prompt [/INST] Answer <eos>\n <bos>[INST] Prompt [/INST]\n ```\n\n If you want to use your own system prompt, make sure to use both `B_SYS` and `E_SYS` use the following:\n ```python\n >>> from transformers import Conversation\n\n >>> Conversation(\n ... \"<<SYS>>\\n Only answer with emojis, and charades\\n<</SYS>>\\n\\nHow can I build a house in 10 septs?\"\n ... 
) # doctest: +IGNORE_RESULT\n ```\n Args:\n conversation (`Conversation`):\n Conversation to build input ids for.\n Returns:\n `List[int]`:\n Input ids for the conversation.\n \"\"\"\n if len(conversation.past_user_inputs) > 0:\n if not conversation.past_user_inputs[0].startswith(B_SYS) or E_SYS not in conversation.past_user_inputs[0]:\n conversation.past_user_inputs[0] = (\n B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.past_user_inputs[0]\n )\n elif conversation.new_user_input:\n if not conversation.new_user_input.startswith(B_SYS) or E_SYS not in conversation.new_user_input:\n conversation.new_user_input = B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.new_user_input\n else:\n raise ValueError(\"Last message must be from user\")\n\n dialogue = list(conversation.iter_texts())\n if not all([is_user for is_user, msg in dialogue[::2]]) or not all(\n [not is_user for is_user, msg in dialogue[1::2]]\n ):\n raise ValueError(\n \"The model only supports 'user' and 'assistant' roles, starting with user and alternating (u/a/u/a/u...)\"\n )\n\n dialog_tokens: List[int] = []\n dialog_tokens += sum(\n [\n [self.bos_token_id]\n + self.encode(\n f\"{B_INST} {(prompt[1]).strip()} {E_INST} {(answer[1]).strip()} \", add_special_tokens=False\n )\n + [self.eos_token_id]\n for prompt, answer in zip(dialogue[::2], dialogue[1::2])\n ],\n [],\n )\n dialog_tokens += [self.bos_token_id] + self.encode(\n f\"{B_INST} {(dialogue[-1][1]).strip()} {E_INST}\", add_special_tokens=False\n )\n return dialog_tokens" } ]
import argparse import gc import json import os import shutil import warnings import torch from .configuration_llama import LlamaConfig from .modeling_llama import LlamaForCausalLM from .tokenization_llama import LlamaTokenizer from tokenization_llama_fast import LlamaTokenizerFast
9,724
else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. state_dict = { f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][ f"layers.{layer_i}.attention_norm.weight" ].clone(), f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][ f"layers.{layer_i}.ffn_norm.weight" ].clone(), } state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute( torch.cat( [ loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim) for i in range(num_shards) ], dim=0, ).reshape(dim, dim) ) state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute( torch.cat( [ loaded[i][f"layers.{layer_i}.attention.wk.weight"].view( num_local_key_value_heads, dims_per_head, dim ) for i in range(num_shards) ], dim=0, ).reshape(key_value_dim, dim), num_key_value_heads, key_value_dim, dim, ) state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat( [ loaded[i][f"layers.{layer_i}.attention.wv.weight"].view( num_local_key_value_heads, dims_per_head, dim ) for i in range(num_shards) ], dim=0, ).reshape(key_value_dim, dim) state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat( [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1 ) state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat( [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0 ) state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat( [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1 ) state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat( [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0 ) state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq for k, v in state_dict.items(): index_dict["weight_map"][k] = filename param_count += v.numel() torch.save(state_dict, os.path.join(tmp_model_path, filename)) filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin" if model_size == "7B": # Unsharded state_dict = { "model.embed_tokens.weight": loaded["tok_embeddings.weight"], "model.norm.weight": loaded["norm.weight"], "lm_head.weight": loaded["output.weight"], } else: state_dict = { "model.norm.weight": loaded[0]["norm.weight"], "model.embed_tokens.weight": torch.cat( [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1 ), "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0), } for k, v in state_dict.items(): index_dict["weight_map"][k] = filename param_count += v.numel() torch.save(state_dict, os.path.join(tmp_model_path, filename)) # Write configs index_dict["metadata"] = {"total_size": param_count * 2} write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json")) ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1 multiple_of = params["multiple_of"] if "multiple_of" in params else 256 config = LlamaConfig( hidden_size=dim, intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of), num_attention_heads=params["n_heads"], num_hidden_layers=params["n_layers"], rms_norm_eps=params["norm_eps"], num_key_value_heads=num_key_value_heads, ) 
config.save_pretrained(tmp_model_path) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print("Loading the checkpoint in a Llama model.") model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) # Avoid saving this as part of the config. del model.config._name_or_path print("Saving in the Transformers format.") model.save_pretrained(model_path, safe_serialization=safe_serialization) shutil.rmtree(tmp_model_path) def write_tokenizer(tokenizer_path, input_tokenizer_path): # Initialize the tokenizer based on the `spm` model
# Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: # from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion" ) LlamaTokenizerFast = None """ Sample usage: ``` python src/transformers/models/llama/convert_llama_weights_to_hf.py \ --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path ``` Thereafter, models can be loaded via: ```py from transformers import LlamaForCausalLM, LlamaTokenizer model = LlamaForCausalLM.from_pretrained("/output/path") tokenizer = LlamaTokenizer.from_pretrained("/output/path") ``` Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM). """ INTERMEDIATE_SIZE_MAP = { "7B": 11008, "13B": 13824, "30B": 17920, "65B": 22016, "70B": 28672, } NUM_SHARDS = { "7B": 1, "7Bf": 1, "13B": 2, "13Bf": 2, "30B": 4, "65B": 8, "70B": 8, "70Bf": 8, } def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256): return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of) def read_json(path): with open(path, "r") as f: return json.load(f) def write_json(text, path): with open(path, "w") as f: json.dump(text, f) def write_model(model_path, input_base_path, model_size, safe_serialization=True): os.makedirs(model_path, exist_ok=True) tmp_model_path = os.path.join(model_path, "tmp") os.makedirs(tmp_model_path, exist_ok=True) params = read_json(os.path.join(input_base_path, "params.json")) num_shards = NUM_SHARDS[model_size] n_layers = params["n_layers"] n_heads = params["n_heads"] n_heads_per_shard = n_heads // num_shards dim = params["dim"] dims_per_head = dim // n_heads base = 10000.0 inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head)) if "n_kv_heads" in params: num_key_value_heads = params["n_kv_heads"] # for GQA / MQA num_local_key_value_heads = n_heads_per_shard // num_key_value_heads key_value_dim = dim // num_key_value_heads else: # compatibility with other checkpoints num_key_value_heads = n_heads num_local_key_value_heads = n_heads_per_shard key_value_dim = dim # permute for sliced rotary def permute(w, n_heads=n_heads, dim1=dim, dim2=dim): return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2) print(f"Fetching all parameters from the checkpoint at {input_base_path}.") # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu") else: # Sharded loaded = [ torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu") for i in range(num_shards) ] param_count = 0 index_dict = {"weight_map": {}} for layer_i in range(n_layers): filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin" if model_size == "7B": # Unsharded state_dict = { f"model.layers.{layer_i}.self_attn.q_proj.weight": permute( loaded[f"layers.{layer_i}.attention.wq.weight"] ), f"model.layers.{layer_i}.self_attn.k_proj.weight": permute( loaded[f"layers.{layer_i}.attention.wk.weight"] ), f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"], f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"], f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"], f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"], f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"], f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"], f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. state_dict = { f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][ f"layers.{layer_i}.attention_norm.weight" ].clone(), f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][ f"layers.{layer_i}.ffn_norm.weight" ].clone(), } state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute( torch.cat( [ loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim) for i in range(num_shards) ], dim=0, ).reshape(dim, dim) ) state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute( torch.cat( [ loaded[i][f"layers.{layer_i}.attention.wk.weight"].view( num_local_key_value_heads, dims_per_head, dim ) for i in range(num_shards) ], dim=0, ).reshape(key_value_dim, dim), num_key_value_heads, key_value_dim, dim, ) state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat( [ loaded[i][f"layers.{layer_i}.attention.wv.weight"].view( num_local_key_value_heads, dims_per_head, dim ) for i in range(num_shards) ], dim=0, ).reshape(key_value_dim, dim) state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat( [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1 ) state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat( [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0 ) state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat( [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1 ) state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat( [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0 ) state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq for k, v in state_dict.items(): index_dict["weight_map"][k] = filename param_count += v.numel() 
torch.save(state_dict, os.path.join(tmp_model_path, filename)) filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin" if model_size == "7B": # Unsharded state_dict = { "model.embed_tokens.weight": loaded["tok_embeddings.weight"], "model.norm.weight": loaded["norm.weight"], "lm_head.weight": loaded["output.weight"], } else: state_dict = { "model.norm.weight": loaded[0]["norm.weight"], "model.embed_tokens.weight": torch.cat( [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1 ), "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0), } for k, v in state_dict.items(): index_dict["weight_map"][k] = filename param_count += v.numel() torch.save(state_dict, os.path.join(tmp_model_path, filename)) # Write configs index_dict["metadata"] = {"total_size": param_count * 2} write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json")) ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1 multiple_of = params["multiple_of"] if "multiple_of" in params else 256 config = LlamaConfig( hidden_size=dim, intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of), num_attention_heads=params["n_heads"], num_hidden_layers=params["n_layers"], rms_norm_eps=params["norm_eps"], num_key_value_heads=num_key_value_heads, ) config.save_pretrained(tmp_model_path) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print("Loading the checkpoint in a Llama model.") model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) # Avoid saving this as part of the config. del model.config._name_or_path print("Saving in the Transformers format.") model.save_pretrained(model_path, safe_serialization=safe_serialization) shutil.rmtree(tmp_model_path) def write_tokenizer(tokenizer_path, input_tokenizer_path): # Initialize the tokenizer based on the `spm` model
tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
2
2023-11-02 01:37:01+00:00
12k
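The conversion record above defines `compute_intermediate_size`, which recovers the MLP width from the model dimension by rounding `8n/3` (optionally scaled by `ffn_dim_multiplier`) up to a multiple of `multiple_of`. Below is a minimal, self-contained sketch that re-checks this against the `INTERMEDIATE_SIZE_MAP` entries shown in that record; the hidden sizes 4096 (7B) and 5120 (13B) are the published Llama dimensions and are assumed here, not read from the record's `params.json`.

```python
# Standalone re-check of compute_intermediate_size from the conversion script above.
# Assumption: hidden sizes 4096 (7B) and 5120 (13B); these are not taken from this record.
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    # round ffn_dim_multiplier * (8n/3) up to the next multiple of `multiple_of`
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)

assert compute_intermediate_size(4096) == 11008  # matches INTERMEDIATE_SIZE_MAP["7B"]
assert compute_intermediate_size(5120) == 13824  # matches INTERMEDIATE_SIZE_MAP["13B"]
print("intermediate sizes check out")
```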
bytedance/cryostar
projects/star/train_density.py
[ { "identifier": "StarfileDataSet", "path": "cryostar/utils/dataio.py", "snippet": "class StarfileDataSet(Dataset):\n\n def __init__(self, cfg: StarfileDatasetConfig):\n super().__init__()\n self.cfg = cfg\n self.df = starfile.read(Path(cfg.starfile_path))\n\n if \"optics\" in self.df:\n optics_df = self.df[\"optics\"]\n particles_df = self.df[\"particles\"]\n else:\n optics_df = None\n particles_df = self.df\n self.particles_df = particles_df\n\n if cfg.apix is None:\n if optics_df is not None and \"rlnImagePixelSize\" in optics_df:\n self.apix = float(optics_df[\"rlnImagePixelSize\"][0])\n print(f\"Infer dataset apix={self.apix} from first optic group.\")\n elif \"rlnDetectorPixelSize\" in particles_df and \"rlnMagnification\" in particles_df:\n self.apix = float(particles_df[\"rlnDetectorPixelSize\"][0] / particles_df[\"rlnMagnification\"][0] * 1e4)\n print(f\"Infer dataset apix={self.apix} from first particle meta data.\")\n else:\n raise AttributeError(\"Cannot parse apix from starfile, please set it in config by hand.\")\n else:\n self.apix = cfg.apix\n\n if cfg.side_shape is None:\n tmp_mrc_path = osp.join(cfg.dataset_dir, particles_df[\"rlnImageName\"][0].split('@')[-1])\n with mrcfile.mmap(tmp_mrc_path, mode=\"r\", permissive=True) as m:\n self.side_shape = m.data.shape[-1]\n print(f\"Infer dataset side_shape={self.side_shape} from the 1st particle.\")\n else:\n self.side_shape = cfg.side_shape\n\n self.num_proj = len(particles_df)\n\n self.down_side_shape = self.side_shape\n if cfg.down_side_shape is not None:\n self.down_side_shape = cfg.down_side_shape\n\n if cfg.mask_rad is not None:\n self.mask = Mask(self.down_side_shape, cfg.mask_rad)\n\n self.f_mu = None\n self.f_std = None\n\n def __len__(self):\n return self.num_proj\n\n def estimate_normalization(self):\n if self.f_mu is None and self.f_std is None:\n f_sub_data = []\n # I have checked that the standard deviation of 10/100/1000 particles is similar\n for i in range(0, len(self), len(self) // 100):\n f_sub_data.append(self[i][\"fproj\"])\n f_sub_data = torch.cat(f_sub_data, dim=0)\n # self.f_mu = torch.mean(f_sub_data)\n self.f_mu = 0.0 # just follow cryodrgn\n self.f_std = torch.std(f_sub_data).item()\n else:\n raise Exception(\"The normalization factor has been estimated!\")\n\n def __getitem__(self, idx):\n item_row = self.particles_df.iloc[idx]\n try:\n img_name_raw = item_row[\"rlnImageName\"]\n in_mrc_idx, img_name = item_row[\"rlnImageName\"].split(\"@\")\n in_mrc_idx = int(in_mrc_idx) - 1\n mrc_path = osp.join(self.cfg.dataset_dir, img_name)\n with mrcfile.mmap(mrc_path, mode=\"r\", permissive=True) as mrc:\n if mrc.data.ndim > 2:\n proj = torch.from_numpy(np.array(mrc.data[in_mrc_idx])).float() * self.cfg.scale_images\n else:\n # the mrcs file can contain only one particle\n proj = torch.from_numpy(np.array(mrc.data)).float() * self.cfg.scale_images\n\n # get (1, side_shape, side_shape) proj\n if len(proj.shape) == 2:\n proj = proj[None, :, :] # add a dummy channel (for consistency w/ img fmt)\n else:\n assert len(proj.shape) == 3 and proj.shape[0] == 1 # some starfile already have a dummy channel\n\n # down-sample\n if self.down_side_shape != self.side_shape:\n if self.cfg.down_method == \"interp\":\n proj = tvf.resize(proj, [self.down_side_shape, ] * 2, antialias=True)\n elif self.cfg.down_method == \"fft\":\n proj = downsample_2d(proj[0, :, :], self.down_side_shape)[None, :, :]\n else:\n raise NotImplementedError\n\n if self.cfg.mask_rad is not None:\n proj = self.mask(proj)\n\n except Exception as e:\n 
print(f\"WARNING: Particle image {img_name_raw} invalid! Setting to zeros.\")\n print(e)\n proj = torch.zeros(1, self.down_side_shape, self.down_side_shape)\n\n if self.cfg.power_images != 1.0:\n proj *= self.cfg.power_images\n\n # Generate CTF from CTF paramaters\n defocusU = torch.from_numpy(np.array(item_row[\"rlnDefocusU\"] / 1e4, ndmin=2)).float()\n defocusV = torch.from_numpy(np.array(item_row[\"rlnDefocusV\"] / 1e4, ndmin=2)).float()\n angleAstigmatism = torch.from_numpy(np.radians(np.array(item_row[\"rlnDefocusAngle\"], ndmin=2))).float()\n\n # Read \"GT\" orientations\n if self.cfg.ignore_rots:\n rotmat = torch.eye(3).float()\n else:\n # yapf: disable\n rotmat = torch.from_numpy(euler_angles2matrix(\n np.radians(-item_row[\"rlnAngleRot\"]),\n # np.radians(particle[\"rlnAngleTilt\"]) * (-1 if self.cfg.invert_hand else 1),\n np.radians(-item_row[\"rlnAngleTilt\"]),\n np.radians(-item_row[\"rlnAnglePsi\"]))\n ).float()\n # yapf: enable\n\n # Read \"GT\" shifts\n if self.cfg.ignore_trans:\n shiftX = torch.tensor([0.])\n shiftY = torch.tensor([0.])\n else:\n # support early starfile formats\n # Particle translations used to be in pixels (rlnOriginX and rlnOriginY) but this changed to Angstroms\n # (rlnOriginXAngstrom and rlnOriginYAngstrom) in relion 3.1.\n # https://relion.readthedocs.io/en/release-3.1/Reference/Conventions.html\n if \"rlnOriginXAngst\" in item_row:\n shiftX = torch.from_numpy(np.array(item_row[\"rlnOriginXAngst\"], dtype=np.float32))\n shiftY = torch.from_numpy(np.array(item_row[\"rlnOriginYAngst\"], dtype=np.float32))\n else:\n shiftX = torch.from_numpy(np.array(item_row[\"rlnOriginX\"] * self.apix, dtype=np.float32))\n shiftY = torch.from_numpy(np.array(item_row[\"rlnOriginY\"] * self.apix, dtype=np.float32))\n\n fproj = primal_to_fourier_2d(proj)\n\n if self.f_mu is not None:\n fproj = (fproj - self.f_mu) / self.f_std\n proj = fourier_to_primal_2d(fproj).real\n\n in_dict = {\n \"proj\": proj,\n \"rotmat\": rotmat,\n \"defocusU\": defocusU,\n \"defocusV\": defocusV,\n \"shiftX\": shiftX,\n \"shiftY\": shiftY,\n \"angleAstigmatism\": angleAstigmatism,\n \"idx\": torch.tensor(idx, dtype=torch.long),\n \"fproj\": fproj,\n \"imgname_raw\": img_name_raw\n }\n\n if \"rlnClassNumber\" in item_row:\n in_dict[\"class_id\"] = item_row[\"rlnClassNumber\"]\n\n return in_dict" }, { "identifier": "StarfileDatasetConfig", "path": "cryostar/utils/dataio.py", "snippet": "class StarfileDatasetConfig:\n dataset_dir: str\n starfile_path: str\n # if is not specified, the following apix, and side_shape will be inferred from starfile\n apix: float = None\n side_shape: int = None\n # down-sample the original image or not\n down_side_shape: int = None\n down_method: str = \"interp\"\n # apply a circular mask on input image or not\n mask_rad: float = None\n # change image values\n scale_images: float = 1.0\n power_images: float = field(\n default=1.0,\n metadata={\"help\": \"Change the power of the signal by multiplying a constant number.\"})\n # ignore pose from starfile or not\n ignore_trans: bool = False\n ignore_rots: bool = False\n # invert_hand: bool = field(\n # default=False,\n # metadata={\"help\": \"Invert handedness when reading relion data.\"})" }, { "identifier": "ImplicitFourierVolume", "path": "cryostar/nerf/volume_utils.py", "snippet": "class ImplicitFourierVolume(nn.Module):\n\n def __init__(self, z_dim, img_sz, mask_rad, params_implicit):\n \"\"\"\n Initialization of an implicit representation of the volume in Fourier space.\n\n Parameters\n ----------\n img_sz: int\n 
params_implicit: dictionary\n \"\"\"\n super().__init__()\n self.img_sz = img_sz\n self.z_dim = z_dim\n\n lincoords = torch.linspace(-1., 1., self.img_sz)\n [X, Y] = torch.meshgrid([lincoords, lincoords], indexing=\"ij\")\n coords = torch.stack([Y, X, torch.zeros_like(X)], dim=-1)\n coords = shift_coords(coords, 1., 1., 0, img_sz, img_sz, 1)\n self.register_buffer('plane_coords', coords.reshape(-1, 3))\n\n self.mask_rad = mask_rad\n if self.mask_rad != 1:\n mask = create_circular_mask(img_sz, img_sz, None, self.mask_rad / 2 * img_sz)\n plane_window_mask = torch.from_numpy(mask).reshape(-1)\n self.register_buffer('plane_window_mask', plane_window_mask)\n sphere_mask = torch.from_numpy(\n create_sphere_mask(self.img_sz, self.img_sz, self.img_sz, radius=self.mask_rad / 2 * self.img_sz)\n )\n self.register_buffer(\"sphere_mask\", sphere_mask)\n\n lincoords = torch.linspace(-1., 1., self.img_sz)\n [X, Y, Z] = torch.meshgrid([lincoords, lincoords, lincoords], indexing=\"ij\")\n coords = torch.stack([Z, Y, X], dim=-1)\n coords = shift_coords(coords, 1., 1., 1., img_sz, img_sz, img_sz)\n self.register_buffer('coords_3d', coords.reshape(-1, 3))\n\n self.fvol = FourierNet(net_type=params_implicit[\"net_type\"],\n z_dim=z_dim,\n pe_dim=params_implicit[\"pe_dim\"],\n pe_type=params_implicit[\"pe_type\"],\n D=params_implicit[\"D\"],\n hidden_dim=params_implicit[\"hidden\"],\n force_symmetry=params_implicit['force_symmetry'])\n\n def forward(self, z, rotmat):\n \"\"\"\n Generates a slice in Fourier space from a rotation matrix.\n\n Parameters\n ----------\n rotmat: torch.Tensor (B, 3, 3)\n\n Returns\n -------\n fplane: torch.Tensor (B, 1, img_sz, img_sz) (complex)\n \"\"\"\n if self.z_dim == 0:\n assert z is None\n batch_sz = rotmat.shape[0]\n\n with torch.autocast(\"cuda\", enabled=False):\n assert self.plane_coords.dtype == torch.float32\n assert rotmat.dtype == torch.float32\n rot_plane_coords = torch.bmm(self.plane_coords.repeat(batch_sz, 1, 1), rotmat) # B, img_sz^2, 3\n\n if self.mask_rad != 1:\n coords_mask = einops.repeat(self.plane_window_mask, \"num_coords -> bsz num_coords c3\", bsz=batch_sz, c3=3)\n rot_plane_coords = rot_plane_coords[coords_mask].reshape(batch_sz, -1, 3) # B, mask_num, 3\n\n fplane = self.fvol(z, rot_plane_coords) # B, _, 1/2\n\n if self.mask_rad != 1:\n unmask_fplane = fplane.new_zeros(batch_sz, self.img_sz * self.img_sz, self.fvol.out_features)\n value_mask = einops.repeat(self.plane_window_mask, \"num_coords -> bsz num_coords c\", bsz=batch_sz, c=self.fvol.out_features)\n unmask_fplane[value_mask] = fplane.reshape(-1)\n fplane = unmask_fplane.reshape(batch_sz, self.img_sz, self.img_sz, self.fvol.out_features)\n else:\n fplane = fplane.reshape(batch_sz, self.img_sz, self.img_sz, self.fvol.out_features)\n\n if self.fvol.out_features == 2:\n fplane = torch.view_as_complex(fplane) # B, img_sz, img_sz\n else:\n fplane = batch_hartley_to_fourier_2d(fplane.squeeze(-1)) # B, img_sz, img_sz\n\n fplane = fplane[:, None, :, :]\n return fplane\n\n def make_volume(self, z):\n with torch.no_grad():\n with torch.autocast(\"cuda\", enabled=False):\n coords = self.coords_3d.unsqueeze(0)\n num_coords = coords.shape[1]\n chunk_size = 128**2 * 32\n exp_fvol = []\n for sid in range(0, num_coords, chunk_size):\n eid = sid + chunk_size\n exp_fvol.append(self.fvol(z, coords[:, sid:eid]))\n exp_fvol = torch.cat(exp_fvol, dim=1)\n if self.fvol.out_features == 2:\n exp_fvol = exp_fvol.reshape(self.img_sz, self.img_sz, self.img_sz, 2)\n exp_fvol = torch.view_as_complex(exp_fvol)\n else:\n exp_fvol 
= exp_fvol.reshape(self.img_sz, self.img_sz, self.img_sz)\n exp_fvol = hartley_to_fourier_3d(exp_fvol)\n\n exp_fvol[~self.sphere_mask] = 0.0\n exp_vol = fourier_to_primal_3d(exp_fvol).real\n return exp_vol" }, { "identifier": "SpatialGridTranslate", "path": "cryostar/utils/transforms.py", "snippet": "class SpatialGridTranslate(torch.nn.Module):\n\n def __init__(self, D, device=None) -> None:\n super().__init__()\n self.D = D\n # yapf: disable\n coords = torch.stack(torch.meshgrid([\n torch.linspace(-1.0, 1.0, self.D, device=device),\n torch.linspace(-1.0, 1.0, self.D, device=device)],\n indexing=\"ij\"), dim=-1).reshape(-1, 2)\n # yapf: enable\n self.register_buffer(\"coords\", coords)\n\n def transform(self, images: torch.Tensor, trans: torch.Tensor):\n \"\"\"\n The `images` are stored in `YX` mode, so the `trans` is also `YX`!\n\n Supposing that D is 96, a point is at 0.0:\n - adding 48 should move it to the right corner which is 1.0\n 1.0 = 0.0 + 48 / (96 / 2)\n - adding 96(>48) should leave it at 0.0\n 0.0 = 0.0 + 96 / (96 / 2) - 2.0\n - adding -96(<48) should leave it at 0.0\n 0.0 = 0.0 - 96 / (96 / 2) + 2.0\n\n Input:\n images: (B, NY, NX)\n trans: (B, T, 2)\n\n Returns:\n images: (B, T, NY, NX)\n \"\"\"\n B, NY, NX = images.shape\n assert self.D == NY == NX\n assert images.shape[0] == trans.shape[0]\n\n grid = einops.rearrange(self.coords, \"N C2 -> 1 1 N C2\") - \\\n einops.rearrange(trans, \"B T C2 -> B T 1 C2\") * 2 / self.D\n grid = grid.flip(-1) # convert the first axis from slow-axis to fast-axis\n grid[grid >= 1] -= 2\n grid[grid <= -1] += 2\n grid.clamp_(-1.0, 1.0)\n\n sampled = F.grid_sample(einops.rearrange(images, \"B NY NX -> B 1 NY NX\"), grid, align_corners=True)\n\n sampled = einops.rearrange(sampled, \"B 1 T (NY NX) -> B T NY NX\", NX=NX, NY=NY)\n return sampled" }, { "identifier": "FourierGridTranslate", "path": "cryostar/utils/transforms.py", "snippet": "class FourierGridTranslate(torch.nn.Module):\n \"\"\"\n DFT's translation is:\n `f(x - x0, y - y0) <=> F(u, v) exp(-2 j \\pi (x0 u + y0 v) / N )`\n where `x, y, u, v` all have a range of `N`, so `(x0 u + y0 v) / N \\in (0, N)`\n\n Here we initialize the `u, v` coordinates between `(-0.5, 0.5)` so that the \n range is 1, where the `1/N` term can be ignored.\n\n See also: https://dsp.stackexchange.com/questions/40228/translation-property-of-2-d-discrete-fourier-transform\n\n Important notes:\n If `N=4`, the coordinates u will be `[-0.5, -0.17, 0.17, 0.5]`, but the \n `fft`ed image's frequency is `[-0.50, -0.25, 0.00, 0.25]`, so we have to \n add some corrections:\n - right-shift `u` to be `[-0.50, -0.25, 0.00, 0.25]`\n - perform multiplication\n\n \"\"\"\n\n def __init__(self, D, device=None) -> None:\n super().__init__()\n self.D = D\n # yapf: disable\n coords = torch.stack(torch.meshgrid([\n torch.linspace(-1.0, 1.0, self.D, device=device),\n torch.linspace(-1.0, 1.0, self.D, device=device)],\n indexing=\"ij\"), dim=-1).reshape(-1, 2) / 2\n # yapf: enable\n coords = shift_coords(coords, 0.5, 0.5, None, self.D, self.D, None, False)\n self.register_buffer(\"coords\", coords)\n\n def transform(self, images: torch.Tensor, trans: torch.Tensor):\n \"\"\"\n The `images` are stored in `YX` mode, so the `trans` is also `YX`!\n\n Input:\n images: (B, NY, NX)\n trans: (B, T, 2)\n\n Returns:\n images: (B, T, NY, NX)\n \"\"\"\n B, NY, NX = images.shape\n assert self.D == NY == NX\n assert images.shape[0] == trans.shape[0]\n images = einops.rearrange(images, \"B NY NX -> B 1 (NY NX)\")\n delta = trans @ self.coords.t() * -2j * 
torch.pi\n images_trans = torch.exp(delta) * images\n images_trans = einops.rearrange(images_trans, \"B T (NY NX) -> B T NY NX\", NY=self.D, NX=self.D)\n return images_trans" }, { "identifier": "CTFRelion", "path": "cryostar/utils/ctf_utils.py", "snippet": "class CTFRelion(CTFBase):\n \"\"\"\n BUG: There are two bugs in this file:\n 1. `self.angleFrequency` has some error for even-sized grid.\n 2. `local_defocus` in `get_ctf()` has some error, `angleAstigmatism` should be\n replaced with `defocusU - defocusV`.\n\n The bugs will not affect real-world data too much. But you may encounter some issues\n on simulated datasets. Use CTFCryoDRGN instead.\n \"\"\"\n\n def __init__(self,\n size=257,\n resolution=0.8,\n kV=300.0,\n valueNyquist=1.,\n defocusU=1.,\n defocusV=1.,\n angleAstigmatism=0.,\n cs=2.7,\n phasePlate=0.,\n amplitudeContrast=.1,\n bFactor=0.,\n num_particles=500,\n requires_grad=False,\n precompute=False,\n flip_images=False):\n super(CTFRelion, self).__init__(resolution, num_particles, requires_grad)\n self.requires_grad = requires_grad\n self.flip_images = flip_images\n\n self.size = size # in pixel\n self.resolution = resolution # in angstrom\n self.kV = kV # in kilovolt\n\n self.valueNyquist = valueNyquist\n self.phasePlate = phasePlate / 180. * np.pi # in radians (converted from degrees)\n self.amplitudeContrast = amplitudeContrast\n self.bFactor = bFactor\n\n self.frequency = 1. / self.resolution\n\n self.wavelength = self._get_ewavelength(self.kV * 1e3) # input in V (so we convert kv*1e3)\n\n angleAstigmatism = angleAstigmatism / 180. * np.pi # input in degree converted in radian\n cs = cs * 1e7 # input in mm converted in angstrom\n # the angleAstigmatism, defocusU, defocusV and cs are nn.Parameter of size (N, 1, 1)\n self.angleAstigmatism = nn.Parameter(angleAstigmatism * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n self.cs = nn.Parameter(cs * torch.ones((num_particles, 1, 1), dtype=torch.float32), requires_grad=requires_grad)\n self.defocusU = nn.Parameter(defocusU * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n self.defocusV = nn.Parameter(defocusV * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n\n self.precomputed_filters = precompute\n\n ax = torch.linspace(-1. / (2. * resolution), 1 / (2. * resolution), self.size)\n mx, my = torch.meshgrid(ax, ax, indexing=\"ij\")\n self.register_buffer(\"r2\", mx**2 + my**2)\n self.register_buffer(\"r\", torch.sqrt(self.r2))\n self.register_buffer(\"angleFrequency\", torch.atan2(my, mx))\n\n if not self.requires_grad and self.precomputed_filters:\n print(\"Precomputing hFourier in CTF\")\n self.register_buffer('hFourier', self.get_ctf(torch.arange(num_particles), num_particles))\n\n def _get_ewavelength(self, U):\n # assumes V as input, returns wavelength in angstrom\n h = scipy.constants.h\n e = scipy.constants.e\n c = scipy.constants.c\n m0 = scipy.constants.m_e\n\n return h / math.sqrt(2. * m0 * e * U) / math.sqrt(1 + e * U / (2 * m0 * c**2)) * 1e10\n\n def get_ctf(self, idcs, B, cpu_params={}, frequency_marcher=None):\n defocusU = self.defocusU[idcs, :, :]\n defocusV = self.defocusV[idcs, :, :]\n angleAstigmatism = self.angleAstigmatism[idcs, :, :]\n cs = self.cs[idcs, :, :]\n\n ac = self.amplitudeContrast\n pc = math.sqrt(1. - ac**2)\n K1 = np.pi / 2. 
* cs * self.wavelength**3\n K2 = np.pi * self.wavelength\n\n # Cut-off from frequency marcher\n if frequency_marcher is not None:\n self.size_after_fm = 2 * frequency_marcher.f + 1\n if self.size_after_fm > self.size:\n self.size_after_fm = self.size\n angleFrequency = frequency_marcher.cut_coords_plane(self.angleFrequency.reshape(\n self.size, self.size, 1)).reshape(self.size_after_fm, self.size_after_fm)\n r2 = frequency_marcher.cut_coords_plane(self.r2.reshape(self.size, self.size,\n 1)).reshape(self.size_after_fm, self.size_after_fm)\n else:\n self.size_after_fm = self.size\n angleFrequency = self.angleFrequency\n r2 = self.r2\n\n angle = angleFrequency - angleAstigmatism\n local_defocus = 1e4 * (defocusU + defocusV) / 2. + angleAstigmatism * torch.cos(2. * angle)\n\n gamma = K1 * r2**2 - K2 * r2 * local_defocus - self.phasePlate\n hFourier = -pc * torch.sin(gamma) + ac * torch.cos(gamma)\n\n if self.valueNyquist != 1:\n decay = np.sqrt(-np.log(self.valueNyquist)) * 2. * self.resolution\n envelope = torch.exp(-self.frequency * decay**2 * r2)\n hFourier *= envelope\n\n return hFourier\n\n def oversample_multiply_crop(self, x_fourier, hFourier):\n # we assume that the shape of the CTF is always going to be bigger\n # than the size of the input image\n input_sz = x_fourier.shape[-1]\n if input_sz != self.size_after_fm:\n x_primal = fourier_to_primal_2d(x_fourier)\n\n pad_len = (self.size_after_fm - x_fourier.shape[-1]) // 2 # here we assume even lengths\n p2d = (pad_len, pad_len, pad_len, pad_len)\n x_primal_padded = F.pad(x_primal, p2d, 'constant', 0)\n\n x_fourier_padded = primal_to_fourier_2d(x_primal_padded)\n\n x_fourier_padded_filtered = x_fourier_padded * hFourier[:, None, :, :]\n return x_fourier_padded_filtered[..., pad_len:-pad_len, pad_len:-pad_len]\n else:\n return x_fourier * hFourier[:, None, :, :]\n\n def get_cpu_params(self, idcs, ctf_params, flip=False):\n batch_size = idcs.shape[0]\n self.defocusU[idcs, :, :] = ctf_params['defocusU'][:batch_size] if not flip else\\\n ctf_params['defocusU'][batch_size:]\n self.defocusV[idcs, :, :] = ctf_params['defocusV'][:batch_size] if not flip else\\\n ctf_params['defocusV'][batch_size:]\n self.angleAstigmatism[idcs, :, :] = ctf_params['angleAstigmatism'][:batch_size] if not flip else\\\n ctf_params['angleAstigmatism'][batch_size:]\n cpu_params = {}\n return cpu_params\n\n def forward(self, x_fourier, idcs=0, ctf_params={}, mode='gt', frequency_marcher=None):\n # This is when we want to prescribe parameters for the CTF\n if x_fourier.dim() == 3:\n x_fourier = x_fourier[None, ...]\n # x_fourier: B, 1, S, S\n batch_size = len(idcs)\n cpu_params = {}\n if ctf_params:\n cpu_params = self.get_cpu_params(idcs, ctf_params, flip=False)\n\n # if new params for the CTF have been prescribed or we are optimizing it\n # then request the evaluation of the CTF\n if not ctf_params and self.precomputed_filters and not self.requires_grad:\n hFourier = self.hFourier[idcs, :, :]\n else:\n hFourier = self.get_ctf(idcs, batch_size, cpu_params=cpu_params, frequency_marcher=frequency_marcher)\n\n if self.flip_images:\n flipped_hFourier = torch.flip(hFourier, [1, 2])\n\n hFourier = torch.cat([hFourier, flipped_hFourier], dim=0)\n\n return self.oversample_multiply_crop(x_fourier, hFourier)" }, { "identifier": "CTFCryoDRGN", "path": "cryostar/utils/ctf_utils.py", "snippet": "class CTFCryoDRGN(CTFBase):\n\n def __init__(self,\n size,\n resolution,\n num_particles=None,\n kV=300,\n cs=2.0,\n amplitudeContrast=0.1,\n requires_grad=False):\n super(CTFBase, 
self).__init__()\n self.size = size\n self.resolution = resolution\n self.requires_grad = requires_grad\n self.kV = kV\n self.cs = cs\n self.ac = amplitudeContrast\n # ax = torch.linspace(-1. / (2. * resolution), 1 / (2. * resolution), self.size)\n # mx, my = torch.meshgrid(ax, ax, indexing=\"ij\")\n ax = torch.fft.fftshift(torch.fft.fftfreq(self.size, self.resolution))\n mx, my = torch.meshgrid(ax, ax, indexing=\"xy\")\n freqs = torch.stack([mx.flatten(), my.flatten()], 1)\n self.register_buffer(\"freqs\", freqs)\n\n def get_ctf(self, ctf_params={}):\n bsz = len(ctf_params[\"defocusU\"])\n device = self.freqs.device\n hFourier = compute_ctf(freqs=self.freqs.repeat(bsz, 1, 1),\n dfu=(ctf_params[\"defocusU\"] * 1e4).squeeze(1),\n dfv=(ctf_params[\"defocusV\"] * 1e4).squeeze(1),\n dfang=torch.rad2deg(ctf_params[\"angleAstigmatism\"]).squeeze(1),\n volt=torch.tensor(self.kV, device=device).repeat(bsz, 1),\n cs=torch.tensor(self.cs, device=device).repeat(bsz, 1),\n w=torch.tensor(self.ac, device=device).repeat(bsz,\n 1)).reshape(bsz, self.size, self.size)\n return hFourier\n\n def forward(self, x_fourier, idcs=0, ctf_params={}, mode='gt', frequency_marcher=None):\n hFourier = -self.get_ctf(ctf_params)\n return x_fourier * hFourier[:, None, :, :]" }, { "identifier": "fourier_to_primal_2d", "path": "cryostar/utils/fft_utils.py", "snippet": "def fourier_to_primal_2d(f: torch.Tensor) -> torch.Tensor:\n f = torch.fft.ifftshift(f, dim=(-2, -1))\n return torch.fft.fftshift(torch.fft.ifftn(f, s=(f.shape[-2], f.shape[-1]), dim=(-2, -1)), dim=(-2, -1))" }, { "identifier": "primal_to_fourier_2d", "path": "cryostar/utils/fft_utils.py", "snippet": "@torch.autocast(\"cuda\")\ndef primal_to_fourier_2d(r: torch.Tensor) -> torch.Tensor:\n with torch.autocast(\"cuda\", enabled=False):\n r = torch.fft.ifftshift(r.float(), dim=(-2, -1))\n f = torch.fft.fftshift(torch.fft.fftn(r, s=(r.shape[-2], r.shape[-1]), dim=(-2, -1)), dim=(-2, -1))\n return f" }, { "identifier": "sample_along_pca", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def sample_along_pca(z: np.ndarray, pca_dim=1, num=5) -> np.ndarray:\n assert isinstance(z, np.ndarray)\n pc, pca = run_pca(z)\n start = np.percentile(pc[:, pca_dim - 1], 5)\n stop = np.percentile(pc[:, pca_dim - 1], 95)\n z_pc_traj = get_pc_traj(pca, z.shape[1], num, pca_dim, start, stop)\n point, point_id = get_nearest_point(z, z_pc_traj)\n return point, point_id" }, { "identifier": "get_nearest_point", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def get_nearest_point(data: np.ndarray, query: np.ndarray) -> Tuple[npt.NDArray[np.float32], np.ndarray]:\n \"\"\"\n Find closest point in @data to @query\n Return datapoint, index\n \"\"\"\n ind = cdist(query, data).argmin(axis=1)\n return data[ind], ind" }, { "identifier": "cluster_kmeans", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def cluster_kmeans(z: np.ndarray, K: int, on_data: bool = True, reorder: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Cluster z by K means clustering\n Returns cluster labels, cluster centers\n If reorder=True, reorders clusters according to agglomerative clustering of cluster centers\n \"\"\"\n kmeans = KMeans(n_clusters=K, n_init=10, random_state=0, max_iter=10)\n labels = kmeans.fit_predict(z)\n centers = kmeans.cluster_centers_\n\n centers_ind = None\n if on_data:\n centers, centers_ind = get_nearest_point(z, centers)\n\n if reorder:\n # BUG from seaborn or scipy:\n # sns.clustermap only supports data with at least 2 dim\n if z.shape[1] == 1:\n 
centers = np.hstack([centers, np.zeros_like(centers)])\n g = sns.clustermap(centers)\n reordered = g.dendrogram_row.reordered_ind\n centers = centers[reordered]\n if centers_ind is not None:\n centers_ind = centers_ind[reordered]\n tmp = {k: i for i, k in enumerate(reordered)}\n labels = np.array([tmp[k] for k in labels])\n if z.shape[1] == 1:\n centers = centers[:, :1]\n return labels, centers" }, { "identifier": "pl_init_exp", "path": "cryostar/utils/misc.py", "snippet": "def set_seed(seed: int = 42):\ndef chain(arg, *funcs):\ndef convert_to_numpy(*args):\ndef CHECK_SHAPE(tensor, expected_shape):\ndef ASSERT_SHAPE(tensor, expected_shape):\ndef parse_mmengine_args(override_mode=\"default\"):\ndef flatten_nested_dict(nested: Union[dict, Config]) -> dict:\ndef warmup(warmup_step, lower=0.0, upper=1.0):\n def run(cur_step):\ndef init_mmengine_config(args):\ndef init_mmengine_exp(args,\n exp_prefix='',\n backup_list=None,\n inplace=True,\n work_dir_name=\"work_dirs\",\n project_name=\"cryostar\",\n tensorboard=False):\ndef _get_next_version(root_dir, dir_name_prefix):\ndef pl_init_exp(override_mode=\"default\",\n exp_prefix='',\n backup_list=None,\n inplace=False,\n work_dir_name=\"work_dirs\",\n project_name=\"cryostar\"):\ndef save_pdb(CAs, path, ref_pdb_path):\ndef load_CAs_from_pdb(file):\ndef load_NCaC_from_pdb(file):\ndef load_chain_A(pdb_path):\ndef points_to_pdb(path_to_save, points: np.ndarray):\ndef point_stack_to_pdb(path_to_save, point_stack: np.ndarray):\ndef find_rigid_alignment(A, B):\ndef batch_find_rigid_alignment(A, B):\ndef pretty_dict(x, precision=3):\ndef create_sphere_mask(d, h, w, center=None, radius=None) -> np.ndarray:\ndef create_circular_mask(h, w, center=None, radius=None) -> np.ndarray:\n H = A_c.T.mm(B_c)\n U, S, V = torch.svd(H)\n R = V.mm(U.T)\n H = einops.einsum(A_c, B_c, \"b n c1, b n c2 -> b c1 c2\")\n V = VmT.mT\n R = einops.einsum(V, U.transpose(2, 1), \"b c1 c2, b c2 c3 -> b c1 c3\")" }, { "identifier": "calc_kl_loss", "path": "cryostar/utils/losses.py", "snippet": "def calc_kl_loss(mu, log_var, free_bits, reduction=\"mean\"):\n kld_loss = -0.5 * (1 + log_var - mu.pow(2) - log_var.exp())\n # free bits\n kld_loss = torch.clamp(kld_loss, free_bits) # (bsz, z-dim)\n kld_loss = torch.mean(kld_loss, dim=1) # (bsz, )\n if reduction == \"mean\":\n kld_loss = torch.mean(kld_loss) # averaged over bsz x z-dim\n elif reduction == \"none\":\n kld_loss = kld_loss\n else:\n raise NotImplementedError\n return kld_loss" }, { "identifier": "VAEEncoder", "path": "cryostar/utils/ml_modules.py", "snippet": "class VAEEncoder(nn.Module):\n\n def __init__(self, in_dim: int, hidden_dim: Union[int, List[int]], out_dim: int, num_hidden_layers=3):\n super().__init__()\n self.in_dim = in_dim\n if isinstance(hidden_dim, int):\n self.hidden_dim = (hidden_dim, ) * num_hidden_layers\n elif isinstance(hidden_dim, (list, tuple)):\n assert len(hidden_dim) == num_hidden_layers\n self.hidden_dim = hidden_dim\n else:\n raise NotImplementedError\n self.out_dim = out_dim\n self.num_hidden_layers = num_hidden_layers\n\n self.input_layer = nn.Sequential(\n ResLinear(in_dim, self.hidden_dim[0]) if in_dim == self.hidden_dim[0] else Linear(\n in_dim, self.hidden_dim[0]), nn.ReLU(inplace=True))\n self.mlp = MLP(self.hidden_dim[:-1], self.hidden_dim[1:])\n\n self.mean_layer = Linear(self.hidden_dim[-1], out_dim)\n self.var_layer = Linear(self.hidden_dim[-1], out_dim)\n\n def forward(self, x):\n x = self.mlp(self.input_layer(x))\n mean = self.mean_layer(x)\n log_var = self.var_layer(x)\n return mean, 
log_var" }, { "identifier": "reparameterize", "path": "cryostar/utils/ml_modules.py", "snippet": "def reparameterize(mu, log_var):\n std = torch.exp(0.5 * log_var)\n eps = torch.randn_like(std)\n return mu + eps * std" }, { "identifier": "save_mrc", "path": "cryostar/utils/mrc_tools.py", "snippet": "def save_mrc(vol,\n path,\n voxel_size: Union[int, float, Tuple, np.recarray] = None,\n origin: Union[int, float, Tuple, np.recarray] = None):\n \"\"\"\n Save volumetric data to mrc file, set voxel_size, origin.\n See Also: https://mrcfile.readthedocs.io/en/stable/source/mrcfile.html#mrcfile.mrcobject.MrcObject.voxel_size\n Args:\n vol: density volume\n path: save path\n voxel_size: a single number, a 3-tuple (x, y ,z) or a modified version of the voxel_size array, default 1.\n origin: a single number, a 3-tuple (x, y ,z) or a modified version of the origin array, default 0.\n\n \"\"\"\n with mrcfile.new(path, overwrite=True) as m:\n m.set_data(vol)\n\n if voxel_size is not None:\n m.voxel_size = voxel_size\n\n if origin is not None:\n m.header.origin = origin" } ]
import os import os.path as osp import einops import lightning.pytorch as pl import numpy as np import torch from lightning.pytorch.strategies import DDPStrategy from lightning.pytorch.utilities import rank_zero_only from torch.utils.data import DataLoader from tqdm import tqdm from mmengine import mkdir_or_exist from cryostar.utils.dataio import StarfileDataSet, StarfileDatasetConfig from cryostar.nerf.volume_utils import ImplicitFourierVolume from cryostar.utils.transforms import SpatialGridTranslate, FourierGridTranslate from cryostar.utils.ctf_utils import CTFRelion, CTFCryoDRGN from cryostar.utils.fft_utils import (fourier_to_primal_2d, primal_to_fourier_2d) from cryostar.utils.latent_space_utils import sample_along_pca, get_nearest_point, cluster_kmeans from cryostar.utils.misc import (pl_init_exp, create_circular_mask, log_to_current, pretty_dict) from cryostar.utils.losses import calc_kl_loss from cryostar.utils.ml_modules import VAEEncoder, reparameterize from cryostar.utils.mrc_tools import save_mrc from miscs import infer_ctf_params_from_config
10,236
log_to_current = rank_zero_only(log_to_current) TASK_NAME = "density" class CryoModel(pl.LightningModule): def __init__(self, cfg, dataset): super().__init__() self.cfg = cfg self.dataset = dataset self.z_dim = cfg.model.z_dim self.history_saved_dirs = [] if cfg.extra_input_data_attr.given_z is None and self.z_dim != 0: if cfg.model.enc_space == "real": self.encoder = VAEEncoder(self.cfg.data_process.down_side_shape**2, cfg.model.hidden, self.z_dim, num_hidden_layers=4) elif cfg.model.enc_space == "fourier": self.encoder = VAEEncoder(2 * self.cfg.data_process.down_side_shape**2, cfg.model.hidden, self.z_dim, num_hidden_layers=4) else: raise NotImplementedError if cfg.model.shift_method == "interp": self.translate = SpatialGridTranslate(self.cfg.data_process.down_side_shape, ) log_to_current("We will deprecate `model.shift_method=interp` in a future version, use `model.shift_method=fft` instead.") elif cfg.model.shift_method == "fft": self.f_translate = FourierGridTranslate(self.cfg.data_process.down_side_shape, ) else: raise NotImplementedError ctf_params = infer_ctf_params_from_config(cfg) if cfg.model.ctf == "v1": self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset)) log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.") elif cfg.model.ctf == "v2":
log_to_current = rank_zero_only(log_to_current) TASK_NAME = "density" class CryoModel(pl.LightningModule): def __init__(self, cfg, dataset): super().__init__() self.cfg = cfg self.dataset = dataset self.z_dim = cfg.model.z_dim self.history_saved_dirs = [] if cfg.extra_input_data_attr.given_z is None and self.z_dim != 0: if cfg.model.enc_space == "real": self.encoder = VAEEncoder(self.cfg.data_process.down_side_shape**2, cfg.model.hidden, self.z_dim, num_hidden_layers=4) elif cfg.model.enc_space == "fourier": self.encoder = VAEEncoder(2 * self.cfg.data_process.down_side_shape**2, cfg.model.hidden, self.z_dim, num_hidden_layers=4) else: raise NotImplementedError if cfg.model.shift_method == "interp": self.translate = SpatialGridTranslate(self.cfg.data_process.down_side_shape, ) log_to_current("We will deprecate `model.shift_method=interp` in a future version, use `model.shift_method=fft` instead.") elif cfg.model.shift_method == "fft": self.f_translate = FourierGridTranslate(self.cfg.data_process.down_side_shape, ) else: raise NotImplementedError ctf_params = infer_ctf_params_from_config(cfg) if cfg.model.ctf == "v1": self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset)) log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.") elif cfg.model.ctf == "v2":
next_line: self.ctf = CTFCryoDRGN(**ctf_params, num_particles=len(dataset))
gold_snippet_index: 6
created_at: 2023-11-06 07:15:26+00:00
level: 12k
repo_name: xyongLu/SBCFormer
file_path: main.py
[ { "identifier": "Mixup", "path": "mixup.py", "snippet": "class Mixup:\n \"\"\" Mixup/Cutmix that applies different params to each element or whole batch\n\n Args:\n mixup_alpha (float): mixup alpha value, mixup is active if > 0.\n cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0.\n cutmix_minmax (List[float]): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None.\n prob (float): probability of applying mixup or cutmix per batch or element\n switch_prob (float): probability of switching to cutmix instead of mixup when both are active\n mode (str): how to apply mixup/cutmix params (per 'batch', 'pair' (pair of elements), 'elem' (element)\n correct_lam (bool): apply lambda correction when cutmix bbox clipped by image borders\n label_smoothing (float): apply label smoothing to the mixed target tensor\n num_classes (int): number of classes for target\n \"\"\"\n def __init__(self, mixup_alpha=1., cutmix_alpha=0., cutmix_minmax=None, prob=1.0, switch_prob=0.5,\n mode='batch', correct_lam=True, label_smoothing=0.1, num_classes=1000):\n self.mixup_alpha = mixup_alpha\n self.cutmix_alpha = cutmix_alpha\n self.cutmix_minmax = cutmix_minmax\n if self.cutmix_minmax is not None:\n assert len(self.cutmix_minmax) == 2\n # force cutmix alpha == 1.0 when minmax active to keep logic simple & safe\n self.cutmix_alpha = 1.0\n self.mix_prob = prob\n self.switch_prob = switch_prob\n self.label_smoothing = label_smoothing\n self.num_classes = num_classes\n self.mode = mode\n self.correct_lam = correct_lam # correct lambda based on clipped area for cutmix\n self.mixup_enabled = True # set to false to disable mixing (intended tp be set by train loop)\n\n def _params_per_elem(self, batch_size):\n lam = np.ones(batch_size, dtype=np.float32)\n use_cutmix = np.zeros(batch_size, dtype=np.bool)\n if self.mixup_enabled:\n if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:\n use_cutmix = np.random.rand(batch_size) < self.switch_prob\n lam_mix = np.where(\n use_cutmix,\n np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size),\n np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size))\n elif self.mixup_alpha > 0.:\n lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size)\n elif self.cutmix_alpha > 0.:\n use_cutmix = np.ones(batch_size, dtype=np.bool)\n lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size)\n else:\n assert False, \"One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true.\"\n lam = np.where(np.random.rand(batch_size) < self.mix_prob, lam_mix.astype(np.float32), lam)\n return lam, use_cutmix\n\n def _params_per_batch(self):\n lam = 1.\n use_cutmix = False\n if self.mixup_enabled and np.random.rand() < self.mix_prob:\n if self.mixup_alpha > 0. 
and self.cutmix_alpha > 0.:\n use_cutmix = np.random.rand() < self.switch_prob\n lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \\\n np.random.beta(self.mixup_alpha, self.mixup_alpha)\n elif self.mixup_alpha > 0.:\n lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha)\n elif self.cutmix_alpha > 0.:\n use_cutmix = True\n lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha)\n else:\n assert False, \"One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true.\"\n lam = float(lam_mix)\n return lam, use_cutmix\n\n def _mix_elem(self, x):\n batch_size = len(x)\n lam_batch, use_cutmix = self._params_per_elem(batch_size)\n x_orig = x.clone() # need to keep an unmodified original for mixing source\n for i in range(batch_size):\n j = batch_size - i - 1\n lam = lam_batch[i]\n if lam != 1.:\n if use_cutmix[i]:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)\n x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]\n lam_batch[i] = lam\n else:\n x[i] = x[i] * lam + x_orig[j] * (1 - lam)\n return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)\n\n def _mix_pair(self, x):\n batch_size = len(x)\n lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)\n x_orig = x.clone() # need to keep an unmodified original for mixing source\n for i in range(batch_size // 2):\n j = batch_size - i - 1\n lam = lam_batch[i]\n if lam != 1.:\n if use_cutmix[i]:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)\n x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]\n x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh]\n lam_batch[i] = lam\n else:\n x[i] = x[i] * lam + x_orig[j] * (1 - lam)\n x[j] = x[j] * lam + x_orig[i] * (1 - lam)\n lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))\n return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)\n\n def _mix_batch(self, x):\n lam, use_cutmix = self._params_per_batch()\n if lam == 1.:\n return 1.\n if use_cutmix:\n (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(\n x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)\n x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh]\n else:\n x_flipped = x.flip(0).mul_(1. 
- lam)\n x.mul_(lam).add_(x_flipped)\n return lam\n\n def __call__(self, x, target):\n assert len(x) % 2 == 0, 'Batch size should be even when using this'\n if self.mode == 'elem':\n lam = self._mix_elem(x)\n elif self.mode == 'pair':\n lam = self._mix_pair(x)\n else:\n lam = self._mix_batch(x)\n target = mixup_target(target, self.num_classes, lam, self.label_smoothing, x.device)\n return x, target" }, { "identifier": "build_dataset", "path": "datasets.py", "snippet": "def build_dataset(is_train, args):\n \n if args.data_set == 'CIFAR10':\n if is_train:\n transform = transforms.Compose([\n transforms.Resize(args.input_size),\n transforms.RandomCrop(args.input_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(CIFAR10_DEFAULT_MEAN, CIFAR10_DEFAULT_STD)\n ])\n else:\n transform = transforms.Compose([\n transforms.Resize(args.input_size),\n transforms.ToTensor(),\n transforms.Normalize(CIFAR10_DEFAULT_MEAN, CIFAR10_DEFAULT_STD)\n ])\n \n dataset = datasets.CIFAR10(args.data_path, train=is_train, download=True, transform=transform)\n nb_classes = 10\n elif args.data_set == 'CIFAR100':\n if is_train:\n transform = transforms.Compose([\n transforms.Resize(args.input_size),\n transforms.RandomCrop(args.input_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(CIFAR100_DEFAULT_MEAN, CIFAR100_DEFAULT_STD)\n ])\n else:\n transform = transforms.Compose([\n transforms.Resize(args.input_size),\n transforms.ToTensor(),\n transforms.Normalize(CIFAR100_DEFAULT_MEAN, CIFAR100_DEFAULT_STD)\n ])\n\n dataset = datasets.CIFAR100(args.data_path, train=is_train, download=True, transform=transform)\n nb_classes = 100\n elif args.data_set == 'IMNET':\n transform = build_transform(is_train, args)\n \n root = os.path.join(args.data_path, 'train' if is_train else 'val')\n dataset = datasets.ImageFolder(root, transform=transform)\n nb_classes = 1000\n elif args.data_set == 'INAT':\n transform = build_transform(is_train, args)\n\n dataset = INatDataset(args.data_path, train=is_train, year=2018,\n category=args.inat_category, transform=transform)\n nb_classes = dataset.nb_classes\n elif args.data_set == 'INAT19':\n transform = build_transform(is_train, args)\n\n dataset = INatDataset(args.data_path, train=is_train, year=2019,\n category=args.inat_category, transform=transform)\n nb_classes = dataset.nb_classes\n\n return dataset, nb_classes" }, { "identifier": "train_one_epoch", "path": "engine.py", "snippet": "def train_one_epoch(model: torch.nn.Module, criterion: DistillationLoss,\n data_loader: Iterable, optimizer: torch.optim.Optimizer,\n device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,\n model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None,\n set_training_mode=True):\n model.train(set_training_mode)\n metric_logger = utils.MetricLogger(delimiter=\" \")\n metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))\n header = 'Epoch: [{}]'.format(epoch)\n print_freq = 10\n\n for samples, targets in metric_logger.log_every(data_loader, print_freq, header):\n samples = samples.to(device, non_blocking=True)\n targets = targets.to(device, non_blocking=True)\n\n if mixup_fn is not None:\n samples, targets = mixup_fn(samples, targets)\n\n with torch.cuda.amp.autocast():\n outputs = model(samples)\n loss = criterion(samples, outputs, targets)\n\n loss_value = loss.item()\n\n if not math.isfinite(loss_value):\n print(\"Loss is {}, stopping training\".format(loss_value))\n 
sys.exit(1)\n\n optimizer.zero_grad()\n\n # this attribute is added by timm on one optimizer (adahessian)\n is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order\n loss_scaler(loss, optimizer, clip_grad=max_norm,\n parameters=model.parameters(), create_graph=is_second_order)\n\n torch.cuda.synchronize()\n if model_ema is not None:\n model_ema.update(model)\n\n metric_logger.update(loss=loss_value)\n metric_logger.update(lr=optimizer.param_groups[0][\"lr\"])\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger)\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}" }, { "identifier": "evaluate", "path": "engine.py", "snippet": "@torch.no_grad()\ndef evaluate(data_loader, model, device):\n criterion = torch.nn.CrossEntropyLoss()\n\n metric_logger = utils.MetricLogger(delimiter=\" \")\n header = 'Test:'\n print_freq = 10\n\n # switch to evaluation mode\n model.eval()\n\n for images, target in metric_logger.log_every(data_loader, print_freq, header):\n images = images.to(device, non_blocking=True)\n target = target.to(device, non_blocking=True)\n\n # compute output\n with torch.cuda.amp.autocast():\n output = model(images)\n loss = criterion(output, target)\n\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n\n batch_size = images.shape[0]\n metric_logger.update(loss=loss.item())\n metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)\n metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'\n .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))\n\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}" }, { "identifier": "DistillationLoss", "path": "losses.py", "snippet": "class DistillationLoss(torch.nn.Module):\n \"\"\"\n This module wraps a standard criterion and adds an extra knowledge distillation loss by\n taking a teacher model prediction and using it as additional supervision.\n \"\"\"\n def __init__(self, base_criterion: torch.nn.Module, teacher_model: torch.nn.Module,\n distillation_type: str, alpha: float, tau: float):\n super().__init__()\n self.base_criterion = base_criterion\n self.teacher_model = teacher_model\n assert distillation_type in ['none', 'soft', 'hard']\n self.distillation_type = distillation_type\n self.alpha = alpha\n self.tau = tau\n\n def forward(self, inputs, outputs, labels):\n \"\"\"\n Args:\n inputs: The original inputs that are feed to the teacher model\n outputs: the outputs of the model to be trained. 
It is expected to be\n either a Tensor, or a Tuple[Tensor, Tensor], with the original output\n in the first position and the distillation predictions as the second output\n labels: the labels for the base criterion\n \"\"\"\n outputs_kd = None\n if not isinstance(outputs, torch.Tensor):\n # assume that the model outputs a tuple of [outputs, outputs_kd]\n outputs, outputs_kd = outputs\n base_loss = self.base_criterion(outputs, labels)\n if self.distillation_type == 'none':\n return base_loss\n\n if outputs_kd is None:\n raise ValueError(\"When knowledge distillation is enabled, the model is \"\n \"expected to return a Tuple[Tensor, Tensor] with the output of the \"\n \"class_token and the dist_token\")\n # don't backprop throught the teacher\n with torch.no_grad():\n teacher_outputs = self.teacher_model(inputs)\n\n if self.distillation_type == 'soft':\n T = self.tau\n # taken from https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100\n # with slight modifications\n distillation_loss = F.kl_div(\n F.log_softmax(outputs_kd / T, dim=1),\n #We provide the teacher's targets in log probability because we use log_target=True \n #(as recommended in pytorch https://github.com/pytorch/pytorch/blob/9324181d0ac7b4f7949a574dbc3e8be30abe7041/torch/nn/functional.py#L2719)\n #but it is possible to give just the probabilities and set log_target=False. In our experiments we tried both.\n F.log_softmax(teacher_outputs / T, dim=1),\n reduction='sum',\n log_target=True\n ) * (T * T) / outputs_kd.numel()\n #We divide by outputs_kd.numel() to have the legacy PyTorch behavior. \n #But we also experiments output_kd.size(0) \n #see issue 61(https://github.com/facebookresearch/deit/issues/61) for more details\n elif self.distillation_type == 'hard':\n distillation_loss = F.cross_entropy(outputs_kd, teacher_outputs.argmax(dim=1))\n\n loss = base_loss * (1 - self.alpha) + distillation_loss * self.alpha\n return loss" }, { "identifier": "RASampler", "path": "samplers.py", "snippet": "class RASampler(torch.utils.data.Sampler):\n \"\"\"Sampler that restricts data loading to a subset of the dataset for distributed,\n with repeated augmentation.\n It ensures that different each augmented version of a sample will be visible to a\n different process (GPU)\n Heavily based on torch.utils.data.DistributedSampler\n \"\"\"\n\n def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):\n if num_replicas is None:\n if not dist.is_available():\n raise RuntimeError(\"Requires distributed package to be available\")\n num_replicas = dist.get_world_size()\n if rank is None:\n if not dist.is_available():\n raise RuntimeError(\"Requires distributed package to be available\")\n rank = dist.get_rank()\n self.dataset = dataset\n self.num_replicas = num_replicas\n self.rank = rank\n self.epoch = 0\n self.num_samples = int(math.ceil(len(self.dataset) * 3.0 / self.num_replicas))\n self.total_size = self.num_samples * self.num_replicas\n # self.num_selected_samples = int(math.ceil(len(self.dataset) / self.num_replicas))\n self.num_selected_samples = int(math.floor(len(self.dataset) // 256 * 256 / self.num_replicas))\n self.shuffle = shuffle\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n if self.shuffle:\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = list(range(len(self.dataset)))\n\n # add extra samples to make it evenly divisible\n indices = [ele for ele in indices for i in 
range(3)]\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # subsample\n indices = indices[self.rank:self.total_size:self.num_replicas]\n assert len(indices) == self.num_samples\n\n return iter(indices[:self.num_selected_samples])\n\n def __len__(self):\n return self.num_selected_samples\n\n def set_epoch(self, epoch):\n self.epoch = epoch" } ]
import argparse import datetime import numpy as np import time import torch import torch.backends.cudnn as cudnn import json import utils from pathlib import Path from mixup import Mixup from timm.models import create_model from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy from timm.scheduler import create_scheduler from timm.optim import create_optimizer from timm.utils import NativeScaler, get_state_dict, ModelEma from datasets import build_dataset from engine import train_one_epoch, evaluate from losses import DistillationLoss from samplers import RASampler from models import *
token_num: 7,208
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)') parser.add_argument('--train-interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")') parser.add_argument('--repeated-aug', action='store_true') parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug') parser.set_defaults(repeated_aug=False) # * Random Erase params parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', help='Random erase prob (default: 0.25)') parser.add_argument('--remode', type=str, default='pixel', help='Random erase mode (default: "pixel")') parser.add_argument('--recount', type=int, default=1, help='Random erase count (default: 1)') parser.add_argument('--resplit', action='store_true', default=False, help='Do not random erase first (clean) augmentation split') # * Mixup params parser.add_argument('--mixup', type=float, default=0.8, help='mixup alpha, mixup enabled if > 0. (default: 0.8)') parser.add_argument('--cutmix', type=float, default=1.0, help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)') parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None, help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') parser.add_argument('--mixup-prob', type=float, default=1.0, help='Probability of performing mixup or cutmix when either/both is enabled') parser.add_argument('--mixup-switch-prob', type=float, default=0.5, help='Probability of switching to cutmix when both mixup and cutmix enabled') parser.add_argument('--mixup-mode', type=str, default='batch', help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"') # Distillation parameters distilled parser.add_argument('--distilled', action='store_true', default=False, help='Perform distilled ') parser.add_argument('--teacher-model', default='regnety_200mf', type=str, metavar='MODEL', help='Name of teacher model to train (default: "regnety_160"') parser.add_argument('--teacher-path', type=str, default='') parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help="") parser.add_argument('--distillation-alpha', default=0.5, type=float, help="") parser.add_argument('--distillation-tau', default=1.0, type=float, help="") # Finetuning params parser.add_argument('--finetune', default='', help='finetune from checkpoint') # Dataset parameters parser.add_argument('--data-path', default= '../../PythonWork_E/Data/ImageNet_2012',#'./data', type=str, help='dataset path') parser.add_argument('--data-set', default='IMNET', choices=['CIFAR10', 'CIFAR100' , 'IMNET'], type=str, help='Image Net dataset path') parser.add_argument('--inat-category', default='name', choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'], type=str, help='semantic granularity') parser.add_argument('--output_dir', default='./outputs', help='path where to save, empty for no saving') parser.add_argument('--device', default='cuda', help='device to use for training / testing') parser.add_argument('--seed', default=0, type=int) parser.add_argument('--resume', default= '', help='resume from checkpoint') parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch') parser.add_argument('--eval', action='store_true', default=False, help='Perform evaluation only') parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation') 
parser.add_argument('--num_workers', default=10, type=int) parser.add_argument('--pin-mem', action='store_true', help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem', help='') parser.set_defaults(pin_mem=True) # distributed training parameters parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes') parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') # test throught parser.add_argument('--throughout', action='store_true', help='Perform throughout only') return parser @torch.no_grad() def throughput(data_loader, model, logger): model.eval() for _, (images, _) in enumerate(data_loader): images = images.cuda(non_blocking=True) batch_size = images.shape[0] for i in range(50): model(images) torch.cuda.synchronize() logger.info(f"throughput averaged with 30 times") tic1 = time.time() for i in range(30): model(images) torch.cuda.synchronize() tic2 = time.time() logger.info(f"batch_size {batch_size} throughput {30 * batch_size / (tic2 - tic1)}") return def main(args): utils.init_distributed_mode(args) print('------------ Options -------------') for key, value in sorted(vars(args).items()): print('%16.16s: %16.16s' % (str(key), str(value))) print('-------------- End ----------------') if args.distillation_type != 'none' and args.finetune and not args.eval: raise NotImplementedError("Finetuning with distillation not yet supported") # fix the seed for reproducibility seed = args.seed + utils.get_rank() torch.manual_seed(seed) np.random.seed(seed) cudnn.benchmark = True dataset_train, args.nb_classes = build_dataset(is_train=True, args=args) dataset_val, args.nb_classes = build_dataset(is_train=False, args=args) if args.distributed: num_tasks = utils.get_world_size() global_rank = utils.get_rank() if args.repeated_aug:
# Copyright (c) 2015-present, Facebook, Inc. # All rights reserved. # from ptflops import get_model_complexity_info device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print("running on {} device.".format(device)) def get_args_parser(): parser = argparse.ArgumentParser('SlenderViT training and evaluation script', add_help=False) # Model parameters parser.add_argument('--uni-note', default='', type=str, help='unique note on the name of model to train') parser.add_argument('--model', default='SBCFormer_B', type=str, metavar='MODEL', help='Name of model to train.') parser.add_argument('--epochs', default=300, type=int) parser.add_argument('--input-size', default=224, type=int, help='images input size') parser.add_argument('--in-chans', type=int, default=3, help='the channel of inputs ') parser.add_argument('--batch-size', default=30, type=int) parser.add_argument('--drop', type=float, default=0., metavar='PCT', help='Dropout rate (default: 0.)') parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)') parser.add_argument('--model-ema', action='store_true') parser.add_argument('--no-model-ema', action='store_false', dest='model_ema') parser.set_defaults(model_ema=False) parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='') parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='') # Optimizer parameters parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', help='Optimizer (default: "adamw"') parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON', help='Optimizer Epsilon (defaudevice = torch.device(args.device)ult: None, no clipping)') parser.add_argument('--clip-grad', type=float, default=5, metavar='NORM', help='Clip gradient norm (default: None, no clipping)') parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)') parser.add_argument('--weight-decay', type=float, default=0.05, help='weight decay (default: 0.05)') # Learning rate schedule parameters parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER', help='LR scheduler (default: "cosine"') parser.add_argument('--lr', type=float, default=2.5e-4, metavar='LR', help='learning rate (default: 5e-4)') parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct', help='learning rate noise on/off epoch percentages') parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT', help='learning rate noise limit percent (default: 0.67)') parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV', help='learning rate noise std-dev (default: 1.0)') parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR', help='warmup learning rate (default: 1e-6)') parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (1e-5)') parser.add_argument('--decay-epochs', type=float, default=30, metavar='N', help='epoch interval to decay LR') parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N', help='epochs to warmup LR, if scheduler supports') parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N', help='epochs to cooldown LR at min_lr, after cyclic schedule ends') parser.add_argument('--patience-epochs', type=int, default=10, metavar='N', help='patience epochs for Plateau LR scheduler (default: 10') 
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', help='LR decay rate (default: 0.1)') # Augmentation parameters parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)') parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME', help='Use AutoAugment policy. "v0" or "original". " + "(default: rand-m9-mstd0.5-inc1)'), parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)') parser.add_argument('--train-interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")') parser.add_argument('--repeated-aug', action='store_true') parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug') parser.set_defaults(repeated_aug=False) # * Random Erase params parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', help='Random erase prob (default: 0.25)') parser.add_argument('--remode', type=str, default='pixel', help='Random erase mode (default: "pixel")') parser.add_argument('--recount', type=int, default=1, help='Random erase count (default: 1)') parser.add_argument('--resplit', action='store_true', default=False, help='Do not random erase first (clean) augmentation split') # * Mixup params parser.add_argument('--mixup', type=float, default=0.8, help='mixup alpha, mixup enabled if > 0. (default: 0.8)') parser.add_argument('--cutmix', type=float, default=1.0, help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)') parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None, help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') parser.add_argument('--mixup-prob', type=float, default=1.0, help='Probability of performing mixup or cutmix when either/both is enabled') parser.add_argument('--mixup-switch-prob', type=float, default=0.5, help='Probability of switching to cutmix when both mixup and cutmix enabled') parser.add_argument('--mixup-mode', type=str, default='batch', help='How to apply mixup/cutmix params. 
Per "batch", "pair", or "elem"') # Distillation parameters distilled parser.add_argument('--distilled', action='store_true', default=False, help='Perform distilled ') parser.add_argument('--teacher-model', default='regnety_200mf', type=str, metavar='MODEL', help='Name of teacher model to train (default: "regnety_160"') parser.add_argument('--teacher-path', type=str, default='') parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help="") parser.add_argument('--distillation-alpha', default=0.5, type=float, help="") parser.add_argument('--distillation-tau', default=1.0, type=float, help="") # Finetuning params parser.add_argument('--finetune', default='', help='finetune from checkpoint') # Dataset parameters parser.add_argument('--data-path', default= '../../PythonWork_E/Data/ImageNet_2012',#'./data', type=str, help='dataset path') parser.add_argument('--data-set', default='IMNET', choices=['CIFAR10', 'CIFAR100' , 'IMNET'], type=str, help='Image Net dataset path') parser.add_argument('--inat-category', default='name', choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'], type=str, help='semantic granularity') parser.add_argument('--output_dir', default='./outputs', help='path where to save, empty for no saving') parser.add_argument('--device', default='cuda', help='device to use for training / testing') parser.add_argument('--seed', default=0, type=int) parser.add_argument('--resume', default= '', help='resume from checkpoint') parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch') parser.add_argument('--eval', action='store_true', default=False, help='Perform evaluation only') parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation') parser.add_argument('--num_workers', default=10, type=int) parser.add_argument('--pin-mem', action='store_true', help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem', help='') parser.set_defaults(pin_mem=True) # distributed training parameters parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes') parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') # test throught parser.add_argument('--throughout', action='store_true', help='Perform throughout only') return parser @torch.no_grad() def throughput(data_loader, model, logger): model.eval() for _, (images, _) in enumerate(data_loader): images = images.cuda(non_blocking=True) batch_size = images.shape[0] for i in range(50): model(images) torch.cuda.synchronize() logger.info(f"throughput averaged with 30 times") tic1 = time.time() for i in range(30): model(images) torch.cuda.synchronize() tic2 = time.time() logger.info(f"batch_size {batch_size} throughput {30 * batch_size / (tic2 - tic1)}") return def main(args): utils.init_distributed_mode(args) print('------------ Options -------------') for key, value in sorted(vars(args).items()): print('%16.16s: %16.16s' % (str(key), str(value))) print('-------------- End ----------------') if args.distillation_type != 'none' and args.finetune and not args.eval: raise NotImplementedError("Finetuning with distillation not yet supported") # fix the seed for reproducibility seed = args.seed + utils.get_rank() torch.manual_seed(seed) np.random.seed(seed) cudnn.benchmark = True dataset_train, args.nb_classes = 
build_dataset(is_train=True, args=args) dataset_val, args.nb_classes = build_dataset(is_train=False, args=args) if args.distributed: num_tasks = utils.get_world_size() global_rank = utils.get_rank() if args.repeated_aug:
next_line: sampler_train = RASampler(
gold_snippet_index: 5
created_at: 2023-11-06 03:31:47+00:00
level: 12k
repo_name: zamaniamin/fastapi-shop
file_path: apps/products/tests/test_product_media.py
[ { "identifier": "FakeUser", "path": "apps/accounts/faker/data.py", "snippet": "class FakeUser(BaseFakeAccount):\n\n @classmethod\n def populate_members(cls):\n \"\"\"\n Create an admin and a user.\n \"\"\"\n\n # --- admin ---\n user, access_token = FakeAccount.verified_registration()\n user_data = {\n 'email': '[email protected]',\n 'first_name': cls.fake.first_name(),\n 'last_name': cls.fake.last_name(),\n 'is_superuser': True,\n 'role': 'admin'\n }\n\n UserManager.update_user(user.id, **user_data)\n\n # --- user ---\n user, access_token = FakeAccount.verified_registration()\n user_data = {\n 'email': '[email protected]',\n 'first_name': cls.fake.first_name(),\n 'last_name': cls.fake.last_name()\n }\n\n UserManager.update_user(user.id, **user_data)\n\n @classmethod\n def populate_admin(cls):\n \"\"\"\n Create an admin and generate an access token too.\n \"\"\"\n\n user, access_token = FakeAccount.verified_registration()\n user_data = {\n 'first_name': cls.fake.first_name(),\n 'last_name': cls.fake.last_name(),\n 'is_superuser': True,\n 'role': 'admin'\n }\n\n user = UserManager.update_user(user.id, **user_data)\n return user, access_token\n\n @classmethod\n def populate_user(cls):\n \"\"\"\n Create a new user and generate an access token too.\n \"\"\"\n\n user, access_token = FakeAccount.verified_registration()\n user_data = {\n 'first_name': cls.fake.first_name(),\n 'last_name': cls.fake.last_name()\n }\n\n user = UserManager.update_user(user.id, **user_data)\n return user, access_token" }, { "identifier": "User", "path": "apps/accounts/models.py", "snippet": "class User(FastModel):\n \"\"\"\n User represents registered users in the application.\n\n Attributes:\n id (int): Unique identifier for the user.\n email (str): User's email address used for authentication and communication.\n password (str): Hashed password for user authentication.\n first_name (str, optional): User's first name. Default is None.\n last_name (str, optional): User's last name. Default is None.\n is_verified_email (bool): Flag indicating whether the user's email address has been verified.\n is_active (bool): Flag indicating whether the user's account is active.\n is_superuser (bool): Flag indicating whether the user has superuser privileges.\n role (str): User's role in the system, represented as a short string.\n date_joined (datetime): Timestamp indicating when the user account was created.\n updated_at (datetime, optional): Timestamp indicating when the user account was last updated. Default is None.\n last_login (datetime, optional): Timestamp indicating the user's last login time. 
Default is None.\n change (relationship): Relationship attribute linking this user to change requests initiated by the user.\n \"\"\"\n\n __tablename__ = \"users\"\n\n id = Column(Integer, primary_key=True)\n email = Column(String(256), nullable=False, unique=True)\n password = Column(String, nullable=False)\n\n first_name = Column(String(256), nullable=True)\n last_name = Column(String(256), nullable=True)\n\n is_verified_email = Column(Boolean, default=False)\n is_active = Column(Boolean, default=False)\n is_superuser = Column(Boolean, default=False)\n\n # TODO add unittest and check the default role is 'user', also move role to permissions table\n role = Column(String(5), default=\"user\")\n\n date_joined = Column(DateTime, server_default=func.now())\n updated_at = Column(DateTime, nullable=True, onupdate=func.now())\n last_login = Column(DateTime, nullable=True)\n\n change = relationship(\"UserVerification\", back_populates=\"user\", cascade=\"all, delete-orphan\")" }, { "identifier": "BaseTestCase", "path": "apps/core/base_test_case.py", "snippet": "class BaseTestCase:\n\n @staticmethod\n def assert_datetime_format(date: str | datetime):\n if isinstance(date, datetime):\n date = DateTime.string(date)\n\n formatted_date = datetime.strptime(date, '%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d %H:%M:%S')\n assert date == formatted_date\n\n @staticmethod\n def convert_datetime_to_string(date):\n return DateTime.string(date)" }, { "identifier": "app", "path": "apps/main.py", "snippet": "" }, { "identifier": "FakeProduct", "path": "apps/products/faker/data.py", "snippet": "class FakeProduct:\n \"\"\"\n Populates the database with fake products.\n \"\"\"\n\n fake = Faker()\n\n options = ['color', 'size', 'material', 'Style']\n option_color_items = ['red', 'green', 'black', 'blue', 'yellow']\n option_size_items = ['S', 'M', 'L', 'XL', 'XXL']\n option_material_items = ['Cotton', 'Nylon', 'Plastic', 'Wool', 'Leather']\n option_style_items = ['Casual', 'Formal']\n\n def fill_products(self):\n \"\"\"\n For generating fake products as demo.\n \"\"\"\n self.fake.add_provider(lorem)\n\n @classmethod\n def generate_name(cls):\n return cls.fake.text(max_nb_chars=25)\n\n @classmethod\n def generate_description(cls):\n return cls.fake.paragraph(nb_sentences=5)\n\n @staticmethod\n def get_random_price():\n return round(random.uniform(1, 100), 2)\n\n @staticmethod\n def get_random_stock():\n return random.randint(0, 100)\n\n @classmethod\n def generate_uniq_options(cls):\n return [\n {\n \"option_name\": \"color\",\n \"items\": cls.option_color_items[:2]\n },\n {\n \"option_name\": \"size\",\n \"items\": cls.option_size_items[:2]\n },\n {\n \"option_name\": \"material\",\n \"items\": cls.option_material_items[:2]\n }\n ]\n\n @classmethod\n def get_payload(cls):\n payload = {\n 'product_name': cls.generate_name(),\n 'description': cls.generate_description(),\n 'status': 'active',\n 'price': cls.get_random_price(),\n 'stock': cls.get_random_stock()\n }\n return payload.copy()\n\n @classmethod\n def get_payload_with_options(cls):\n payload = {\n 'product_name': cls.generate_name(),\n 'description': cls.generate_description(),\n 'status': 'active',\n 'price': cls.get_random_price(),\n 'stock': cls.get_random_stock(),\n 'options': cls.generate_uniq_options()\n }\n return payload.copy()\n\n @classmethod\n def populate_product(cls) -> tuple[dict[str, str | int], Product]:\n \"\"\"\n Crete a product without options.\n \"\"\"\n\n product_data = cls.get_payload()\n return product_data.copy(), 
ProductService.create_product(product_data, get_obj=True)\n\n @classmethod\n def populate_product_with_options(cls, get_product_obj=True) -> tuple[dict[str, str | int], Product | dict]:\n \"\"\"\n Crete a product with options. (with all fields)\n \"\"\"\n\n product_data = cls.get_payload_with_options()\n return product_data.copy(), ProductService.create_product(product_data, get_obj=get_product_obj)\n\n @classmethod\n async def populate_product_with_media(cls):\n payload: dict\n product: Product\n\n # --- create a product ---\n payload, product = cls.populate_product()\n payload['alt'] = 'Test Alt Text'\n\n # --- get demo images ---\n upload = FakeMedia.populate_images_for_product(upload_file=True, product_id=product.id)\n\n # --- attach media to product ---\n media = ProductService.create_media(product.id, payload['alt'], upload)\n if media:\n return payload, product\n\n @classmethod\n async def populate_product_with_options_media(cls):\n \"\"\"\n Crete a product with options and attach some media to it.\n \"\"\"\n\n payload: dict\n product: Product\n\n # --- create a product ---\n payload, product = cls.populate_product_with_options()\n payload['alt'] = 'Test Alt Text'\n\n # --- get demo images ---\n upload = FakeMedia.populate_images_for_product(upload_file=True, product_id=product.id)\n\n # --- attach media to product ---\n media = ProductService.create_media(product.id, payload['alt'], upload)\n if media:\n return payload, product\n\n @classmethod\n async def populate_30_products(cls):\n\n # --- create 12 products with media ---\n # TODO generate random options for variable-products\n for i in range(6):\n await cls.populate_product_with_options_media()\n for i in range(6):\n await cls.populate_product_with_media()\n\n # --- create 18 products without media ---\n for i in range(9):\n cls.populate_product()\n for i in range(9):\n cls.populate_product_with_options()" }, { "identifier": "FakeMedia", "path": "apps/products/faker/data.py", "snippet": "class FakeMedia:\n product_demo_dir = f'{DEMO_PRODUCTS_MEDIA_DIR}'\n\n @classmethod\n def populate_images_for_product(cls, upload_file=False, product_id: int = 1):\n \"\"\"\n Attach some media (images) just to a product.\n\n Read some image file in `.jpg` format from this directory:\n `/apps/demo/products/{number}` (you can replace your files in the dir)\n \"\"\"\n\n directory_path = f'{DEMO_PRODUCTS_MEDIA_DIR}/{product_id}'\n file_paths = []\n upload = []\n\n if os.path.isdir(directory_path):\n for filename in os.listdir(directory_path):\n if filename.endswith(\".jpg\"):\n file_path = os.path.join(directory_path, filename)\n file_paths.append(file_path)\n\n for_upload = UploadFile(filename=filename, file=open(file_path, \"rb\"))\n upload.append(for_upload)\n\n else:\n raise FileNotFoundError(f\"{DEMO_PRODUCTS_MEDIA_DIR}\")\n\n if upload_file:\n return upload\n return file_paths\n\n @classmethod\n def populate_docs_file(cls, upload_file=False):\n docs_path = f'{DEMO_DOCS_DIR}/'\n file_paths = []\n upload = []\n\n if os.path.isdir(docs_path):\n file_path = os.path.join(docs_path, 'test.txt')\n file_paths.append(file_path)\n\n for_upload = UploadFile(filename='test.txt', file=open(file_path, \"rb\"))\n upload.append(for_upload)\n else:\n raise FileNotFoundError(f\"{DEMO_PRODUCTS_MEDIA_DIR}\")\n if upload_file:\n return upload\n return file_paths\n\n @classmethod\n def populate_large_file(cls, upload_file=False):\n docs_path = f'{DEMO_LARGE_DIR}/'\n file_paths = []\n upload = []\n\n if os.path.isdir(docs_path):\n file_path = 
os.path.join(docs_path, 'large.png')\n file_paths.append(file_path)\n\n for_upload = UploadFile(filename='large.png', file=open(file_path, \"rb\"))\n upload.append(for_upload)\n else:\n raise FileNotFoundError(f\"{DEMO_PRODUCTS_MEDIA_DIR}\")\n if upload_file:\n return upload\n return file_paths" }, { "identifier": "ProductService", "path": "apps/products/services.py", "snippet": "class ProductService:\n request: Request | None = None\n product = None\n price: int | float\n stock: int\n options: list | None = []\n options_data: list = []\n variants: list = []\n media: list | None = None\n\n @classmethod\n def __init__(cls, request: Request | None = None):\n cls.request = request\n\n @classmethod\n def create_product(cls, data: dict, get_obj: bool = False):\n\n cls._create_product(data)\n cls.__create_product_options()\n cls.__create_variants()\n\n if get_obj:\n return cls.product\n return cls.retrieve_product(cls.product.id)\n\n @classmethod\n def _create_product(cls, data: dict):\n cls.price = data.pop('price', 0)\n cls.stock = data.pop('stock', 0)\n cls.options_data = data.pop('options', [])\n\n if 'status' in data:\n # Check if the value is one of the specified values, if not, set it to 'draft'\n valid_statuses = ['active', 'archived', 'draft']\n if data['status'] not in valid_statuses:\n data['status'] = 'draft'\n\n # create a product\n cls.product = Product.create(**data)\n\n @classmethod\n def __create_product_options(cls):\n \"\"\"\n Create new option if it doesn't exist and update its items,\n and ensures that options are uniq in a product and also items in each option are uniq.\n \"\"\"\n\n if cls.options_data:\n for option in cls.options_data:\n\n # Creates a new instance of the ProductOption model, adds it to the database,\n # and commits the transaction. 
Returns the newly created model instance\n new_option = ProductOption.create(product_id=cls.product.id, option_name=option['option_name'])\n\n for item in option['items']:\n ProductOptionItem.create(option_id=new_option.id, item_name=item)\n cls.options = cls.retrieve_options(cls.product.id)\n else:\n cls.options = None\n\n @classmethod\n def retrieve_options(cls, product_id):\n \"\"\"\n Get all options of a product\n \"\"\"\n\n product_options = []\n options = ProductOption.filter(ProductOption.product_id == product_id).all()\n for option in options:\n # Retrieves records from the database based on a given filter condition.\n # Returns a list of model instances matching the filter condition.\n items = ProductOptionItem.filter(ProductOptionItem.option_id == option.id).all()\n\n product_options.append({\n 'options_id': option.id,\n 'option_name': option.option_name,\n 'items': [{'item_id': item.id, 'item_name': item.item_name} for item in items]\n })\n if product_options:\n return product_options\n else:\n return None\n\n @classmethod\n def __create_variants(cls):\n \"\"\"\n Create a default variant or create variants by options combination.\n \"\"\"\n\n if cls.options:\n\n # create variants by options combination\n items_id = cls.get_item_ids_by_product_id(cls.product.id)\n variants = list(options_combination(*items_id))\n for variant in variants:\n values_tuple = tuple(variant)\n\n # set each value to an option and set none if it doesn't exist\n while len(values_tuple) < 3:\n values_tuple += (None,)\n option1, option2, option3 = values_tuple\n\n ProductVariant.create(\n product_id=cls.product.id,\n option1=option1,\n option2=option2,\n option3=option3,\n price=cls.price,\n stock=cls.stock\n )\n else:\n # set a default variant\n ProductVariant.create(\n product_id=cls.product.id,\n price=cls.price,\n stock=cls.stock\n )\n\n cls.variants = cls.retrieve_variants(cls.product.id)\n\n @classmethod\n def retrieve_variants(cls, product_id):\n \"\"\"\n Get all variants of a product\n \"\"\"\n\n product_variants = []\n variants: list[ProductVariant] = ProductVariant.filter(ProductVariant.product_id == product_id).all()\n for variant in variants:\n product_variants.append(\n {\n \"variant_id\": variant.id,\n \"product_id\": variant.product_id,\n \"price\": variant.price,\n \"stock\": variant.stock,\n \"option1\": variant.option1,\n \"option2\": variant.option2,\n \"option3\": variant.option3,\n \"created_at\": DateTime.string(variant.created_at),\n \"updated_at\": DateTime.string(variant.updated_at)\n })\n\n if product_variants:\n return product_variants\n return None\n\n @staticmethod\n def retrieve_variant(variant_id: int):\n variant = ProductVariant.get_or_404(variant_id)\n variant_data = {\n \"variant_id\": variant.id,\n \"product_id\": variant.product_id,\n \"price\": variant.price,\n \"stock\": variant.stock,\n \"option1\": variant.option1,\n \"option2\": variant.option2,\n \"option3\": variant.option3,\n \"created_at\": DateTime.string(variant.created_at),\n \"updated_at\": DateTime.string(variant.updated_at)\n }\n return variant_data\n\n @classmethod\n def get_item_ids_by_product_id(cls, product_id):\n item_ids_by_option = []\n item_ids_dict = {}\n with DatabaseManager.session as session:\n\n # Query the ProductOptionItem table to retrieve item_ids\n items = (\n session.query(ProductOptionItem.option_id, ProductOptionItem.id)\n .join(ProductOption)\n .filter(ProductOption.product_id == product_id)\n .all()\n )\n\n # Separate item_ids by option_id\n for option_id, item_id in items:\n if 
option_id not in item_ids_dict:\n item_ids_dict[option_id] = []\n item_ids_dict[option_id].append(item_id)\n\n # Append `item_ids` lists to the result list\n item_ids_by_option.extend(item_ids_dict.values())\n\n return item_ids_by_option\n\n @classmethod\n def retrieve_product(cls, product_id):\n cls.product = Product.get_or_404(product_id)\n cls.options = cls.retrieve_options(product_id)\n cls.variants = cls.retrieve_variants(product_id)\n cls.media = cls.retrieve_media_list(product_id)\n\n product = {\n 'product_id': cls.product.id,\n 'product_name': cls.product.product_name,\n 'description': cls.product.description,\n 'status': cls.product.status,\n 'created_at': DateTime.string(cls.product.created_at),\n 'updated_at': DateTime.string(cls.product.updated_at),\n 'published_at': DateTime.string(cls.product.published_at),\n 'options': cls.options,\n 'variants': cls.variants,\n 'media': cls.media\n }\n return product\n\n @classmethod\n def update_product(cls, product_id, **kwargs):\n\n # --- init data ---\n # TODO `updated_at` is autoupdate dont need to code\n kwargs['updated_at'] = DateTime.now()\n\n # --- update product ---\n Product.update(product_id, **kwargs)\n return cls.retrieve_product(product_id)\n\n @classmethod\n def update_variant(cls, variant_id, **kwargs):\n # check variant exist\n ProductVariant.get_or_404(variant_id)\n\n # TODO `updated_at` is autoupdate dont need to code\n kwargs['updated_at'] = DateTime.now()\n ProductVariant.update(variant_id, **kwargs)\n\n return cls.retrieve_variant(variant_id)\n\n @classmethod\n def list_products(cls, limit: int = 12):\n # - if \"default variant\" is not set, first variant will be\n # - on list of products, for price, get it from \"default variant\"\n # - if price or stock of default variant is 0 then select first variant that is not 0\n # - or for price, get it from \"less price\"\n # do all of them with graphql and let the front devs decide witch query should be run.\n\n # also can override the list `limit` in settings.py\n if hasattr(settings, 'products_list_limit'):\n limit = settings.products_list_limit\n\n products_list = []\n\n with DatabaseManager.session as session:\n products = session.execute(\n select(Product.id).limit(limit)\n )\n\n for product in products:\n products_list.append(cls.retrieve_product(product.id))\n\n return products_list\n # --- list by join ----\n # products_list = []\n # with DatabaseManager.session as session:\n # products = select(\n # Product.id,\n # Product.product_name,\n # coalesce(ProductMedia.alt, None).label('alt'),\n # coalesce(ProductMedia.src, None).label('src'),\n # # media.alt,\n # ProductVariant.price,\n # ProductVariant.stock\n # ).outerjoin(ProductMedia).outerjoin(ProductVariant)\n # products = session.execute(products)\n #\n # for product in products:\n # media = {'src': product.src, 'alt': product.alt} if product.src is not None else None\n # products_list.append(\n # {\n # 'product_id': product.id,\n # 'product_name': product.product_name,\n # 'price': product.price,\n # 'stock': product.stock,\n # 'media': media\n # }\n # )\n\n @classmethod\n def create_media(cls, product_id, alt, files):\n \"\"\"\n Save uploaded media to `media` directory and attach uploads to a product.\n \"\"\"\n\n product: Product = Product.get_or_404(product_id)\n media_service = MediaService(parent_directory=\"/products\", sub_directory=product_id)\n\n for file in files:\n file_name, file_extension = media_service.save_file(file)\n ProductMedia.create(\n product_id=product_id,\n alt=alt if alt is not None else 
product.product_name,\n src=file_name,\n type=file_extension\n )\n\n media = cls.retrieve_media_list(product_id)\n return media\n\n @classmethod\n def retrieve_media_list(cls, product_id):\n \"\"\"\n Get all media of a product.\n \"\"\"\n\n media_list = []\n product_media: list[ProductMedia] = ProductMedia.filter(ProductMedia.product_id == product_id).all()\n for media in product_media:\n media_list.append(\n {\n \"media_id\": media.id,\n \"product_id\": media.product_id,\n \"alt\": media.alt,\n \"src\": cls.__get_media_url(media.product_id, media.src),\n \"type\": media.type,\n \"created_at\": DateTime.string(media.created_at),\n \"updated_at\": DateTime.string(media.updated_at)\n })\n if media_list:\n return media_list\n else:\n return None\n\n @classmethod\n def retrieve_single_media(cls, media_id):\n \"\"\"\n Get a media by id.\n \"\"\"\n\n media_obj = ProductMedia.filter(ProductMedia.id == media_id).first()\n if media_obj:\n media = {\n \"media_id\": media_obj.id,\n \"product_id\": media_obj.product_id,\n \"alt\": media_obj.alt,\n \"src\": cls.__get_media_url(media_obj.product_id, media_obj.src),\n \"type\": media_obj.type,\n \"created_at\": DateTime.string(media_obj.created_at),\n \"updated_at\": DateTime.string(media_obj.updated_at)\n }\n return media\n else:\n return None\n\n @classmethod\n def __get_media_url(cls, product_id, file_name: str):\n if cls.request is None:\n base_url = \"http://127.0.0.1:8000/\"\n else:\n base_url = str(cls.request.base_url)\n\n return f\"{base_url}media/products/{product_id}/{file_name}\" if file_name is not None else None\n\n @classmethod\n def update_media(cls, media_id, **kwargs):\n # check media exist\n media: ProductMedia = ProductMedia.get_or_404(media_id)\n file = kwargs.pop('file', None)\n if file is not None:\n media_service = MediaService(parent_directory=\"/products\", sub_directory=media.product_id)\n file_name, file_extension = media_service.save_file(file)\n kwargs['src'] = file_name\n kwargs['type'] = file_extension\n\n # TODO `updated_at` is autoupdate dont need to code\n kwargs['updated_at'] = DateTime.now()\n ProductMedia.update(media_id, **kwargs)\n\n return cls.retrieve_single_media(media_id)\n\n @staticmethod\n def delete_product_media(product_id, media_ids: list[int]):\n\n # Fetch the product media records to be deleted\n with DatabaseManager.session as session:\n filters = [\n and_(ProductMedia.product_id == product_id, ProductMedia.id == media_id)\n for media_id in media_ids\n ]\n media_to_delete = session.query(ProductMedia).filter(or_(*filters)).all()\n\n # Delete the product media records\n for media in media_to_delete:\n ProductMedia.delete(ProductMedia.get_or_404(media.id))\n return None\n\n @staticmethod\n def delete_product(product_id):\n Product.delete(Product.get_or_404(product_id))\n\n @classmethod\n def delete_media_file(cls, media_id: int):\n media = ProductMedia.get_or_404(media_id)\n product_id = media.product_id\n\n media_service = MediaService(parent_directory=\"/products\", sub_directory=product_id)\n is_fie_deleted = media_service.delete_file(media.src)\n if is_fie_deleted:\n ProductMedia.delete(ProductMedia.get_or_404(media_id))\n return True\n return False" }, { "identifier": "DatabaseManager", "path": "config/database.py", "snippet": "class DatabaseManager:\n \"\"\"\n A utility class for managing database operations using SQLAlchemy.\n\n The DatabaseManager simplifies the process of initializing and managing database connections, creating database\n tables based on SQLAlchemy models, and providing a session 
for performing database operations.\n\n Attributes:\n engine (Engine): The SQLAlchemy engine for the configured database.\n session (Session): The SQLAlchemy session for database interactions.\n\n Methods:\n __init__():\n Initializes the DatabaseManager by creating an SQLAlchemy engine and a session based on the\n specified database configuration from the 'settings' module.\n\n create_database_tables():\n Detects 'models.py' files in subdirectories of the 'apps' directory and creates corresponding\n database tables based on SQLAlchemy models.\n\n Example Usage:\n db_manager = DatabaseManager()\n\n # Create database tables for all detected models\n db_manager.create_database_tables()\n\n Example Usage2:\n DatabaseManager().create_database_tables()\n \"\"\"\n engine: create_engine = None\n session: Session = None\n\n @classmethod\n def __init__(cls):\n \"\"\"\n Initializes the DatabaseManager.\n\n This method creates an SQLAlchemy engine and a session based on the specified database configuration\n from the 'settings' module.\n \"\"\"\n global testing # Access the global testing flag\n db_config = settings.DATABASES.copy()\n if testing:\n db_config[\"database\"] = \"test_\" + db_config[\"database\"]\n\n if db_config[\"drivername\"] == \"sqlite\":\n project_root = Path(__file__).parent.parent # Assuming this is where your models are located\n db_config[\"database\"] = os.path.join(project_root, db_config[\"database\"])\n\n url = URL.create(**db_config)\n cls.engine = create_engine(url, connect_args={\"check_same_thread\": False})\n else:\n # for postgres\n cls.engine = create_engine(URL.create(**db_config))\n\n session = sessionmaker(autocommit=False, autoflush=False, bind=cls.engine)\n cls.session = session()\n\n @classmethod\n def create_test_database(cls):\n \"\"\"\n Create and configure a test database for use in tests.\n \"\"\"\n\n # Set the testing flag to True\n global testing\n testing = True\n\n # Reinitialize the DatabaseManager for testing\n cls.__init__()\n DatabaseManager.create_database_tables()\n\n @classmethod\n def drop_all_tables(cls):\n \"\"\"\n Drop all tables in the current database.\n \"\"\"\n # TODO drop tables for postgres too\n if cls.engine:\n metadata = MetaData()\n metadata.reflect(bind=cls.engine)\n for table_name, table in metadata.tables.items():\n table.drop(cls.engine)\n\n @classmethod\n def create_database_tables(cls):\n \"\"\"\n Create database tables based on SQLAlchemy models.\n\n This method detects 'models.py' files in subdirectories of the 'apps'\n directory and creates corresponding database tables based on SQLAlchemy\n models defined within those files.\n\n Returns:\n None\n \"\"\"\n script_directory = os.path.dirname(os.path.abspath(__file__))\n project_root = Path(script_directory).parent\n apps_directory = project_root / \"apps\"\n\n for app_dir in apps_directory.iterdir():\n if app_dir.is_dir():\n models_file = app_dir / \"models.py\"\n if models_file.exists():\n module_name = f\"apps.{app_dir.name}.models\"\n try:\n module = importlib.import_module(module_name)\n if hasattr(module, \"FastModel\") and hasattr(module.FastModel, \"metadata\"):\n module.FastModel.metadata.create_all(bind=cls.engine)\n except ImportError:\n pass\n\n @classmethod\n def get_testing_mode(cls):\n return testing" } ]
import asyncio
import pytest
from fastapi import status
from fastapi.testclient import TestClient
from apps.accounts.faker.data import FakeUser
from apps.accounts.models import User
from apps.core.base_test_case import BaseTestCase
from apps.main import app
from apps.products.faker.data import FakeProduct, FakeMedia
from apps.products.services import ProductService
from config.database import DatabaseManager
7,659
class ProductMediaTestBase(BaseTestCase): product_endpoint = '/products/' product_media_endpoint = '/products/media/' # --- members --- admin: User | None = None admin_authorization = {} @classmethod def setup_class(cls): cls.client = TestClient(app) DatabaseManager.create_test_database() # --- create an admin --- cls.admin, access_token = FakeUser.populate_admin() cls.admin_authorization = {"Authorization": f"Bearer {access_token}"} @classmethod def teardown_class(cls): DatabaseManager.drop_all_tables() class TestCreateProductMedia(ProductMediaTestBase): """ Test create product-media on the multi scenario """ def test_create_product_media(self): """ Test create a product-media (images) for a product and attach them to that product (assuming valid data). Test the File "type, size and url". """ # --- create a product --- product_payload, product = FakeProduct.populate_product() # --- upload files ---- file_paths = FakeMedia.populate_images_for_product() files = [("x_files", open(file_path, "rb")) for file_path in file_paths] media_payload = { 'alt': 'Test Alt Text' } # --- request --- response = self.client.post(f"{self.product_endpoint}{product.id}/media/", data=media_payload, files=files, headers=self.admin_authorization) assert response.status_code == status.HTTP_201_CREATED # --- response data --- expected = response.json() # --- media --- assert "media" in expected media_list = expected["media"] assert isinstance(media_list, list) for media in media_list: assert media["media_id"] > 0 assert media["product_id"] == product.id assert media["alt"] == media_payload['alt'] assert "src" in media and not None assert media["type"] == 'jpg' assert media["updated_at"] is None self.assert_datetime_format(media['created_at']) # --- test static file URL --- url = f'/media/test{media_list[0]["src"].split("/media")[-1]}' response = self.client.get(url) assert response.status_code == status.HTTP_200_OK # test file size is not zero assert len(response.content) > 0 class TestRetrieveProductMedia(ProductMediaTestBase): """ Test retrieve product-media on the multi scenario """ def test_retrieve_single_media(self): """ Test retrieve a single product image """ # --- create a product --- payload, product = asyncio.run(FakeProduct.populate_product_with_media()) # --- get a media ---
class ProductMediaTestBase(BaseTestCase): product_endpoint = '/products/' product_media_endpoint = '/products/media/' # --- members --- admin: User | None = None admin_authorization = {} @classmethod def setup_class(cls): cls.client = TestClient(app) DatabaseManager.create_test_database() # --- create an admin --- cls.admin, access_token = FakeUser.populate_admin() cls.admin_authorization = {"Authorization": f"Bearer {access_token}"} @classmethod def teardown_class(cls): DatabaseManager.drop_all_tables() class TestCreateProductMedia(ProductMediaTestBase): """ Test create product-media on the multi scenario """ def test_create_product_media(self): """ Test create a product-media (images) for a product and attach them to that product (assuming valid data). Test the File "type, size and url". """ # --- create a product --- product_payload, product = FakeProduct.populate_product() # --- upload files ---- file_paths = FakeMedia.populate_images_for_product() files = [("x_files", open(file_path, "rb")) for file_path in file_paths] media_payload = { 'alt': 'Test Alt Text' } # --- request --- response = self.client.post(f"{self.product_endpoint}{product.id}/media/", data=media_payload, files=files, headers=self.admin_authorization) assert response.status_code == status.HTTP_201_CREATED # --- response data --- expected = response.json() # --- media --- assert "media" in expected media_list = expected["media"] assert isinstance(media_list, list) for media in media_list: assert media["media_id"] > 0 assert media["product_id"] == product.id assert media["alt"] == media_payload['alt'] assert "src" in media and not None assert media["type"] == 'jpg' assert media["updated_at"] is None self.assert_datetime_format(media['created_at']) # --- test static file URL --- url = f'/media/test{media_list[0]["src"].split("/media")[-1]}' response = self.client.get(url) assert response.status_code == status.HTTP_200_OK # test file size is not zero assert len(response.content) > 0 class TestRetrieveProductMedia(ProductMediaTestBase): """ Test retrieve product-media on the multi scenario """ def test_retrieve_single_media(self): """ Test retrieve a single product image """ # --- create a product --- payload, product = asyncio.run(FakeProduct.populate_product_with_media()) # --- get a media ---
media = ProductService.retrieve_media_list(product.id)[0]
6
2023-11-06 04:46:03+00:00
12k
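For orientation, the src values asserted in the tests of this record are produced by the __get_media_url helper shown in its ProductService context snippet. Below is a minimal standalone sketch of that URL-building pattern, assuming the snippet's default base URL and path layout; the product id and file name in the usage line are made-up example values, not taken from the record.

# Sketch of the media-URL pattern from the ProductService snippet above.
# The base URL default and the media/products/<id>/<file> layout come from
# __get_media_url; the arguments passed in the usage line are hypothetical.
def build_media_url(product_id, file_name, base_url="http://127.0.0.1:8000/"):
    # Mirror the helper: no stored file name means no URL.
    if file_name is None:
        return None
    return f"{base_url}media/products/{product_id}/{file_name}"

print(build_media_url(1, "example.jpg"))
# -> http://127.0.0.1:8000/media/products/1/example.jpg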
lukas-clarke/eight_sleep
custom_components/eight_sleep/pyEight/eight.py
[ { "identifier": "NotAuthenticatedError", "path": "custom_components/eight_sleep/pyEight/exceptions.py", "snippet": "class NotAuthenticatedError(BaseEightSleepError):\n \"\"\"Exception for eight sleep authentication errors..\"\"\"" }, { "identifier": "RequestError", "path": "custom_components/eight_sleep/pyEight/exceptions.py", "snippet": "class RequestError(BaseEightSleepError):\n \"\"\"Exception for eight sleep request failures.\"\"\"" }, { "identifier": "EightUser", "path": "custom_components/eight_sleep/pyEight/user.py", "snippet": "class EightUser: # pylint: disable=too-many-public-methods\n \"\"\"Class for handling data of each eight user.\"\"\"\n\n def __init__(self, device: \"EightSleep\", user_id: str, side: str):\n \"\"\"Initialize user class.\"\"\"\n self.device = device\n self.user_id = user_id\n self.side = side\n self._user_profile: dict[str, Any] = {}\n self.trends: list[dict[str, Any]] = []\n self.intervals: list[dict[str, Any]] = []\n self.next_alarm = None\n self.bed_state_type = None\n\n # Variables to do dynamic presence\n self.presence: bool = False\n self.observed_low: int = 0\n\n def _get_trend(self, trend_num: int, keys: str | tuple[str, ...]) -> Any:\n \"\"\"Get trend value for specified key.\"\"\"\n if len(self.trends) < trend_num + 1:\n return None\n data = self.trends[-(trend_num + 1)]\n # data = self.trends[trend_num]\n if isinstance(keys, str):\n return data.get(keys)\n if self.trends:\n for key in keys[:-1]:\n data = data.get(key, {})\n return data.get(keys[-1])\n\n def _get_quality_score(self, trend_num: int, key: str) -> Any:\n \"\"\"Get fitness score for specified key.\"\"\"\n return self._get_trend(trend_num, (\"sleepQualityScore\", key, \"score\"))\n\n def _get_routine_score(self, trend_num: int, key: str) -> Any:\n \"\"\"Get fitness score for specified key.\"\"\"\n return self._get_trend(trend_num, (\"sleepRoutineScore\", key, \"score\"))\n\n def _get_sleep_score(self, interval_num: int) -> int | None:\n \"\"\"Return sleep score for a given interval.\"\"\"\n if len(self.intervals) < interval_num + 1:\n return None\n return self.intervals[interval_num].get(\"score\")\n\n def _interval_timeseries(self, interval_num: int) -> dict[str, Any] | None:\n \"\"\"Return timeseries interval if it exists.\"\"\"\n if len(self.intervals) < interval_num + 1:\n return None\n return self.intervals[interval_num].get(\"timeseries\", {})\n\n def _get_current_interval_property_value(self, key: str) -> int | float | None:\n \"\"\"Get current property from intervals.\"\"\"\n if (\n not (timeseries_data := self._interval_timeseries(0))\n or timeseries_data.get(key) is None\n ):\n return None\n return timeseries_data[key][-1][1]\n\n def _calculate_interval_data(\n self, interval_num: int, key: str, average_data: bool = True\n ) -> int | float | None:\n \"\"\"Calculate interval data.\"\"\"\n\n if (timeseries := self._interval_timeseries(interval_num)) is None or (\n data_list := timeseries.get(key)\n ) is None:\n return None\n total = 0\n for entry in data_list:\n total += entry[1]\n if not average_data:\n return total\n return total / len(data_list)\n\n def _session_date(self, interval_num: int) -> datetime | None:\n \"\"\"Get session date for given interval.\"\"\"\n if (\n len(self.intervals) < interval_num + 1\n or (session_date := self.intervals[interval_num].get(\"ts\")) is None\n ):\n return None\n date = datetime.strptime(session_date, DATE_TIME_ISO_FORMAT)\n return date.replace(tzinfo=ZoneInfo(\"UTC\"))\n\n def _sleep_breakdown(self, interval_num: int) -> dict[str, Any] 
| None:\n \"\"\"Return durations of sleep stages for given session.\"\"\"\n if len(self.intervals) < (interval_num + 1) or not (\n stages := self.intervals[interval_num].get(\"stages\")\n ):\n return None\n breakdown = {}\n for stage in stages:\n if stage[\"stage\"] in (\"out\"):\n continue\n if stage[\"stage\"] not in breakdown:\n breakdown[stage[\"stage\"]] = 0\n breakdown[stage[\"stage\"]] += stage[\"duration\"]\n\n return breakdown\n\n def _session_processing(self, interval_num: int) -> bool | None:\n \"\"\"Return processing state of given session.\"\"\"\n if len(self.intervals) < interval_num + 1:\n return None\n return self.intervals[interval_num].get(\"incomplete\", False)\n\n @property\n def user_profile(self) -> dict[str, Any] | None:\n \"\"\"Return userdata.\"\"\"\n return self._user_profile\n\n @property\n def bed_presence(self) -> bool:\n \"\"\"Return true/false for bed presence.\"\"\"\n return self.presence\n\n @property\n def target_heating_level(self) -> int | None:\n \"\"\"Return target heating/cooling level.\"\"\"\n return self.device.device_data.get(f\"{self.side}TargetHeatingLevel\")\n\n @property\n def heating_level(self) -> int | None:\n \"\"\"Return heating/cooling level.\"\"\"\n level = self.device.device_data.get(f\"{self.side}HeatingLevel\")\n # Update observed low\n if level is not None and level < self.observed_low:\n self.observed_low = level\n return level\n\n def past_heating_level(self, num) -> int:\n \"\"\"Return a heating level from the past.\"\"\"\n if num > 9 or len(self.device.device_data_history) < num + 1:\n return 0\n\n return self.device.device_data_history[num].get(f\"{self.side}HeatingLevel\", 0)\n\n def _now_heating_or_cooling(self, target_heating_level_check: bool) -> bool | None:\n \"\"\"Return true/false if heating or cooling is currently happening.\"\"\"\n key = f\"{self.side}NowHeating\"\n if (\n self.target_heating_level is None\n or (target := self.device.device_data.get(key)) is None\n ):\n return None\n return target and target_heating_level_check\n\n @property\n def now_heating(self) -> bool | None:\n \"\"\"Return current heating state.\"\"\"\n level = self.target_heating_level\n return self._now_heating_or_cooling(level is not None and level > 0)\n\n @property\n def now_cooling(self) -> bool | None:\n \"\"\"Return current cooling state.\"\"\"\n level = self.target_heating_level\n return self._now_heating_or_cooling(level is not None and level < 0)\n\n @property\n def heating_remaining(self) -> int | None:\n \"\"\"Return seconds of heat/cool time remaining.\"\"\"\n return self.device.device_data.get(f\"{self.side}HeatingDuration\")\n\n @property\n def last_seen(self) -> str | None:\n \"\"\"Return mattress last seen time.\n\n These values seem to be rarely updated correctly in the API.\n Don't expect accurate results from this property.\n \"\"\"\n if not (last_seen := self.device.device_data.get(f\"{self.side}PresenceEnd\")):\n return None\n return datetime.fromtimestamp(int(last_seen)).strftime(DATE_TIME_ISO_FORMAT)\n\n @property\n def heating_values(self) -> dict[str, Any]:\n \"\"\"Return a dict of all the current heating values.\"\"\"\n return {\n \"level\": self.heating_level,\n \"target\": self.target_heating_level,\n \"active\": self.now_heating,\n \"remaining\": self.heating_remaining,\n \"last_seen\": self.last_seen,\n }\n\n @property\n def current_session_date(self) -> datetime | None:\n \"\"\"Return date/time for start of last session data.\"\"\"\n return self._session_date(0)\n\n @property\n def current_session_processing(self) 
-> bool | None:\n \"\"\"Return processing state of current session.\"\"\"\n return self._session_processing(0)\n\n @property\n def current_sleep_stage(self) -> str | None:\n \"\"\"Return sleep stage for in-progress session.\"\"\"\n if (\n not self.intervals\n or not (stages := self.intervals[0].get(\"stages\"))\n or len(stages) < 2\n ):\n return None\n # API now always has an awake state last in the dict\n # so always pull the second to last stage while we are\n # in a processing state\n if self.current_session_processing:\n stage = stages[-2].get(\"stage\")\n else:\n stage = stages[-1].get(\"stage\")\n\n # UNRELIABLE... Removing for now.\n # Check sleep stage against last_seen time to make\n # sure we don't get stuck in a non-awake state.\n # delta_elap = datetime.fromtimestamp(time.time()) \\\n # - datetime.strptime(self.last_seen, 'DATE_TIME_ISO_FORMAT')\n # _LOGGER.debug('User elap: %s', delta_elap.total_seconds())\n # if stage != 'awake' and delta_elap.total_seconds() > 1800:\n # Bed hasn't seen us for 30min so set awake.\n # stage = 'awake'\n\n return stage\n\n @property\n def current_sleep_score(self) -> int | None:\n \"\"\"Return sleep score for in-progress session.\"\"\"\n return self._get_sleep_score(0)\n\n @property\n def current_sleep_fitness_score(self) -> int | None:\n \"\"\"Return sleep fitness score for latest session.\"\"\"\n # return self._get_trend(0, (\"sleepFitnessScore\", \"total\"))\n return self._get_trend(0, \"score\")\n\n @property\n def current_sleep_quality_score(self) -> int | None:\n return self._get_trend(0, (\"sleepQualityScore\", \"total\"))\n\n @property\n def current_sleep_routine_score(self) -> int | None:\n return self._get_trend(0, (\"sleepRoutineScore\", \"total\"))\n\n @property\n def current_sleep_duration_score(self) -> int | None:\n \"\"\"Return sleep duration score for latest session.\"\"\"\n return self._get_quality_score(0, \"sleepDurationSeconds\")\n\n @property\n def current_latency_asleep_score(self) -> int | None:\n \"\"\"Return latency asleep score for latest session.\"\"\"\n return self._get_routine_score(0, \"latencyAsleepSeconds\")\n\n @property\n def time_slept(self) -> int | None:\n return self._get_trend(0, (\"sleepDuration\"))\n\n @property\n def presence_start(self):\n timestamp = self._get_trend(0, \"presenceStart\")\n if timestamp:\n return self.device.convert_string_to_datetime(timestamp)\n\n @property\n def presence_end(self):\n timestamp = self._get_trend(0, \"presenceEnd\")\n if timestamp:\n return self.device.convert_string_to_datetime(timestamp)\n\n @property\n def current_latency_out_score(self) -> int | None:\n \"\"\"Return latency out score for latest session.\"\"\"\n return self._get_routine_score(0, \"latencyOutSeconds\")\n\n @property\n def current_hrv(self) -> int | None:\n \"\"\"Return wakeup consistency score for latest session.\"\"\"\n return str(self._get_trend(0, (\"sleepQualityScore\", \"hrv\", \"current\")))\n\n @property\n def current_heart_rate(self) -> int | None:\n \"\"\"Return wakeup consistency score for latest session.\"\"\"\n return str(self._get_trend(0, (\"sleepRoutineScore\", \"heartRate\", \"current\")))\n\n @property\n def current_breath_rate(self) -> int | None:\n \"\"\"Return wakeup consistency score for latest session.\"\"\"\n return str(\n self._get_trend(0, (\"sleepQualityScore\", \"respiratoryRate\", \"current\"))\n )\n\n @property\n def current_wakeup_consistency_score(self) -> int | None:\n \"\"\"Return wakeup consistency score for latest session.\"\"\"\n return self._get_routine_score(0, 
\"wakeupConsistency\")\n\n @property\n def current_fitness_session_date(self) -> str | None:\n \"\"\"Return date/time for start of last session data.\"\"\"\n return self._get_trend(0, \"day\")\n\n @property\n def current_sleep_breakdown(self) -> dict[str, Any] | None:\n \"\"\"Return durations of sleep stages for in-progress session.\"\"\"\n return self._sleep_breakdown(0)\n\n @property\n def current_bed_temp(self) -> int | float | None:\n \"\"\"Return current bed temperature for in-progress session.\"\"\"\n return self._get_current_interval_property_value(\"tempBedC\")\n\n @property\n def current_room_temp(self) -> int | float | None:\n \"\"\"Return current room temperature for in-progress session.\"\"\"\n return self._get_current_interval_property_value(\"tempRoomC\")\n\n @property\n def current_tnt(self) -> int | None:\n \"\"\"Return current toss & turns for in-progress session.\"\"\"\n return cast(\n Optional[int], self._calculate_interval_data(0, \"tnt\", average_data=False)\n )\n\n @property\n def current_resp_rate(self) -> int | float | None:\n \"\"\"Return current respiratory rate for in-progress session.\"\"\"\n return self._get_current_interval_property_value(\"respiratoryRate\")\n\n @property\n def current_heart_rate(self) -> int | float | None:\n \"\"\"Return current heart rate for in-progress session.\"\"\"\n return self._get_current_interval_property_value(\"heartRate\")\n\n @property\n def current_values(self) -> dict[str, Any]:\n \"\"\"Return a dict of all the 'current' parameters.\"\"\"\n return {\n \"date\": self.current_session_date,\n \"score\": self.current_sleep_score,\n \"stage\": self.current_sleep_stage,\n \"breakdown\": self.current_sleep_breakdown,\n \"tnt\": self.current_tnt,\n \"bed_temp\": self.current_bed_temp,\n \"room_temp\": self.current_room_temp,\n \"resp_rate\": self.current_resp_rate,\n \"heart_rate\": self.current_heart_rate,\n \"processing\": self.current_session_processing,\n }\n\n @property\n def current_fitness_values(self) -> dict[str, Any]:\n \"\"\"Return a dict of all the 'current' fitness score parameters.\"\"\"\n return {\n \"date\": self.current_fitness_session_date,\n \"score\": self.current_sleep_fitness_score,\n \"duration\": self.current_sleep_duration_score,\n \"asleep\": self.current_latency_asleep_score,\n \"out\": self.current_latency_out_score,\n \"wakeup\": self.current_wakeup_consistency_score,\n }\n\n @property\n def last_session_date(self) -> datetime | None:\n \"\"\"Return date/time for start of last session data.\"\"\"\n return self._session_date(1)\n\n @property\n def last_session_processing(self) -> bool | None:\n \"\"\"Return processing state of current session.\"\"\"\n return self._session_processing(1)\n\n @property\n def last_sleep_score(self) -> int | None:\n \"\"\"Return sleep score from last complete sleep session.\"\"\"\n return self._get_sleep_score(1)\n\n @property\n def last_sleep_fitness_score(self) -> int | None:\n \"\"\"Return sleep fitness score for previous sleep session.\"\"\"\n return self._get_trend(1, (\"sleepFitnessScore\", \"total\"))\n\n @property\n def last_sleep_duration_score(self) -> int | None:\n \"\"\"Return sleep duration score for previous session.\"\"\"\n return self._get_quality_score(1, \"sleepDurationSeconds\")\n\n @property\n def last_latency_asleep_score(self) -> int | None:\n \"\"\"Return latency asleep score for previous session.\"\"\"\n return self._get_routine_score(1, \"latencyAsleepSeconds\")\n\n @property\n def last_latency_out_score(self) -> int | None:\n \"\"\"Return latency out 
score for previous session.\"\"\"\n return self._get_froutine_score(1, \"latencyOutSeconds\")\n\n @property\n def last_wakeup_consistency_score(self) -> int | None:\n \"\"\"Return wakeup consistency score for previous session.\"\"\"\n return self._get_routine_score(1, \"wakeupConsistency\")\n\n @property\n def last_fitness_session_date(self) -> str | None:\n \"\"\"Return date/time for start of previous session data.\"\"\"\n return self._get_trend(1, \"day\")\n\n @property\n def last_sleep_breakdown(self) -> dict[str, Any] | None:\n \"\"\"Return durations of sleep stages for last complete session.\"\"\"\n return self._sleep_breakdown(1)\n\n @property\n def last_bed_temp(self) -> int | float | None:\n \"\"\"Return avg bed temperature for last session.\"\"\"\n return self._calculate_interval_data(1, \"tempBedC\")\n\n @property\n def last_room_temp(self) -> int | float | None:\n \"\"\"Return avg room temperature for last session.\"\"\"\n return self._calculate_interval_data(1, \"tempRoomC\")\n\n @property\n def last_tnt(self) -> int | None:\n \"\"\"Return toss & turns for last session.\"\"\"\n return cast(\n Optional[int], self._calculate_interval_data(1, \"tnt\", average_data=False)\n )\n\n @property\n def last_resp_rate(self) -> int | float | None:\n \"\"\"Return avg respiratory rate for last session.\"\"\"\n return self._calculate_interval_data(1, \"respiratoryRate\")\n\n @property\n def last_heart_rate(self) -> int | float | None:\n \"\"\"Return avg heart rate for last session.\"\"\"\n return self._calculate_interval_data(1, \"heartRate\")\n\n @property\n def last_values(self) -> dict[str, Any]:\n \"\"\"Return a dict of all the 'last' parameters.\"\"\"\n return {\n \"date\": self.last_session_date,\n \"score\": self.last_sleep_score,\n \"breakdown\": self.last_sleep_breakdown,\n \"tnt\": self.last_tnt,\n \"bed_temp\": self.last_bed_temp,\n \"room_temp\": self.last_room_temp,\n \"resp_rate\": self.last_resp_rate,\n \"heart_rate\": self.last_heart_rate,\n \"processing\": self.last_session_processing,\n }\n\n @property\n def last_fitness_values(self) -> dict[str, Any]:\n \"\"\"Return a dict of all the 'last' fitness score parameters.\"\"\"\n return {\n \"date\": self.last_fitness_session_date,\n \"score\": self.last_sleep_fitness_score,\n \"duration\": self.last_sleep_duration_score,\n \"asleep\": self.last_latency_asleep_score,\n \"out\": self.last_latency_out_score,\n \"wakeup\": self.last_wakeup_consistency_score,\n }\n\n def trend_sleep_score(self, date: str) -> int | None:\n \"\"\"Return trend sleep score for specified date.\"\"\"\n return next(\n (day.get(\"score\") for day in self.trends if day.get(\"day\") == date),\n None,\n )\n\n def sleep_fitness_score(self, date: str) -> int | None:\n \"\"\"Return sleep fitness score for specified date.\"\"\"\n return next(\n (\n day.get(\"sleepFitnessScore\", {}).get(\"total\")\n for day in self.trends\n if day.get(\"day\") == date\n ),\n None,\n )\n\n def heating_stats(self) -> None:\n \"\"\"Calculate some heating data stats.\"\"\"\n local_5 = []\n local_10 = []\n\n for i in range(0, 10):\n if (level := self.past_heating_level(i)) is None:\n continue\n if level == 0:\n _LOGGER.debug(\"Cant calculate stats yet...\")\n return\n if i < 5:\n local_5.append(level)\n local_10.append(level)\n\n _LOGGER.debug(\"%s Heating History: %s\", self.side, local_10)\n\n try:\n # Average of 5min on the history dict.\n fiveminavg = statistics.mean(local_5)\n tenminavg = statistics.mean(local_10)\n _LOGGER.debug(\"%s Heating 5 min avg: %s\", self.side, 
fiveminavg)\n _LOGGER.debug(\"%s Heating 10 min avg: %s\", self.side, tenminavg)\n\n # Standard deviation\n fivestdev = statistics.stdev(local_5)\n tenstdev = statistics.stdev(local_10)\n _LOGGER.debug(\"%s Heating 5 min stdev: %s\", self.side, fivestdev)\n _LOGGER.debug(\"%s Heating 10 min stdev: %s\", self.side, tenstdev)\n\n # Variance\n fivevar = statistics.variance(local_5)\n tenvar = statistics.variance(local_10)\n _LOGGER.debug(\"%s Heating 5 min variance: %s\", self.side, fivevar)\n _LOGGER.debug(\"%s Heating 10 min variance: %s\", self.side, tenvar)\n except statistics.StatisticsError:\n _LOGGER.debug(\"Cant calculate stats yet...\")\n\n # Other possible options for exploration....\n # Pearson correlation coefficient\n # Spearman rank correlation\n # Kendalls Tau\n\n def dynamic_presence(self) -> None:\n \"\"\"\n Determine presence based on bed heating level and end presence\n time reported by the api.\n\n Idea originated from Alex Lee Yuk Cheung SmartThings Code.\n \"\"\"\n\n # self.heating_stats()\n\n # Method needs to be different for pod since it doesn't rest at 0\n # - Working idea is to track the low and adjust the scale so that low is 0\n # - Buffer changes while cooling/heating is active\n if self.target_heating_level is None or self.heating_level is None:\n return\n level_zero = self.observed_low * (-1)\n working_level = self.heating_level + level_zero\n if self.device.is_pod:\n if not self.presence:\n if working_level > 50:\n if not self.now_cooling and not self.now_heating:\n self.presence = True\n elif self.target_heating_level > 0:\n # Heating\n if working_level - self.target_heating_level >= 8:\n self.presence = True\n elif self.target_heating_level < 0:\n # Cooling\n if self.heating_level + self.target_heating_level >= 8:\n self.presence = True\n elif working_level > 25:\n # Catch rising edge\n if (\n self.past_heating_level(0) - self.past_heating_level(1) >= 2\n and self.past_heating_level(1) - self.past_heating_level(2) >= 2\n and self.past_heating_level(2) - self.past_heating_level(3) >= 2\n ):\n # Values are increasing so we are likely in bed\n if not self.now_heating:\n self.presence = True\n elif working_level - self.target_heating_level >= 8:\n self.presence = True\n\n elif self.presence:\n if working_level <= 15:\n # Failsafe, very slow\n self.presence = False\n elif working_level < 35: # Threshold is expiremental for now\n if (\n self.past_heating_level(0) - self.past_heating_level(1) < 0\n and self.past_heating_level(1) - self.past_heating_level(2) < 0\n and self.past_heating_level(2) - self.past_heating_level(3) < 0\n ):\n # Values are decreasing so we are likely out of bed\n self.presence = False\n else:\n # Method for 0 resting state\n if not self.presence:\n if self.heating_level > 50:\n # Can likely make this better\n if not self.now_heating:\n self.presence = True\n elif self.heating_level - self.target_heating_level >= 8:\n self.presence = True\n elif self.heating_level > 25:\n # Catch rising edge\n if (\n self.past_heating_level(0) - self.past_heating_level(1) >= 2\n and self.past_heating_level(1) - self.past_heating_level(2) >= 2\n and self.past_heating_level(2) - self.past_heating_level(3) >= 2\n ):\n # Values are increasing so we are likely in bed\n if not self.now_heating:\n self.presence = True\n elif self.heating_level - self.target_heating_level >= 8:\n self.presence = True\n\n elif self.presence:\n if self.heating_level <= 15:\n # Failsafe, very slow\n self.presence = False\n elif self.heating_level < 50:\n if (\n self.past_heating_level(0) 
- self.past_heating_level(1) < 0\n and self.past_heating_level(1) - self.past_heating_level(2) < 0\n and self.past_heating_level(2) - self.past_heating_level(3) < 0\n ):\n # Values are decreasing so we are likely out of bed\n self.presence = False\n\n # Last seen can lag real-time by up to 35min so this is\n # mostly a backup to using the heat values.\n # seen_delta = datetime.fromtimestamp(time.time()) \\\n # - datetime.strptime(self.last_seen, 'DATE_TIME_ISO_FORMAT')\n # _LOGGER.debug('%s Last seen time delta: %s', self.side,\n # seen_delta.total_seconds())\n # if self.presence and seen_delta.total_seconds() > 2100:\n # self.presence = False\n\n _LOGGER.debug(\"%s Presence Results: %s\", self.side, self.presence)\n\n async def update_user(self) -> None:\n \"\"\"Update all user data.\"\"\"\n await self.update_intervals_data()\n\n now = datetime.today()\n start = now - timedelta(days=2)\n end = now + timedelta(days=2)\n\n await self.update_trend_data(\n start.strftime(DATE_FORMAT), end.strftime(DATE_FORMAT)\n )\n await self.update_routines_data()\n\n self.bed_state_type = await self.get_bed_state_type()\n\n async def get_bed_state_type(self) -> str:\n \"\"\"Gets the bed state.\"\"\"\n url = APP_API_URL + f\"v1/users/{self.user_id}/temperature\"\n data = await self.device.api_request(\"GET\", url)\n return data[\"currentState\"][\"type\"]\n\n async def set_heating_level(self, level: int, duration: int = 0) -> None:\n \"\"\"Update heating data json.\"\"\"\n url = APP_API_URL + f\"v1/users/{self.user_id}/temperature\"\n data_for_duration = {\"timeBased\": {\"level\": level, \"durationSeconds\": duration}}\n data_for_level = {\"currentLevel\": level}\n # Catch bad low inputs\n level = max(-100, level)\n # Catch bad high inputs\n level = min(100, level)\n\n await self.turn_on_side() # Turn on side before setting temperature\n await self.device.api_request(\n \"PUT\", url, data=data_for_level\n ) # Set heating level before duration\n await self.device.api_request(\"PUT\", url, data=data_for_duration)\n\n async def set_smart_heating_level(self, level: int, sleep_stage: str) -> None:\n \"\"\"Will set the temperature level at a smart sleep stage\"\"\"\n if sleep_stage not in POSSIBLE_SLEEP_STAGES:\n raise Exception(\n f\"Invalid sleep stage {sleep_stage}. 
Should be one of {POSSIBLE_SLEEP_STAGES}\"\n )\n url = APP_API_URL + f\"v1/users/{self.user_id}/temperature\"\n data = await self.device.api_request(\"GET\", url)\n sleep_stages_levels = data[\"smart\"]\n # Catch bad low inputs\n level = max(-100, level)\n # Catch bad high inputs\n level = min(100, level)\n sleep_stages_levels[sleep_stage] = level\n data = {\"smart\": sleep_stages_levels}\n await self.device.api_request(\"PUT\", url, data=data)\n\n async def increment_heating_level(self, offset: int) -> None:\n \"\"\"Increment heating level with offset\"\"\"\n url = APP_API_URL + f\"v1/users/{self.user_id}/temperature\"\n current_level = await self.get_current_heating_level()\n new_level = current_level + offset\n # Catch bad low inputs\n new_level = max(-100, new_level)\n # Catch bad high inputs\n new_level = min(100, new_level)\n\n data_for_level = {\"currentLevel\": new_level}\n\n await self.device.api_request(\"PUT\", url, data=data_for_level)\n\n async def get_current_heating_level(self) -> int:\n url = APP_API_URL + f\"v1/users/{self.user_id}/temperature\"\n resp = await self.device.api_request(\"GET\", url)\n return int(resp[\"currentLevel\"])\n\n async def prime_pod(self):\n url = APP_API_URL + f\"v1/devices/{self.device.device_id}/priming/tasks\"\n data_for_priming = {\n \"notifications\": {\"users\": [self.user_id], \"meta\": \"rePriming\"}\n }\n await self.device.api_request(\"POST\", url, data=data_for_priming)\n\n async def turn_on_side(self):\n \"\"\"Turns on the side of the user\"\"\"\n url = APP_API_URL + f\"v1/users/{self.user_id}/temperature\"\n data = {\"currentState\": {\"type\": \"smart\"}}\n await self.device.api_request(\"PUT\", url, data=data)\n\n async def turn_off_side(self):\n \"\"\"Turns on the side of the user\"\"\"\n url = APP_API_URL + f\"v1/users/{self.user_id}/temperature\"\n data = {\"currentState\": {\"type\": \"off\"}}\n await self.device.api_request(\"PUT\", url, data=data)\n\n async def set_away_mode(self, action: str):\n \"\"\"Sets the away mode. The action can either be 'start' or 'stop'\"\"\"\n url = APP_API_URL + f\"v1/users/{self.user_id}/away-mode\"\n # Setting time to UTC of 24 hours ago to get API to trigger immediately\n now = str(\n (datetime.utcnow() - timedelta(days=1)).strftime(\"%Y-%m-%dT%H:%M:%S.%f\")[\n :-3\n ]\n + \"Z\"\n )\n if action != \"start\" and action != \"end\":\n raise Exception(f\"Invalid action: {action}\")\n data = {\"awayPeriod\": {action: now}}\n await self.device.api_request(\"PUT\", url, data=data)\n\n async def update_user_profile(self) -> None:\n \"\"\"Update user profile data.\"\"\"\n url = f\"{CLIENT_API_URL}/users/{self.user_id}\"\n profile_data = await self.device.api_request(\"get\", url)\n if profile_data is None:\n _LOGGER.error(\"Unable to fetch user profile data for %s\", self.user_id)\n else:\n self._user_profile = profile_data[\"user\"]\n\n async def update_trend_data(self, start_date: str, end_date: str) -> None:\n \"\"\"Update trends data json for specified time period. 
V2 of the api used\"\"\"\n url = f\"{CLIENT_API_URL}/users/{self.user_id}/trends\"\n params = {\n \"tz\": self.device.timezone,\n \"from\": start_date,\n \"to\": end_date,\n \"include-main\": \"false\",\n \"include-all-sessions\": \"false\",\n \"model-version\": \"v2\",\n }\n trend_data = await self.device.api_request(\"get\", url, params=params)\n self.trends = trend_data.get(\"days\", [])\n\n async def update_intervals_data(self) -> None:\n \"\"\"Update intervals data json for specified time period.\"\"\"\n url = f\"{CLIENT_API_URL}/users/{self.user_id}/intervals\"\n\n intervals = await self.device.api_request(\"get\", url)\n self.intervals = intervals.get(\"intervals\", [])\n\n async def update_routines_data(self) -> None:\n url = APP_API_URL + f\"v2/users/{self.user_id}/routines\"\n resp = await self.device.api_request(\"GET\", url)\n\n try:\n nextTimestamp = resp[\"state\"][\"nextAlarm\"][\"nextTimestamp\"]\n except KeyError:\n nextTimestamp = None\n\n if not nextTimestamp:\n self.next_alarm = None\n return\n\n self.next_alarm = self.device.convert_string_to_datetime(nextTimestamp)\n\n def _convert_string_to_datetime(self, datetime_str):\n datetime_str = str(datetime_str).strip()\n # Convert string to datetime object.\n try:\n # Try to parse the first format\n datetime_object = datetime.strptime(datetime_str, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n try:\n # Try to parse the second format\n datetime_object = datetime.strptime(\n datetime_str, \"%Y-%m-%dT%H:%M:%S.%fZ\"\n )\n except ValueError:\n # Handle if neither format is matched\n raise ValueError(f\"Unsupported date string format for {datetime_str}\")\n\n # Set the timezone to UTC\n utc_timezone = pytz.UTC\n datetime_object_utc = datetime_object.replace(tzinfo=utc_timezone)\n # Set the timezone to a specific timezone\n timezone = pytz.timezone(self.device.timezone)\n return datetime_object_utc.astimezone(timezone)" }, { "identifier": "Token", "path": "custom_components/eight_sleep/pyEight/structs.py", "snippet": "class Token:\n bearer_token: str\n expiration: float\n main_id: str" } ]
import asyncio
import atexit
import pytz
import logging
import time
import httpx
from datetime import datetime
from typing import Any
from aiohttp.client import ClientError, ClientSession, ClientTimeout
from .constants import *
from .exceptions import NotAuthenticatedError, RequestError
from .user import EightUser
from .structs import Token
9,451
self.users: dict[str, EightUser] = {} self._user_id: str | None = None self._token: str | None = None self._token_expiration: datetime | None = None self._device_ids: list[str] = [] self._is_pod: bool = False # Setup 10 element list self._device_json_list: list[dict] = [] self._api_session = client_session self._internal_session: bool = False if check_auth: self._get_auth() # Stop on exit atexit.register(self.at_exit) def at_exit(self) -> None: """Run at exit.""" try: loop = asyncio.get_running_loop() asyncio.run_coroutine_threadsafe(self.stop(), loop).result() except RuntimeError: asyncio.run(self.stop()) @property def token(self) -> str | None: """Return session token.""" return self._token @property def user_id(self) -> str | None: """Return user ID of the logged in user.""" return self._user_id @property def device_id(self) -> str | None: """Return devices id.""" return self._device_ids[0] @property def device_data(self) -> dict: """Return current raw device_data json.""" return self._device_json_list[0] @property def device_data_history(self) -> list[dict]: """Return full raw device_data json list.""" return self._device_json_list @property def need_priming(self) -> bool: return self.device_data["needsPriming"] @property def is_priming(self) -> bool: return self.device_data["priming"] @property def has_water(self) -> bool: return self.device_data["hasWater"] @property def last_prime(self): return self.convert_string_to_datetime(self.device_data["lastPrime"]) @property def is_pod(self) -> bool: """Return if device is a POD.""" return self._is_pod def convert_string_to_datetime(self, datetime_str): datetime_str = str(datetime_str).strip() # Convert string to datetime object. try: # Try to parse the first format datetime_object = datetime.strptime(datetime_str, "%Y-%m-%dT%H:%M:%SZ") except ValueError: try: # Try to parse the second format datetime_object = datetime.strptime( datetime_str, "%Y-%m-%dT%H:%M:%S.%fZ" ) except ValueError: # Handle if neither format is matched raise ValueError(f"Unsupported date string format for {datetime_str}") # Set the timezone to UTC utc_timezone = pytz.UTC datetime_object_utc = datetime_object.replace(tzinfo=utc_timezone) # Set the timezone to a specific timezone timezone = pytz.timezone(self.timezone) return datetime_object_utc.astimezone(timezone) async def _get_auth(self) -> Token: data = { "client_id": self._client_id, "client_secret": self._client_secret, "grant_type": "password", "username": self._email, "password": self._password, } async with httpx.AsyncClient() as client: response = await client.post( AUTH_URL, headers=DEFAULT_AUTH_HEADERS, json=data, timeout=DEFAULT_TIMEOUT, ) if response.status_code == 200: access_token_str = response.json()["access_token"] expiration_seconds_int = ( float(response.json()["expires_in"]) + time.time() ) main_id = response.json()["userId"] return Token(access_token_str, expiration_seconds_int, main_id) else:
""" pyeight.eight ~~~~~~~~~~~~~~~~~~~~ Provides api for Eight Sleep Copyright (c) 2022-2023 <https://github.com/lukas-clarke/pyEight> Licensed under the MIT license. """ from __future__ import annotations _LOGGER = logging.getLogger(__name__) CLIENT_TIMEOUT = ClientTimeout(total=DEFAULT_TIMEOUT) class EightSleep: """Eight sleep API object.""" def __init__( self, email: str, password: str, timezone: str, client_id: str = None, client_secret: str = None, client_session: ClientSession | None = None, check_auth: bool = False, ) -> None: """Initialize eight sleep class.""" self._email = email self._password = password # If client_id isn't set, use the default value if not client_id: client_id = "0894c7f33bb94800a03f1f4df13a4f38" self._client_id = client_id # client_secret isn't required for current Eight Sleep API auth # but can't be empty value, so setting random string if not set if not client_secret: client_secret = "ASDF" self._client_secret = client_secret self.timezone = timezone self.users: dict[str, EightUser] = {} self._user_id: str | None = None self._token: str | None = None self._token_expiration: datetime | None = None self._device_ids: list[str] = [] self._is_pod: bool = False # Setup 10 element list self._device_json_list: list[dict] = [] self._api_session = client_session self._internal_session: bool = False if check_auth: self._get_auth() # Stop on exit atexit.register(self.at_exit) def at_exit(self) -> None: """Run at exit.""" try: loop = asyncio.get_running_loop() asyncio.run_coroutine_threadsafe(self.stop(), loop).result() except RuntimeError: asyncio.run(self.stop()) @property def token(self) -> str | None: """Return session token.""" return self._token @property def user_id(self) -> str | None: """Return user ID of the logged in user.""" return self._user_id @property def device_id(self) -> str | None: """Return devices id.""" return self._device_ids[0] @property def device_data(self) -> dict: """Return current raw device_data json.""" return self._device_json_list[0] @property def device_data_history(self) -> list[dict]: """Return full raw device_data json list.""" return self._device_json_list @property def need_priming(self) -> bool: return self.device_data["needsPriming"] @property def is_priming(self) -> bool: return self.device_data["priming"] @property def has_water(self) -> bool: return self.device_data["hasWater"] @property def last_prime(self): return self.convert_string_to_datetime(self.device_data["lastPrime"]) @property def is_pod(self) -> bool: """Return if device is a POD.""" return self._is_pod def convert_string_to_datetime(self, datetime_str): datetime_str = str(datetime_str).strip() # Convert string to datetime object. 
try: # Try to parse the first format datetime_object = datetime.strptime(datetime_str, "%Y-%m-%dT%H:%M:%SZ") except ValueError: try: # Try to parse the second format datetime_object = datetime.strptime( datetime_str, "%Y-%m-%dT%H:%M:%S.%fZ" ) except ValueError: # Handle if neither format is matched raise ValueError(f"Unsupported date string format for {datetime_str}") # Set the timezone to UTC utc_timezone = pytz.UTC datetime_object_utc = datetime_object.replace(tzinfo=utc_timezone) # Set the timezone to a specific timezone timezone = pytz.timezone(self.timezone) return datetime_object_utc.astimezone(timezone) async def _get_auth(self) -> Token: data = { "client_id": self._client_id, "client_secret": self._client_secret, "grant_type": "password", "username": self._email, "password": self._password, } async with httpx.AsyncClient() as client: response = await client.post( AUTH_URL, headers=DEFAULT_AUTH_HEADERS, json=data, timeout=DEFAULT_TIMEOUT, ) if response.status_code == 200: access_token_str = response.json()["access_token"] expiration_seconds_int = ( float(response.json()["expires_in"]) + time.time() ) main_id = response.json()["userId"] return Token(access_token_str, expiration_seconds_int, main_id) else:
raise RequestError(
1
2023-11-01 16:15:52+00:00
12k
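The Eight Sleep client in this record parses API timestamps that arrive in one of two ISO-like layouts and then localizes them to the configured timezone. Below is a minimal standalone sketch of that fallback-parse-then-localize pattern, using the two formats from convert_string_to_datetime; the sample timestamp and timezone name in the usage line are made-up example values.

# Sketch of the two-format timestamp handling from convert_string_to_datetime above.
from datetime import datetime
import pytz

def parse_eight_timestamp(datetime_str, tz_name):
    datetime_str = str(datetime_str).strip()
    try:
        parsed = datetime.strptime(datetime_str, "%Y-%m-%dT%H:%M:%SZ")
    except ValueError:
        # Fall back to the fractional-seconds variant.
        parsed = datetime.strptime(datetime_str, "%Y-%m-%dT%H:%M:%S.%fZ")
    # Timestamps are UTC; convert to the configured timezone.
    return parsed.replace(tzinfo=pytz.UTC).astimezone(pytz.timezone(tz_name))

print(parse_eight_timestamp("2023-11-01T16:15:52Z", "Europe/Berlin"))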
gickowtf/pixoo-homeassistant
custom_components/divoom_pixoo/pixoo64/_pixoo.py
[ { "identifier": "Palette", "path": "custom_components/divoom_pixoo/pixoo64/_colors.py", "snippet": "class Palette:\n BLACK = COLOR_BLACK\n WHITE = COLOR_WHITE" }, { "identifier": "retrieve_glyph", "path": "custom_components/divoom_pixoo/pixoo64/_font.py", "snippet": "def retrieve_glyph(character, font):\n if character in font:\n return font[character]\n\n return None" }, { "identifier": "FONT_GICKO", "path": "custom_components/divoom_pixoo/pixoo64/_font.py", "snippet": "FONT_GICKO = {'0': [0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 6],\n '1': [0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 4],\n '2': [0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 6],\n '3': [1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 6],\n '4': [0, 1, 1, 1, 1, 0,\n 1, 1, 0, 1, 1, 0,\n 1, 0, 0, 1, 1, 0,\n 1, 0, 0, 1, 1, 0,\n 1, 1, 1, 1, 1, 1,\n 0, 0, 0, 1, 1, 0, 6],\n '5': [1, 1, 1, 1, 1, 0,\n 1, 0, 0, 0, 0, 0,\n 1, 1, 1, 1, 1, 0,\n 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 1, 1,\n 0, 1, 1, 1, 1, 0, 6],\n '6': [0, 1, 1, 1, 1, 0,\n 1, 1, 0, 0, 0, 0,\n 1, 1, 1, 1, 1, 0,\n 1, 1, 0, 0, 1, 1,\n 1, 1, 0, 0, 1, 1,\n 0, 1, 1, 1, 1, 0, 6],\n '7': [1, 1, 1, 1, 1, 1,\n 0, 0, 0, 0, 1, 1,\n 0, 0, 0, 1, 1, 0,\n 0, 0, 1, 1, 0, 0,\n 0, 1, 1, 1, 0, 0,\n 0, 1, 1, 1, 0, 0, 6],\n '8': [0, 1, 1, 1, 1, 0,\n 1, 0, 0, 1, 1, 1,\n 0, 1, 1, 1, 1, 0,\n 1, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 1, 1,\n 0, 1, 1, 1, 1, 0, 6],\n '9': [0, 1, 1, 1, 1, 0,\n 1, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 1, 1,\n 0, 1, 1, 1, 1, 1,\n 0, 0, 0, 1, 1, 1,\n 0, 1, 1, 1, 1, 0, 6],\n ' ': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2],\n 'A': [0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 6],\n 'B': [1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 6],\n 'C': [0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 6],\n 'D': [1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 6],\n 'E': [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 5],\n 'F': [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 5],\n 'G': [0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 6],\n 'H': [1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 5],\n 'I': [1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 4],\n 'J': [0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 6],\n 'K': [1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 6],\n 'L': [1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 4],\n 'M': [1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 6],\n 'N': [1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 6],\n 'O': [0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 6],\n 'P': [1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 
6],\n 'Q': [0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 6],\n 'R': [1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 6],\n 'S': [0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 6],\n 'T': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 6],\n 'U': [1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 6],\n 'V': [1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 6],\n 'W': [1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 6],\n 'X': [1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 6],\n 'Y': [1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 6],\n 'Z': [1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 6],\n ':': [0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 2],\n '?': [0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 6],\n '!': [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 2],\n '.': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2],\n '-': [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 3]\n }" }, { "identifier": "FONT_PICO_8", "path": "custom_components/divoom_pixoo/pixoo64/_font.py", "snippet": "FONT_PICO_8 = {'0': [1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 3], '1': [1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 3],\n '2': [1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 3], '3': [1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 3],\n '4': [1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 3], '5': [1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 3],\n '6': [1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 3], '7': [1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 3],\n '8': [1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 3], '9': [1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 3],\n 'a': [0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 3], 'b': [0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 3],\n 'c': [0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 3], 'd': [0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 3],\n 'e': [0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 3], 'f': [0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 3],\n 'g': [0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 3], 'h': [0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 3],\n 'i': [0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 3], 'j': [0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 3],\n 'k': [0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 3], 'l': [0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 3],\n 'm': [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 3], 'n': [0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 3],\n 'o': [0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 3], 'p': [0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 3],\n 'q': [0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 3], 'r': [0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 3],\n 's': [0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 3], 't': [0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 3],\n 'u': [0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 3], 'v': [0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 3],\n 'w': [0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 3], 'x': [0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 3],\n 'y': [0, 0, 0, 1, 0, 1, 
1, 1, 1, 0, 0, 1, 1, 1, 3], 'z': [0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 3],\n 'A': [1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 3], 'B': [1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 3],\n 'C': [0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 3], 'D': [1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 3],\n 'E': [1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 3], 'F': [1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 3],\n 'G': [0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 3], 'H': [1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 3],\n 'I': [1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 3], 'J': [1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 3],\n 'K': [1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 3], 'L': [1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 3],\n 'M': [1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 3], 'N': [1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 3],\n 'O': [0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 3], 'P': [1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 3],\n 'Q': [0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 3], 'R': [1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 3],\n 'S': [0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 3], 'T': [1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 3],\n 'U': [1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 3], 'V': [1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 3],\n 'W': [1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 5], 'X': [1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 3],\n 'Y': [1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 3], 'Z': [1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 3],\n '!': [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 3], \"'\": [0, 1, 0, 1, 3],\n '(': [0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 3], ')': [0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 3],\n '+': [0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 3], ',': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 3],\n '-': [0, 0, 0, 0, 0, 0, 1, 1, 1, 3], '<': [0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 3],\n '=': [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 3], '>': [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 3],\n '?': [1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 3], '[': [1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 3],\n ']': [0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 3], '^': [0, 1, 0, 1, 0, 1, 3],\n '_': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 3], ':': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 3],\n ';': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 3], '.': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3],\n '/': [0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 3], '{': [0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 3],\n '|': [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 3], '}': [1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 3],\n '~': [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 3], '$': [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 3],\n '@': [0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 3], '%': [1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 3],\n ' ': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3]\n }" } ]
import base64
import json
import requests
from enum import IntEnum
from PIL import Image, ImageOps
from ._colors import Palette
from ._font import retrieve_glyph, FONT_GICKO, FONT_PICO_8
10,788
def draw_filled_rectangle_from_top_left_to_bottom_right_rgb(self, top_left_x=0, top_left_y=0, bottom_right_x=1, bottom_right_y=1, r=0, g=0, b=0): self.draw_filled_rectangle((top_left_x, top_left_y), (bottom_right_x, bottom_right_y), (r, g, b)) def draw_image(self, image_path_or_object, xy=(0, 0), image_resample_mode=ImageResampleMode.PIXEL_ART, pad_resample=False): image = image_path_or_object if isinstance(image_path_or_object, Image.Image) else Image.open( image_path_or_object) size = image.size width = size[0] height = size[1] # See if it needs to be scaled/resized to fit the display if width > self.size or height > self.size: if pad_resample: image = ImageOps.pad(image, (self.size, self.size), image_resample_mode) else: image.thumbnail((self.size, self.size), image_resample_mode) if self.debug: print( f'[.] Resized image to fit on screen (saving aspect ratio): "{image_path_or_object}" ({width}, {height}) ' f'-> ({image.size[0]}, {image.size[1]})') # Convert the loaded image to RGB rgb_image = image.convert('RGB') # Iterate over all pixels in the image that are left and buffer them for y in range(image.size[1]): for x in range(image.size[0]): location = (x, y) placed_x = x + xy[0] if self.size - 1 < placed_x or placed_x < 0: continue placed_y = y + xy[1] if self.size - 1 < placed_y or placed_y < 0: continue self.draw_pixel((placed_x, placed_y), rgb_image.getpixel(location)) def draw_image_at_location(self, image_path_or_object, x, y, image_resample_mode=ImageResampleMode.PIXEL_ART): self.draw_image(image_path_or_object, (x, y), image_resample_mode) def draw_line(self, start_xy, stop_xy, rgb=Palette.WHITE): line = set() # Calculate the amount of steps needed between the points to draw a nice line amount_of_steps = minimum_amount_of_steps(start_xy, stop_xy) # Iterate over them and create a nice set of pixels for step in range(amount_of_steps): if amount_of_steps == 0: interpolant = 0 else: interpolant = step / amount_of_steps # Add a pixel as a rounded location line.add( round_location(lerp_location(start_xy, stop_xy, interpolant))) # Draw the actual pixel line for pixel in line: self.draw_pixel(pixel, rgb) def draw_line_from_start_to_stop_rgb(self, start_x, start_y, stop_x, stop_y, r=255, g=255, b=255): self.draw_line((start_x, start_y), (stop_x, stop_y), (r, g, b)) def draw_pixel(self, xy, rgb): # If it's not on the screen, we're not going to bother if xy[0] < 0 or xy[0] >= self.size or xy[1] < 0 or xy[1] >= self.size: if self.debug: limit = self.size - 1 print( f'[!] Invalid coordinates given: ({xy[0]}, {xy[1]}) (maximum coordinates are ({limit}, {limit})') return # Calculate the index index = xy[0] + (xy[1] * self.size) # Color it self.draw_pixel_at_index(index, rgb) def draw_pixel_at_index(self, index, rgb): # Validate the index if index < 0 or index >= self.pixel_count: if self.debug: print(f'[!] Invalid index given: {index} (maximum index is {self.pixel_count - 1})') return # Clamp the color, just to be safe rgb = clamp_color(rgb) # Move to place in array index = index * 3 self.__buffer[index] = rgb[0] self.__buffer[index + 1] = rgb[1] self.__buffer[index + 2] = rgb[2] def draw_pixel_at_index_rgb(self, index, r, g, b): self.draw_pixel_at_index(index, (r, g, b)) def draw_pixel_at_location_rgb(self, x, y, r, g, b): self.draw_pixel((x, y), (r, g, b)) def draw_character(self, character, xy=(0, 0), rgb=Palette.WHITE, font=None): if font is None: font = FONT_PICO_8
def clamp(value, minimum=0, maximum=255): if value > maximum: return maximum if value < minimum: return minimum return value def clamp_color(rgb): return clamp(rgb[0]), clamp(rgb[1]), clamp(rgb[2]) def lerp(start, end, interpolant): return start + interpolant * (end - start) def lerp_location(xy1, xy2, interpolant): return lerp(xy1[0], xy2[0], interpolant), lerp(xy1[1], xy2[1], interpolant) def minimum_amount_of_steps(xy1, xy2): return max(abs(xy1[0] - xy2[0]), abs(xy1[1] - xy2[1])) def rgb_to_hex_color(rgb): return f'#{rgb[0]:0>2X}{rgb[1]:0>2X}{rgb[2]:0>2X}' def round_location(xy): return round(xy[0]), round(xy[1]) class Channel(IntEnum): FACES = 0 CLOUD = 1 VISUALIZER = 2 CUSTOM = 3 class ImageResampleMode(IntEnum): PIXEL_ART = Image.NEAREST class TextScrollDirection(IntEnum): LEFT = 0 RIGHT = 1 class Pixoo: __buffer = [] __buffers_send = 0 __counter = 0 __refresh_counter_limit = 32 def __init__(self, address, size=64, debug=False, refresh_connection_automatically=True): assert size in [16, 32, 64], \ 'Invalid screen size in pixels given. ' \ 'Valid options are 16, 32, and 64' self.refresh_connection_automatically = refresh_connection_automatically self.address = address self.debug = debug self.size = size # Total number of pixels self.pixel_count = self.size * self.size # Generate URL self.__url = 'http://{0}/post'.format(address) # Prefill the buffer self.fill() # Retrieve the counter self.__load_counter() # Resetting if needed if self.refresh_connection_automatically and self.__counter > self.__refresh_counter_limit: self.__reset_counter() def clear(self, rgb: object = Palette.BLACK) -> object: self.fill(rgb) def clear_rgb(self, r, g, b): self.fill_rgb(r, g, b) def draw_character_at_location_rgb(self, character, x=0, y=0, r=255, g=255, b=255): self.draw_character(character, (x, y), (r, g, b)) def draw_filled_rectangle(self, top_left_xy=(0, 0), bottom_right_xy=(1, 1), rgb=Palette.BLACK): for y in range(top_left_xy[1], bottom_right_xy[1] + 1): for x in range(top_left_xy[0], bottom_right_xy[0] + 1): self.draw_pixel((x, y), rgb) def draw_filled_rectangle_from_top_left_to_bottom_right_rgb(self, top_left_x=0, top_left_y=0, bottom_right_x=1, bottom_right_y=1, r=0, g=0, b=0): self.draw_filled_rectangle((top_left_x, top_left_y), (bottom_right_x, bottom_right_y), (r, g, b)) def draw_image(self, image_path_or_object, xy=(0, 0), image_resample_mode=ImageResampleMode.PIXEL_ART, pad_resample=False): image = image_path_or_object if isinstance(image_path_or_object, Image.Image) else Image.open( image_path_or_object) size = image.size width = size[0] height = size[1] # See if it needs to be scaled/resized to fit the display if width > self.size or height > self.size: if pad_resample: image = ImageOps.pad(image, (self.size, self.size), image_resample_mode) else: image.thumbnail((self.size, self.size), image_resample_mode) if self.debug: print( f'[.] 
Resized image to fit on screen (saving aspect ratio): "{image_path_or_object}" ({width}, {height}) ' f'-> ({image.size[0]}, {image.size[1]})') # Convert the loaded image to RGB rgb_image = image.convert('RGB') # Iterate over all pixels in the image that are left and buffer them for y in range(image.size[1]): for x in range(image.size[0]): location = (x, y) placed_x = x + xy[0] if self.size - 1 < placed_x or placed_x < 0: continue placed_y = y + xy[1] if self.size - 1 < placed_y or placed_y < 0: continue self.draw_pixel((placed_x, placed_y), rgb_image.getpixel(location)) def draw_image_at_location(self, image_path_or_object, x, y, image_resample_mode=ImageResampleMode.PIXEL_ART): self.draw_image(image_path_or_object, (x, y), image_resample_mode) def draw_line(self, start_xy, stop_xy, rgb=Palette.WHITE): line = set() # Calculate the amount of steps needed between the points to draw a nice line amount_of_steps = minimum_amount_of_steps(start_xy, stop_xy) # Iterate over them and create a nice set of pixels for step in range(amount_of_steps): if amount_of_steps == 0: interpolant = 0 else: interpolant = step / amount_of_steps # Add a pixel as a rounded location line.add( round_location(lerp_location(start_xy, stop_xy, interpolant))) # Draw the actual pixel line for pixel in line: self.draw_pixel(pixel, rgb) def draw_line_from_start_to_stop_rgb(self, start_x, start_y, stop_x, stop_y, r=255, g=255, b=255): self.draw_line((start_x, start_y), (stop_x, stop_y), (r, g, b)) def draw_pixel(self, xy, rgb): # If it's not on the screen, we're not going to bother if xy[0] < 0 or xy[0] >= self.size or xy[1] < 0 or xy[1] >= self.size: if self.debug: limit = self.size - 1 print( f'[!] Invalid coordinates given: ({xy[0]}, {xy[1]}) (maximum coordinates are ({limit}, {limit})') return # Calculate the index index = xy[0] + (xy[1] * self.size) # Color it self.draw_pixel_at_index(index, rgb) def draw_pixel_at_index(self, index, rgb): # Validate the index if index < 0 or index >= self.pixel_count: if self.debug: print(f'[!] Invalid index given: {index} (maximum index is {self.pixel_count - 1})') return # Clamp the color, just to be safe rgb = clamp_color(rgb) # Move to place in array index = index * 3 self.__buffer[index] = rgb[0] self.__buffer[index + 1] = rgb[1] self.__buffer[index + 2] = rgb[2] def draw_pixel_at_index_rgb(self, index, r, g, b): self.draw_pixel_at_index(index, (r, g, b)) def draw_pixel_at_location_rgb(self, x, y, r, g, b): self.draw_pixel((x, y), (r, g, b)) def draw_character(self, character, xy=(0, 0), rgb=Palette.WHITE, font=None): if font is None: font = FONT_PICO_8
matrix = retrieve_glyph(character, font)
1
2023-11-05 19:16:34+00:00
12k
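The completion target in the row above belongs to a small display driver that keeps the whole screen in a flat RGB byte buffer and rasterises lines by linear interpolation between the two endpoints. The sketch below reimplements just that bookkeeping so the indexing is easy to follow; the class and helper names are illustrative placeholders, not the library's actual API, and this sketch draws the final endpoint pixel where the original loop stops one interpolation step short.

def _clamp(value, minimum=0, maximum=255):
    return max(minimum, min(maximum, value))

def _lerp(start, end, t):
    return start + t * (end - start)

class FrameBufferSketch:
    def __init__(self, size=64):
        self.size = size
        self.buffer = [0] * (size * size * 3)  # packed as [r, g, b, r, g, b, ...]

    def draw_pixel(self, x, y, rgb):
        # Off-screen coordinates are silently ignored, mirroring the row's code.
        if not (0 <= x < self.size and 0 <= y < self.size):
            return
        index = (x + y * self.size) * 3  # same flat index the row computes
        self.buffer[index:index + 3] = [_clamp(c) for c in rgb]

    def draw_line(self, start, stop, rgb=(255, 255, 255)):
        # One step per pixel along the dominant axis, then round each lerped point.
        steps = max(abs(start[0] - stop[0]), abs(start[1] - stop[1]))
        for step in range(steps + 1):
            t = step / steps if steps else 0.0
            self.draw_pixel(round(_lerp(start[0], stop[0], t)),
                            round(_lerp(start[1], stop[1], t)), rgb)

fb = FrameBufferSketch(size=16)
fb.draw_line((0, 0), (15, 7), (255, 0, 0))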
jkulhanek/nerfbaselines
nerfbaselines/datasets/colmap.py
[ { "identifier": "Dataset", "path": "nerfbaselines/types.py", "snippet": "NB_PREFIX = os.path.expanduser(os.environ.get(\"NB_PREFIX\", \"~/.cache/nerfbaselines\"))\nclass Dataset:\nclass CurrentProgress:\n class RenderOutput(TypedDict):\nclass MethodInfo:\nclass Method(Protocol):\nclass RayMethod(Method):\n def __post_init__(self):\n def __len__(self):\n def __getitem__(self, i) -> \"Dataset\":\n def index(obj):\n def load_features(self, required_features, supported_camera_models=None):\n def expected_scene_scale(self):\ndef batched(array, batch_size):\n def install(cls):\n def get_info(self) -> MethodInfo:\n def render(self, cameras: Cameras, progress_callback: Optional[ProgressCallback] = None) -> Iterable[RenderOutput]: # [h w c]\n def setup_train(self, train_dataset: Dataset, *, num_iterations: int):\n def train_iteration(self, step: int):\n def save(self, path: Path):\n def __init__(self, batch_size, seed: int = 42, xnp=np):\n def render_rays(self, origins: np.ndarray, directions: np.ndarray, nears_fars: Optional[np.ndarray]) -> RenderOutput: # batch 3 # batch 3 # batch 3\n def train_iteration_rays(self, step: int, origins: np.ndarray, directions: np.ndarray, nears_fars: Optional[np.ndarray], colors: np.ndarray): # [batch 3] # [batch 3] # [batch 2] # [batch c]\n def setup_train(self, train_dataset: Dataset, *, num_iterations: int):\n def train_iteration(self, step: int):\n def render(self, cameras: Cameras, progress_callback: Optional[ProgressCallback] = None) -> Iterable[RenderOutput]:" }, { "identifier": "Indices", "path": "nerfbaselines/utils.py", "snippet": "class Indices:\n def __init__(self, steps):\n self._steps = steps\n self.total: Optional[int] = None\n\n def __contains__(self, x):\n if isinstance(self._steps, list):\n steps = self._steps\n if any(x < 0 for x in self._steps):\n assert self.total is not None, \"total must be specified for negative steps\"\n steps = set(x if x >= 0 else self.total + x for x in self._steps)\n return x in steps\n elif isinstance(self._steps, slice):\n start: int = self._steps.start or 0\n if start < 0:\n assert self.total is not None, \"total must be specified for negative start\"\n start = self.total - start\n stop: Optional[int] = self._steps.stop or self.total\n if stop is not None and stop < 0:\n assert self.total is not None, \"total must be specified for negative stop\"\n stop = self.total - stop\n step: int = self._steps.step or 1\n return x >= start and (stop is None or x < stop) and (x - start) % step == 0\n\n @classmethod\n def every_iters(cls, iters: int, zero: bool = False):\n start = iters if zero else 0\n return cls(slice(start, None, iters))\n\n def __repr__(self):\n if isinstance(self._steps, list):\n return \",\".join(map(str, self._steps))\n elif isinstance(self._steps, slice):\n out = f\"{self._steps.start or ''}:{self._steps.stop or ''}\"\n if self._steps.step is not None:\n out += f\":{self._steps.step}\"\n return out\n else:\n return repr(self._steps)\n\n def __str__(self):\n return repr(self)" }, { "identifier": "CameraModel", "path": "nerfbaselines/cameras.py", "snippet": "class CameraModel(Enum):\n PINHOLE = 0\n OPENCV = 1\n OPENCV_FISHEYE = 2\n FULL_OPENCV = 3" }, { "identifier": "Cameras", "path": "nerfbaselines/cameras.py", "snippet": "class Cameras:\n poses: np.ndarray # [N, (R, t)]\n normalized_intrinsics: np.ndarray # [N, (fx,fy,cx,cy)]\n\n # Distortions\n camera_types: np.ndarray # [N]\n distortion_parameters: np.ndarray # [N, num_params]\n\n image_sizes: Optional[np.ndarray] # [N, 2]\n nears_fars: 
Optional[np.ndarray] # [N, 2]\n\n @cached_property\n def intrinsics(self):\n assert self.image_sizes is not None\n assert self.normalized_intrinsics.shape[:-1] == self.image_sizes.shape[:-1], \"normalized_intrinsics and image_sizes must be broadcastable\"\n return self.normalized_intrinsics * self.image_sizes[..., :1]\n\n def __len__(self):\n if len(self.poses.shape) == 2:\n return 1\n return len(self.poses)\n\n def item(self):\n assert len(self) == 1, \"Cameras must have exactly one element to be converted to a single camera\"\n if len(self.poses.shape) == 2:\n return self\n return self[0]\n\n def __getitem__(self, index):\n return type(self)(\n poses=self.poses[index],\n normalized_intrinsics=self.normalized_intrinsics[index],\n camera_types=self.camera_types[index],\n distortion_parameters=self.distortion_parameters[index],\n image_sizes=self.image_sizes[index] if self.image_sizes is not None else None,\n nears_fars=self.nears_fars[index] if self.nears_fars is not None else None,\n )\n\n def __setitem__(self, index, value):\n assert (self.image_sizes is None) == (value.image_sizes is None), \"Either both or none of the cameras must have image sizes\"\n assert (self.nears_fars is None) == (value.nears_fars is None), \"Either both or none of the cameras must have nears and fars\"\n self.poses[index] = value.poses\n self.normalized_intrinsics[index] = value.normalized_intrinsics\n self.camera_types[index] = value.camera_types\n self.distortion_parameters[index] = value.distortion_parameters\n if self.image_sizes is not None:\n self.image_sizes[index] = value.image_sizes\n if self.nears_fars is not None:\n self.nears_fars[index] = value.nears_fars\n\n def __iter__(self):\n for i in range(len(self)):\n yield self[i]\n\n def get_rays(self, xy: np.ndarray, xnp=np) -> Tuple[np.ndarray, np.ndarray]:\n assert xy.shape[-1] == 2\n assert xy.shape[0] == len(self)\n assert xy.dtype.kind in {\"i\", \"u\"}, \"xy must be integer\"\n\n xy = xy.astype(xnp.float32) + 0.5\n return self.unproject(xy, xnp=xnp)\n\n def unproject(self, xy: np.ndarray, xnp=np) -> Tuple[np.ndarray, np.ndarray]:\n assert xy.shape[-1] == 2\n assert is_broadcastable(xy.shape[:-1], self.poses.shape[:-2]), \"xy must be broadcastable with poses, shapes: {}, {}\".format(xy.shape[:-1], self.poses.shape[:-2])\n assert xy.dtype.kind == \"f\"\n fx, fy, cx, cy = xnp.moveaxis(self.intrinsics, -1, 0)\n x = xy[..., 0]\n y = xy[..., 1]\n u = (x - cx) / fx\n v = (y - cy) / fy\n\n uv = xnp.stack((u, v), -1)\n uv = _undistort(self.camera_types, self.distortion_parameters, uv, xnp=xnp)\n directions = xnp.concatenate((uv, xnp.ones_like(uv[..., :1])), -1)\n\n # Switch from OpenCV to OpenGL coordinate system\n directions[..., 1:] *= -1\n\n rotation = self.poses[..., :3, :3] # (..., 3, 3)\n directions = (directions[..., None, :] * rotation).sum(-1)\n origins = xnp.broadcast_to(self.poses[..., :3, 3], directions.shape)\n return origins, directions\n\n def project(self, xyz: np.ndarray, xnp=np) -> np.ndarray:\n eps = xnp.finfo(xyz.dtype).eps\n assert xyz.shape[-1] == 3\n assert is_broadcastable(xyz.shape[:-1], self.poses.shape[:-2]), \"xyz must be broadcastable with poses, shapes: {}, {}\".format(xyz.shape[:-1], self.poses.shape[:-2])\n\n # World -> Camera\n origins = self.poses[..., :3, 3]\n rotation = self.poses[..., :3, :3]\n # Rotation and translation\n uvw = xyz - origins\n uvw = (rotation * uvw[..., :, None]).sum(-2)\n # Switch from OpenGL to OpenCV coordinate system\n uvw[..., 1:] *= -1\n\n # Camera -> Camera distorted\n uv = xnp.divide(uvw[..., 
:2], uvw[..., 2:], out=xnp.zeros_like(uvw[..., :2]), where=xnp.abs(uvw[..., 2:]) > eps)\n\n uv = _distort(self.camera_types, self.distortion_parameters, uv, xnp=xnp)\n x, y = xnp.moveaxis(uv, -1, 0)\n\n # Transform to image coordinates\n # Camera distorted -> Image\n fx, fy, cx, cy = xnp.moveaxis(self.intrinsics, -1, 0)\n x = fx * x + cx\n y = fy * y + cy\n return xnp.stack((x, y), -1)\n\n @classmethod\n def cat(cls, values: Sequence[\"Cameras\"]) -> \"Cameras\":\n return cls(\n poses=np.concatenate([v.poses for v in values]),\n normalized_intrinsics=np.concatenate([v.normalized_intrinsics for v in values]),\n camera_types=np.concatenate([v.camera_types for v in values]),\n distortion_parameters=np.concatenate([v.distortion_parameters for v in values]),\n image_sizes=np.concatenate([v.image_sizes for v in values]) if any(v.image_sizes is not None for v in values) else None,\n nears_fars=np.concatenate([v.nears_fars for v in values]) if any(v.nears_fars is not None for v in values) else None,\n )\n\n def with_image_sizes(self, image_sizes: np.ndarray) -> \"Cameras\":\n return dataclasses.replace(self, image_sizes=image_sizes)" }, { "identifier": "read_cameras_binary", "path": "nerfbaselines/datasets/_colmap_utils.py", "snippet": "def read_cameras_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::WriteCamerasBinary(const std::string& path)\n void Reconstruction::ReadCamerasBinary(const std::string& path)\n \"\"\"\n cameras = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_cameras = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_cameras):\n camera_properties = read_next_bytes(fid, num_bytes=24, format_char_sequence=\"iiQQ\")\n camera_id = camera_properties[0]\n model_id = camera_properties[1]\n model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name\n width = camera_properties[2]\n height = camera_properties[3]\n num_params = CAMERA_MODEL_IDS[model_id].num_params\n params = read_next_bytes(fid, num_bytes=8 * num_params, format_char_sequence=\"d\" * num_params)\n cameras[camera_id] = Camera(\n id=camera_id,\n model=model_name,\n width=width,\n height=height,\n params=np.array(params),\n )\n assert len(cameras) == num_cameras\n return cameras" }, { "identifier": "read_images_binary", "path": "nerfbaselines/datasets/_colmap_utils.py", "snippet": "def read_images_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadImagesBinary(const std::string& path)\n void Reconstruction::WriteImagesBinary(const std::string& path)\n \"\"\"\n images = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_reg_images = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_reg_images):\n binary_image_properties = read_next_bytes(fid, num_bytes=64, format_char_sequence=\"idddddddi\")\n image_id = binary_image_properties[0]\n qvec = np.array(binary_image_properties[1:5])\n tvec = np.array(binary_image_properties[5:8])\n camera_id = binary_image_properties[8]\n image_name = \"\"\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n while current_char != b\"\\x00\": # look for the ASCII 0 entry\n image_name += current_char.decode(\"utf-8\")\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence=\"Q\")[0]\n x_y_id_s = read_next_bytes(\n fid,\n num_bytes=24 * num_points2D,\n format_char_sequence=\"ddq\" * num_points2D,\n )\n xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))])\n point3D_ids = 
np.array(tuple(map(int, x_y_id_s[2::3])))\n images[image_id] = Image(\n id=image_id,\n qvec=qvec,\n tvec=tvec,\n camera_id=camera_id,\n name=image_name,\n xys=xys,\n point3D_ids=point3D_ids,\n )\n return images" }, { "identifier": "read_points3D_binary", "path": "nerfbaselines/datasets/_colmap_utils.py", "snippet": "def read_points3D_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DBinary(const std::string& path)\n void Reconstruction::WritePoints3DBinary(const std::string& path)\n \"\"\"\n points3D = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_points = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_points):\n binary_point_line_properties = read_next_bytes(fid, num_bytes=43, format_char_sequence=\"QdddBBBd\")\n point3D_id = binary_point_line_properties[0]\n xyz = np.array(binary_point_line_properties[1:4])\n rgb = np.array(binary_point_line_properties[4:7])\n error = np.array(binary_point_line_properties[7])\n track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence=\"Q\")[0]\n track_elems = read_next_bytes(\n fid,\n num_bytes=8 * track_length,\n format_char_sequence=\"ii\" * track_length,\n )\n image_ids = np.array(tuple(map(int, track_elems[0::2])))\n point2D_idxs = np.array(tuple(map(int, track_elems[1::2])))\n points3D[point3D_id] = Point3D(\n id=point3D_id,\n xyz=xyz,\n rgb=rgb,\n error=error,\n image_ids=image_ids,\n point2D_idxs=point2D_idxs,\n )\n return points3D" }, { "identifier": "qvec2rotmat", "path": "nerfbaselines/datasets/_colmap_utils.py", "snippet": "def qvec2rotmat(self):\n return qvec2rotmat(self.qvec)" }, { "identifier": "read_cameras_text", "path": "nerfbaselines/datasets/_colmap_utils.py", "snippet": "class Image(BaseImage):\n def qvec2rotmat(self):\ndef read_next_bytes(fid, num_bytes, format_char_sequence, endian_character=\"<\"):\ndef write_next_bytes(fid, data, format_char_sequence, endian_character=\"<\"):\ndef read_cameras_text(path):\ndef read_cameras_binary(path_to_model_file):\ndef write_cameras_text(cameras, path):\ndef write_cameras_binary(cameras, path_to_model_file):\ndef read_images_text(path):\ndef read_images_binary(path_to_model_file):\ndef write_images_text(images, path):\ndef write_images_binary(images, path_to_model_file):\ndef read_points3D_text(path):\ndef read_points3D_binary(path_to_model_file):\ndef write_points3D_text(points3D, path):\ndef write_points3D_binary(points3D, path_to_model_file):\ndef detect_model_format(path, ext):\ndef read_model(path, ext=\"\"):\ndef write_model(cameras, images, points3D, path, ext=\".bin\"):\ndef qvec2rotmat(qvec):\ndef rotmat2qvec(R):\nCAMERA_MODELS = {\n CameraModel(model_id=0, model_name=\"SIMPLE_PINHOLE\", num_params=3),\n CameraModel(model_id=1, model_name=\"PINHOLE\", num_params=4),\n CameraModel(model_id=2, model_name=\"SIMPLE_RADIAL\", num_params=4),\n CameraModel(model_id=3, model_name=\"RADIAL\", num_params=5),\n CameraModel(model_id=4, model_name=\"OPENCV\", num_params=8),\n CameraModel(model_id=5, model_name=\"OPENCV_FISHEYE\", num_params=8),\n CameraModel(model_id=6, model_name=\"FULL_OPENCV\", num_params=12),\n CameraModel(model_id=7, model_name=\"FOV\", num_params=5),\n CameraModel(model_id=8, model_name=\"SIMPLE_RADIAL_FISHEYE\", num_params=4),\n CameraModel(model_id=9, model_name=\"RADIAL_FISHEYE\", num_params=5),\n CameraModel(model_id=10, model_name=\"THIN_PRISM_FISHEYE\", num_params=12),\n}\nCAMERA_MODEL_IDS = dict([(camera_model.model_id, camera_model) for camera_model in 
CAMERA_MODELS])\nCAMERA_MODEL_NAMES = dict([(camera_model.model_name, camera_model) for camera_model in CAMERA_MODELS])\n HEADER = \"# Camera list with one line of data per camera:\\n\" + \"# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]\\n\" + \"# Number of cameras: {}\\n\".format(len(cameras))\n HEADER = (\n \"# Image list with two lines of data per image:\\n\"\n + \"# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\\n\"\n + \"# POINTS2D[] as (X, Y, POINT3D_ID)\\n\"\n + \"# Number of images: {}, mean observations per image: {}\\n\".format(len(images), mean_observations)\n )\n HEADER = (\n \"# 3D point list with one line of data per point:\\n\"\n + \"# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as (IMAGE_ID, POINT2D_IDX)\\n\"\n + \"# Number of points: {}, mean track length: {}\\n\".format(len(points3D), mean_track_length)\n )\n K = (\n np.array(\n [\n [Rxx - Ryy - Rzz, 0, 0, 0],\n [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],\n [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],\n [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz],\n ]\n )\n / 3.0\n )" }, { "identifier": "DatasetNotFoundError", "path": "nerfbaselines/datasets/_common.py", "snippet": "def single(xs):\ndef _dataset_undistort_unsupported(dataset: Dataset, supported_camera_models):\ndef dataset_load_features(dataset: Dataset, required_features, supported_camera_models=None):\n def __init__(self, errors, message):\n def write_to_logger(self, color=True, terminal_width=None):\nclass DatasetNotFoundError(Exception):\nclass MultiDatasetError(DatasetNotFoundError):" } ]
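The context snippets above (read_images_binary, qvec2rotmat, Cameras) all feed into one operation: turning COLMAP's per-image quaternion qvec (stored as w, x, y, z) and translation tvec into a camera pose matrix. The snippet below spells that conversion out with the standard unit-quaternion formula, assuming the usual COLMAP convention that qvec/tvec encode the world-to-camera transform; it is an illustration, not the repository's loader.

import numpy as np

def quat_to_rotmat(qvec):
    # Standard unit-quaternion (w, x, y, z) -> rotation matrix.
    w, x, y, z = np.asarray(qvec, dtype=np.float64) / np.linalg.norm(qvec)
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z),     2 * (x * z + w * y)],
        [2 * (x * y + w * z),     1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y),     2 * (y * z + w * x),     1 - 2 * (x * x + y * y)],
    ])

def colmap_image_to_c2w(qvec, tvec):
    # Invert the world-to-camera (R, t) stored per image to get a 3x4
    # camera-to-world pose: R_c2w = R^T, camera centre = -R^T t.
    R_w2c = quat_to_rotmat(qvec)
    t_w2c = np.asarray(tvec, dtype=np.float64)
    R_c2w = R_w2c.T
    center = -R_c2w @ t_w2c
    return np.concatenate([R_c2w, center[:, None]], axis=1)

pose = colmap_image_to_c2w([1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 2.0])  # identity rotation, centre at (0, 0, -2)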
import typing
import logging
import numpy as np
from collections import OrderedDict
from pathlib import Path
from typing import Tuple, Optional, Dict
from ..types import Dataset, DatasetFeature, FrozenSet
from ..utils import Indices
from ..cameras import CameraModel, Cameras
from ._colmap_utils import read_cameras_binary, read_images_binary, read_points3D_binary, qvec2rotmat
from ._colmap_utils import read_cameras_text, read_images_text, read_points3D_text, Image, Camera, Point3D
from ._common import DatasetNotFoundError, padded_stack
7315
# du = u * thetad / r - u; # dv = v * thetad / r - v; # else: # du = dv = 0 fl_x = float(camera_params[0]) fl_y = float(camera_params[1]) cx = float(camera_params[2]) cy = float(camera_params[3]) out["k1"] = float(camera_params[4]) out["k2"] = float(camera_params[5]) out["k3"] = float(camera_params[6]) out["k4"] = float(camera_params[7]) camera_model = CameraModel.OPENCV_FISHEYE elif camera.model == "FULL_OPENCV": # fx, fy, cx, cy, k1, k2, p1, p2, k3, k4, k5, k6 # u2 = u ** 2 # uv = u * v # v2 = v ** 2 # r2 = u2 + v2 # r4 = r2 * r2 # r6 = r4 * r2 # radial = (1 + k1 * r2 + k2 * r4 + k3 * r6) / # (1 + k4 * r2 + k5 * r4 + k6 * r6) # du = u * radial + 2 * p1 * uv + p2 * (r2 + 2 * u2) - u # dv = v * radial + 2 * p2 * uv + p1 * (r2 + 2 * v2) - v fl_x = float(camera_params[0]) fl_y = float(camera_params[1]) cx = float(camera_params[2]) cy = float(camera_params[3]) out["k1"] = float(camera_params[4]) out["k2"] = float(camera_params[5]) out["p1"] = float(camera_params[6]) out["p2"] = float(camera_params[7]) out["k3"] = float(camera_params[8]) out["k4"] = float(camera_params[9]) out["k5"] = float(camera_params[10]) out["k6"] = float(camera_params[11]) raise NotImplementedError(f"{camera.model} camera model is not supported yet!") elif camera.model == "FOV": # fx, fy, cx, cy, omega fl_x = float(camera_params[0]) fl_y = float(camera_params[1]) cx = float(camera_params[2]) cy = float(camera_params[3]) out["omega"] = float(camera_params[4]) raise NotImplementedError(f"{camera.model} camera model is not supported yet!") elif camera.model == "SIMPLE_RADIAL_FISHEYE": # f, cx, cy, k # r = sqrt(u ** 2 + v ** 2) # if r > eps: # theta = atan(r) # theta2 = theta ** 2 # thetad = theta * (1 + k * theta2) # du = u * thetad / r - u; # dv = v * thetad / r - v; # else: # du = dv = 0 fl_x = float(camera_params[0]) fl_y = float(camera_params[0]) cx = float(camera_params[1]) cy = float(camera_params[2]) out["k1"] = float(camera_params[3]) camera_model = CameraModel.OPENCV_FISHEYE elif camera.model == "RADIAL_FISHEYE": # f, cx, cy, k1, k2 # r = sqrt(u ** 2 + v ** 2) # if r > eps: # theta = atan(r) # theta2 = theta ** 2 # theta4 = theta2 ** 2 # thetad = theta * (1 + k * theta2) # thetad = theta * (1 + k1 * theta2 + k2 * theta4) # du = u * thetad / r - u; # dv = v * thetad / r - v; # else: # du = dv = 0 fl_x = float(camera_params[0]) fl_y = float(camera_params[0]) cx = float(camera_params[1]) cy = float(camera_params[2]) out["k1"] = float(camera_params[3]) out["k2"] = float(camera_params[4]) out["k3"] = 0.0 out["k4"] = 0.0 camera_model = CameraModel.OPENCV_FISHEYE else: # THIN_PRISM_FISHEYE not supported! 
raise NotImplementedError(f"{camera.model} camera model is not supported yet!") image_width: int = camera.width image_height: int = camera.height intrinsics = np.array([fl_x, fl_y, cx, cy], dtype=np.float32) / float(image_width) distortion_params = np.array([out.get(k, 0.0) for k in ("k1", "k2", "p1", "p2", "k3", "k4")], dtype=np.float32) return intrinsics, camera_model.value, distortion_params, (image_width, image_height) def load_colmap_dataset(path: Path, images_path: Optional[Path] = None, split: Optional[str] = None, test_indices: Optional[Indices] = None, features: Optional[FrozenSet[DatasetFeature]] = None): if features is None: features = typing.cast(FrozenSet[DatasetFeature], {}) load_points = "points3D_xyz" in features or "points3D_rgb" in features if split: assert split in {"train", "test"} # Load COLMAP dataset colmap_path = path / "sparse" / "0" if images_path is None: images_path = Path("images") images_path = path / images_path if not colmap_path.exists(): raise DatasetNotFoundError("Missing 'sparse/0' folder in COLMAP dataset") if not (colmap_path / "cameras.bin").exists() and not (colmap_path / "cameras.txt").exists(): raise DatasetNotFoundError("Missing 'sparse/0/cameras.{bin,txt}' file in COLMAP dataset") if not images_path.exists(): raise DatasetNotFoundError("Missing 'images' folder in COLMAP dataset") if (colmap_path / "cameras.bin").exists(): cameras = read_cameras_binary(colmap_path / "cameras.bin") elif (colmap_path / "cameras.txt").exists():
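Several of the branches above describe the lens distortion only in comments (r = sqrt(u^2 + v^2), theta = atan(r), and so on). Written out as an executable function, the OPENCV_FISHEYE forward model from those comments looks roughly like this; it is a transcription added for clarity, not code taken from the repository:

import numpy as np

def fisheye_distort(u, v, k1=0.0, k2=0.0, k3=0.0, k4=0.0, eps=1e-8):
    # Scale the normalised coordinates by theta_d / r, as in the comments above.
    r = np.sqrt(u * u + v * v)
    theta = np.arctan(r)
    theta2 = theta * theta
    theta_d = theta * (1 + k1 * theta2 + k2 * theta2**2 + k3 * theta2**3 + k4 * theta2**4)
    scale = np.where(r > eps, theta_d / np.maximum(r, eps), 1.0)
    return u * scale, v * scale

# fisheye_distort(0.3, -0.1, k1=0.05) returns slightly pulled-in coordinates.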
def _parse_colmap_camera_params(camera: Camera) -> Tuple[np.ndarray, int, np.ndarray, Tuple[int, int]]: """ Parses all currently supported COLMAP cameras into the transforms.json metadata Args: camera: COLMAP camera Returns: transforms.json metadata containing camera's intrinsics and distortion parameters """ # Parameters match https://github.com/colmap/colmap/blob/dev/src/base/camera_models.h out = OrderedDict() # Default in Python 3.7+ camera_params = camera.params camera_model: CameraModel if camera.model == "SIMPLE_PINHOLE": # du = 0 # dv = 0 fl_x = float(camera_params[0]) fl_y = float(camera_params[0]) cx = float(camera_params[1]) cy = float(camera_params[2]) camera_model = CameraModel.PINHOLE elif camera.model == "PINHOLE": # f, cx, cy, k # du = 0 # dv = 0 fl_x = float(camera_params[0]) fl_y = float(camera_params[1]) cx = float(camera_params[2]) cy = float(camera_params[3]) camera_model = CameraModel.PINHOLE elif camera.model == "SIMPLE_RADIAL": # f, cx, cy, k # r2 = u**2 + v**2; # radial = k * r2 # du = u * radial # dv = u * radial fl_x = float(camera_params[0]) fl_y = float(camera_params[0]) cx = float(camera_params[1]) cy = float(camera_params[2]) out["k1"] = float(camera_params[3]) camera_model = CameraModel.OPENCV elif camera.model == "RADIAL": # f, cx, cy, k1, k2 # r2 = u**2 + v**2; # radial = k1 * r2 + k2 * r2 ** 2 # du = u * radial # dv = v * radial fl_x = float(camera_params[0]) fl_y = float(camera_params[0]) cx = float(camera_params[1]) cy = float(camera_params[2]) out["k1"] = float(camera_params[3]) out["k2"] = float(camera_params[4]) camera_model = CameraModel.OPENCV elif camera.model == "OPENCV": # fx, fy, cx, cy, k1, k2, p1, p2 # uv = u * v; # r2 = u**2 + v**2 # radial = k1 * r2 + k2 * r2 ** 2 # du = u * radial + 2 * p1 * u*v + p2 * (r2 + 2 * u**2) # dv = v * radial + 2 * p2 * u*v + p1 * (r2 + 2 * v**2) fl_x = float(camera_params[0]) fl_y = float(camera_params[1]) cx = float(camera_params[2]) cy = float(camera_params[3]) out["k1"] = float(camera_params[4]) out["k2"] = float(camera_params[5]) out["p1"] = float(camera_params[6]) out["p2"] = float(camera_params[7]) camera_model = CameraModel.OPENCV elif camera.model == "OPENCV_FISHEYE": # fx, fy, cx, cy, k1, k2, k3, k4 # r = sqrt(u**2 + v**2) # if r > eps: # theta = atan(r) # theta2 = theta ** 2 # theta4 = theta2 ** 2 # theta6 = theta4 * theta2 # theta8 = theta4 ** 2 # thetad = theta * (1 + k1 * theta2 + k2 * theta4 + k3 * theta6 + k4 * theta8) # du = u * thetad / r - u; # dv = v * thetad / r - v; # else: # du = dv = 0 fl_x = float(camera_params[0]) fl_y = float(camera_params[1]) cx = float(camera_params[2]) cy = float(camera_params[3]) out["k1"] = float(camera_params[4]) out["k2"] = float(camera_params[5]) out["k3"] = float(camera_params[6]) out["k4"] = float(camera_params[7]) camera_model = CameraModel.OPENCV_FISHEYE elif camera.model == "FULL_OPENCV": # fx, fy, cx, cy, k1, k2, p1, p2, k3, k4, k5, k6 # u2 = u ** 2 # uv = u * v # v2 = v ** 2 # r2 = u2 + v2 # r4 = r2 * r2 # r6 = r4 * r2 # radial = (1 + k1 * r2 + k2 * r4 + k3 * r6) / # (1 + k4 * r2 + k5 * r4 + k6 * r6) # du = u * radial + 2 * p1 * uv + p2 * (r2 + 2 * u2) - u # dv = v * radial + 2 * p2 * uv + p1 * (r2 + 2 * v2) - v fl_x = float(camera_params[0]) fl_y = float(camera_params[1]) cx = float(camera_params[2]) cy = float(camera_params[3]) out["k1"] = float(camera_params[4]) out["k2"] = float(camera_params[5]) out["p1"] = float(camera_params[6]) out["p2"] = float(camera_params[7]) out["k3"] = float(camera_params[8]) out["k4"] = float(camera_params[9]) out["k5"] = 
float(camera_params[10]) out["k6"] = float(camera_params[11]) raise NotImplementedError(f"{camera.model} camera model is not supported yet!") elif camera.model == "FOV": # fx, fy, cx, cy, omega fl_x = float(camera_params[0]) fl_y = float(camera_params[1]) cx = float(camera_params[2]) cy = float(camera_params[3]) out["omega"] = float(camera_params[4]) raise NotImplementedError(f"{camera.model} camera model is not supported yet!") elif camera.model == "SIMPLE_RADIAL_FISHEYE": # f, cx, cy, k # r = sqrt(u ** 2 + v ** 2) # if r > eps: # theta = atan(r) # theta2 = theta ** 2 # thetad = theta * (1 + k * theta2) # du = u * thetad / r - u; # dv = v * thetad / r - v; # else: # du = dv = 0 fl_x = float(camera_params[0]) fl_y = float(camera_params[0]) cx = float(camera_params[1]) cy = float(camera_params[2]) out["k1"] = float(camera_params[3]) camera_model = CameraModel.OPENCV_FISHEYE elif camera.model == "RADIAL_FISHEYE": # f, cx, cy, k1, k2 # r = sqrt(u ** 2 + v ** 2) # if r > eps: # theta = atan(r) # theta2 = theta ** 2 # theta4 = theta2 ** 2 # thetad = theta * (1 + k * theta2) # thetad = theta * (1 + k1 * theta2 + k2 * theta4) # du = u * thetad / r - u; # dv = v * thetad / r - v; # else: # du = dv = 0 fl_x = float(camera_params[0]) fl_y = float(camera_params[0]) cx = float(camera_params[1]) cy = float(camera_params[2]) out["k1"] = float(camera_params[3]) out["k2"] = float(camera_params[4]) out["k3"] = 0.0 out["k4"] = 0.0 camera_model = CameraModel.OPENCV_FISHEYE else: # THIN_PRISM_FISHEYE not supported! raise NotImplementedError(f"{camera.model} camera model is not supported yet!") image_width: int = camera.width image_height: int = camera.height intrinsics = np.array([fl_x, fl_y, cx, cy], dtype=np.float32) / float(image_width) distortion_params = np.array([out.get(k, 0.0) for k in ("k1", "k2", "p1", "p2", "k3", "k4")], dtype=np.float32) return intrinsics, camera_model.value, distortion_params, (image_width, image_height) def load_colmap_dataset(path: Path, images_path: Optional[Path] = None, split: Optional[str] = None, test_indices: Optional[Indices] = None, features: Optional[FrozenSet[DatasetFeature]] = None): if features is None: features = typing.cast(FrozenSet[DatasetFeature], {}) load_points = "points3D_xyz" in features or "points3D_rgb" in features if split: assert split in {"train", "test"} # Load COLMAP dataset colmap_path = path / "sparse" / "0" if images_path is None: images_path = Path("images") images_path = path / images_path if not colmap_path.exists(): raise DatasetNotFoundError("Missing 'sparse/0' folder in COLMAP dataset") if not (colmap_path / "cameras.bin").exists() and not (colmap_path / "cameras.txt").exists(): raise DatasetNotFoundError("Missing 'sparse/0/cameras.{bin,txt}' file in COLMAP dataset") if not images_path.exists(): raise DatasetNotFoundError("Missing 'images' folder in COLMAP dataset") if (colmap_path / "cameras.bin").exists(): cameras = read_cameras_binary(colmap_path / "cameras.bin") elif (colmap_path / "cameras.txt").exists():
cameras = read_cameras_text(colmap_path / "cameras.txt")
8
2023-11-07 20:22:35+00:00
12k
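Taken together, the row's target function reduces every supported COLMAP camera to the same output: four intrinsics (fx, fy, cx, cy) divided by the image width, plus a fixed-length (k1, k2, p1, p2, k3, k4) distortion vector. A condensed sketch for two of the simpler models, with a worked example, follows; the real function in the row handles many more models, so this is an illustration rather than a drop-in replacement.

import numpy as np

def parse_simple_models(model, params, width, height):
    dist = dict.fromkeys(("k1", "k2", "p1", "p2", "k3", "k4"), 0.0)
    if model == "SIMPLE_RADIAL":      # params: f, cx, cy, k
        fx = fy = params[0]
        cx, cy = params[1], params[2]
        dist["k1"] = params[3]
    elif model == "OPENCV":           # params: fx, fy, cx, cy, k1, k2, p1, p2
        fx, fy, cx, cy = params[:4]
        dist["k1"], dist["k2"], dist["p1"], dist["p2"] = params[4:8]
    else:
        raise NotImplementedError(model)
    intrinsics = np.array([fx, fy, cx, cy], dtype=np.float32) / float(width)
    distortion = np.array([dist[k] for k in ("k1", "k2", "p1", "p2", "k3", "k4")], dtype=np.float32)
    return intrinsics, distortion, (width, height)

# A 1920x1080 SIMPLE_RADIAL camera with params [1600, 960, 540, -0.03] yields
# normalised intrinsics [0.8333, 0.8333, 0.5, 0.28125] and distortion [-0.03, 0, 0, 0, 0, 0].
print(parse_simple_models("SIMPLE_RADIAL", [1600.0, 960.0, 540.0, -0.03], 1920, 1080))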