def adversarial_group_calibration(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    cali_type: str,
    prop_type: str = "interval",
    num_bins: int = 100,
    num_group_bins: int = 10,
    draw_with_replacement: bool = False,
    num_trials: int = 10,
    num_group_draws: int = 10,
    verbose: bool = False,
) -> Namespace:
    """Adversarial group calibration.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        cali_type: type of calibration error to measure; one of
            ["mean_abs", "root_mean_sq"].
        prop_type: "interval" to measure observed proportions for centered prediction
            intervals, and "quantile" for observed proportions below a predicted quantile.
        num_bins: number of discretizations for the probability space [0, 1].
        num_group_bins: number of discretizations for group size proportions
            between 0 and 1.
        draw_with_replacement: True to draw subgroups that draw from the dataset
            with replacement.
        num_trials: number of trials to estimate the worst calibration error
            per group size.
        num_group_draws: number of subgroups to draw per given group size to
            measure calibration error on.
        verbose: True to print progress statements.

    Returns:
        A Namespace with an array of the group sizes, the mean of the worst
        calibration errors for each group size, and the standard error of the
        worst calibration error for each group size.
    """
    # Check that input arrays are flat
    assert_is_flat_same_shape(y_pred, y_std, y_true)
    # Check that input std is positive
    assert_is_positive(y_std)
    # Check that prop_type is one of 'interval' or 'quantile'
    assert prop_type in ["interval", "quantile"]

    num_pts = y_true.shape[0]

    if cali_type == "mean_abs":
        cali_fn = mean_absolute_calibration_error
    elif cali_type == "root_mean_sq":
        cali_fn = root_mean_squared_calibration_error
    else:
        raise ValueError("cali_type must be 'mean_abs' or 'root_mean_sq'")

    ratio_arr = np.linspace(0, 1, num_group_bins)
    score_mean_per_ratio = []
    score_stderr_per_ratio = []
    if verbose:
        print(
            (
                "Measuring adversarial group calibration by spanning group"
                " size between {} and {}, in {} intervals"
            ).format(np.min(ratio_arr), np.max(ratio_arr), num_group_bins)
        )
    progress = tqdm(ratio_arr) if verbose else ratio_arr
    for r in progress:
        group_size = max([int(round(num_pts * r)), 2])
        score_per_trial = []  # list of worst miscalibrations encountered
        for _ in range(num_trials):
            group_miscal_scores = []
            for g_idx in range(num_group_draws):
                rand_idx = np.random.choice(
                    num_pts, group_size, replace=draw_with_replacement
                )
                group_y_pred = y_pred[rand_idx]
                group_y_true = y_true[rand_idx]
                group_y_std = y_std[rand_idx]
                group_miscal = cali_fn(
                    group_y_pred,
                    group_y_std,
                    group_y_true,
                    num_bins=num_bins,
                    vectorized=True,
                    prop_type=prop_type,
                )
                group_miscal_scores.append(group_miscal)
            max_miscal_score = np.max(group_miscal_scores)
            score_per_trial.append(max_miscal_score)
        score_mean_across_trials = np.mean(score_per_trial)
        score_stderr_across_trials = np.std(score_per_trial, ddof=1)
        score_mean_per_ratio.append(score_mean_across_trials)
        score_stderr_per_ratio.append(score_stderr_across_trials)

    out = Namespace(
        group_size=ratio_arr,
        score_mean=np.array(score_mean_per_ratio),
        score_stderr=np.array(score_stderr_per_ratio),
    )
    return out
adversarial_group_calibration
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/metrics_calibration.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
MIT
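The loop above is easiest to see end to end on synthetic data. Below is a small self-contained sketch of the same idea, not the toolbox API: simple_ma_cal is an illustrative stand-in for mean_absolute_calibration_error, and the sizes, trial counts, and seed are arbitrary.

import numpy as np
from scipy import stats

def simple_ma_cal(y_pred, y_std, y_true, num_bins=20):
    # Mean absolute gap between expected and observed central-interval coverage.
    exp_p = np.linspace(0, 1, num_bins)
    z = (y_pred - y_true) / y_std
    lo = stats.norm.ppf(0.5 - exp_p / 2)
    hi = stats.norm.ppf(0.5 + exp_p / 2)
    obs_p = np.mean((z[:, None] >= lo) & (z[:, None] <= hi), axis=0)
    return float(np.mean(np.abs(exp_p - obs_p)))

rng = np.random.default_rng(0)
n = 500
y_true = rng.normal(size=n)
y_pred = y_true + 0.1 * rng.normal(size=n)
y_std = np.full(n, 0.5)  # deliberately too wide, so miscalibrated

for ratio in (0.1, 0.5, 1.0):
    size = max(int(n * ratio), 2)
    worst = 0.0
    for _ in range(20):  # "adversarial" search over random subgroups
        idx = rng.choice(n, size, replace=False)
        worst = max(worst, simple_ma_cal(y_pred[idx], y_std[idx], y_true[idx]))
    print(f"group ratio {ratio:.1f}: worst MA calibration error {worst:.3f}")

Smaller groups admit larger worst-case errors, which is why the full metric reports a curve over group sizes rather than a single number.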
def miscalibration_area_from_proportions(
    exp_proportions: np.ndarray, obs_proportions: np.ndarray
) -> float:
    """Miscalibration area from expected and observed proportions lists.

    This function returns the same output as `miscalibration_area` directly from
    a list of expected proportions (the proportion of data that you expect to
    observe within prediction intervals) and a list of observed proportions (the
    proportion of data that you observe within prediction intervals).

    Args:
        exp_proportions: expected proportion of data within prediction intervals.
        obs_proportions: observed proportion of data within prediction intervals.

    Returns:
        A single scalar that contains the miscalibration area.
    """
    areas = trapezoid_area(
        exp_proportions[:-1],
        exp_proportions[:-1],
        obs_proportions[:-1],
        exp_proportions[1:],
        exp_proportions[1:],
        obs_proportions[1:],
        absolute=True,
    )
    return areas.sum()
miscalibration_area_from_proportions
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/metrics_calibration.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
MIT
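For intuition: when the curve is sampled on a fine grid and stays on one side of the diagonal within each bin, the miscalibration area reduces to the trapezoidal integral of |obs - exp| (the crossing case is what trapezoid_area handles). A quick self-contained check on a toy curve:

import numpy as np

exp_p = np.linspace(0, 1, 101)
obs_p = exp_p ** 2  # toy curve lying entirely below the diagonal

# Trapezoidal rule on |obs - exp|; the analytic area of |x^2 - x| on [0, 1] is 1/6.
gap = np.abs(obs_p - exp_p)
area = np.sum(0.5 * (gap[1:] + gap[:-1]) * np.diff(exp_p))
print(f"{area:.4f}")  # ~0.1666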
def get_proportion_lists_vectorized(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    num_bins: int = 100,
    recal_model: Any = None,
    prop_type: str = "interval",
) -> Tuple[np.ndarray, np.ndarray]:
    """Arrays of expected and observed proportions.

    Returns the expected proportions and observed proportion of points falling
    into intervals corresponding to a range of quantiles.
    Computations here are vectorized for faster execution, but this function is
    not suited when there are memory constraints.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        num_bins: number of discretizations for the probability space [0, 1].
        recal_model: an sklearn isotonic regression model which recalibrates the predictions.
        prop_type: "interval" to measure observed proportions for centered prediction
            intervals, and "quantile" for observed proportions below a predicted quantile.

    Returns:
        A tuple of two numpy arrays, expected proportions and observed proportions.
    """
    # Check that input arrays are flat
    assert_is_flat_same_shape(y_pred, y_std, y_true)
    # Check that input std is positive
    assert_is_positive(y_std)
    # Check that prop_type is one of 'interval' or 'quantile'
    assert prop_type in ["interval", "quantile"]

    # Compute proportions
    exp_proportions = np.linspace(0, 1, num_bins)
    # If we are recalibrating, input proportions are recalibrated proportions
    if recal_model is not None:
        in_exp_proportions = recal_model.predict(exp_proportions)
    else:
        in_exp_proportions = exp_proportions

    residuals = y_pred - y_true
    normalized_residuals = (residuals.flatten() / y_std.flatten()).reshape(-1, 1)
    norm = stats.norm(loc=0, scale=1)
    if prop_type == "interval":
        gaussian_lower_bound = norm.ppf(0.5 - in_exp_proportions / 2.0)
        gaussian_upper_bound = norm.ppf(0.5 + in_exp_proportions / 2.0)

        above_lower = normalized_residuals >= gaussian_lower_bound
        below_upper = normalized_residuals <= gaussian_upper_bound

        within_quantile = above_lower * below_upper
        obs_proportions = np.sum(within_quantile, axis=0).flatten() / len(residuals)
    elif prop_type == "quantile":
        gaussian_quantile_bound = norm.ppf(in_exp_proportions)
        below_quantile = normalized_residuals <= gaussian_quantile_bound
        obs_proportions = np.sum(below_quantile, axis=0).flatten() / len(residuals)

    return exp_proportions, obs_proportions
get_proportion_lists_vectorized
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/metrics_calibration.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
MIT
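The vectorization builds an (n, num_bins) boolean mask, which is exactly the memory cost the non-vectorized variant below avoids. A self-contained sketch of the core computation on perfectly calibrated synthetic data (sizes and seed are illustrative):

import numpy as np
from scipy import stats

rng = np.random.default_rng(1)
n, num_bins = 1000, 100
y_true = rng.normal(size=n)
y_pred = np.zeros(n)  # predict N(0, 1), which matches the data exactly
y_std = np.ones(n)

exp_p = np.linspace(0, 1, num_bins)
z = ((y_pred - y_true) / y_std)[:, None]        # shape (n, 1)
lo = stats.norm.ppf(0.5 - exp_p / 2)
hi = stats.norm.ppf(0.5 + exp_p / 2)
obs_p = np.mean((z >= lo) & (z <= hi), axis=0)  # (n, num_bins) mask, then column means
print(np.round(obs_p[::20], 2))  # should track exp_p[::20] closely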
def get_proportion_lists(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    num_bins: int = 100,
    recal_model: Optional[IsotonicRegression] = None,
    prop_type: str = "interval",
) -> Tuple[np.ndarray, np.ndarray]:
    """Arrays of expected and observed proportions.

    Return arrays of expected and observed proportions of points falling into
    intervals corresponding to a range of quantiles.
    Computations here are not vectorized, in case there are memory constraints.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        num_bins: number of discretizations for the probability space [0, 1].
        recal_model: an sklearn isotonic regression model which recalibrates the predictions.
        prop_type: "interval" to measure observed proportions for centered prediction
            intervals, and "quantile" for observed proportions below a predicted quantile.

    Returns:
        A tuple of two numpy arrays, expected proportions and observed proportions.
    """
    # Check that input arrays are flat
    assert_is_flat_same_shape(y_pred, y_std, y_true)
    # Check that input std is positive
    assert_is_positive(y_std)
    # Check that prop_type is one of 'interval' or 'quantile'
    assert prop_type in ["interval", "quantile"]

    # Compute proportions
    exp_proportions = np.linspace(0, 1, num_bins)
    # If we are recalibrating, input proportions are recalibrated proportions
    if recal_model is not None:
        in_exp_proportions = recal_model.predict(exp_proportions)
    else:
        in_exp_proportions = exp_proportions

    if prop_type == "interval":
        obs_proportions = [
            get_proportion_in_interval(y_pred, y_std, y_true, quantile)
            for quantile in in_exp_proportions
        ]
    elif prop_type == "quantile":
        obs_proportions = [
            get_proportion_under_quantile(y_pred, y_std, y_true, quantile)
            for quantile in in_exp_proportions
        ]

    return exp_proportions, obs_proportions
get_proportion_lists
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/metrics_calibration.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
MIT
def get_proportion_in_interval(
    y_pred: np.ndarray, y_std: np.ndarray, y_true: np.ndarray, quantile: float
) -> float:
    """For a specified quantile, return the proportion of points falling into
    an interval corresponding to that quantile.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        quantile: a specified quantile level.

    Returns:
        A single scalar which is the proportion of the true labels falling into
        the prediction interval for the specified quantile.
    """
    # Check that input arrays are flat
    assert_is_flat_same_shape(y_pred, y_std, y_true)
    # Check that input std is positive
    assert_is_positive(y_std)

    # Compute lower and upper bound for quantile
    norm = stats.norm(loc=0, scale=1)
    lower_bound = norm.ppf(0.5 - quantile / 2)
    upper_bound = norm.ppf(0.5 + quantile / 2)

    # Compute proportion of normalized residuals within lower to upper bound
    residuals = y_pred - y_true
    normalized_residuals = residuals.reshape(-1) / y_std.reshape(-1)

    num_within_quantile = 0
    for resid in normalized_residuals:
        if lower_bound <= resid <= upper_bound:
            num_within_quantile += 1.0
    proportion = num_within_quantile / len(residuals)

    return proportion
get_proportion_in_interval
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/metrics_calibration.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
MIT
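As a sanity check, standard-normal normalized residuals should land in the centered 0.95 interval about 95% of the time; a minimal self-contained version of the same computation:

import numpy as np
from scipy import stats

rng = np.random.default_rng(2)
z = rng.normal(size=10_000)  # normalized residuals from a calibrated model
q = 0.95
lo, hi = stats.norm.ppf(0.5 - q / 2), stats.norm.ppf(0.5 + q / 2)
print(np.mean((z >= lo) & (z <= hi)))  # ~0.95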
def get_proportion_under_quantile(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    quantile: float,
) -> float:
    """Get the proportion of data that are below the predicted quantile.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        quantile: The quantile level to check.

    Returns:
        The proportion of data below the quantile level.
    """
    # Check that input arrays are flat
    assert_is_flat_same_shape(y_pred, y_std, y_true)
    # Check that input std is positive
    assert_is_positive(y_std)

    # Compute the bound for the specified quantile
    norm = stats.norm(loc=0, scale=1)
    quantile_bound = norm.ppf(quantile)

    # Compute proportion of normalized residuals below the quantile bound
    residuals = y_pred - y_true
    normalized_residuals = residuals / y_std

    num_below_quantile = 0
    for resid in normalized_residuals:
        if resid <= quantile_bound:
            num_below_quantile += 1.0
    proportion = num_below_quantile / len(residuals)

    return proportion
get_proportion_under_quantile
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/metrics_calibration.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
MIT
def get_prediction_interval(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    quantile: np.ndarray,
    recal_model: Optional[IsotonicRegression] = None,
) -> Namespace:
    """Return the centered prediction interval corresponding to a quantile.

    For a specified quantile level q (must be a float, or a singleton),
    return the centered prediction interval corresponding to the pair of
    quantiles at levels (0.5 - q/2) and (0.5 + q/2), i.e. the interval that
    has nominal coverage equal to q.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        quantile: The quantile level to check.
        recal_model: A recalibration model to apply before computing the interval.

    Returns:
        Namespace containing the lower and upper bound corresponding to the
        centered interval.
    """
    if isinstance(quantile, float):
        quantile = np.array([quantile])

    # Check that input arrays are flat
    assert_is_flat_same_shape(y_pred, y_std)
    assert_is_flat_same_shape(quantile)
    assert quantile.size == 1
    # Check that input std is positive
    assert_is_positive(y_std)

    if not np.logical_and((0.0 < quantile.item()), (quantile.item() < 1.0)):
        raise ValueError("Quantile must be greater than 0.0 and less than 1.0")

    # If recal_model is not None, calculate recalibrated quantile
    if recal_model is not None:
        quantile = recal_model.predict(quantile)

    # Compute lower and upper bound for quantile
    norm = stats.norm(loc=y_pred, scale=y_std)
    lower_bound = norm.ppf(0.5 - quantile / 2)
    upper_bound = norm.ppf(0.5 + quantile / 2)

    bounds = Namespace(
        upper=upper_bound,
        lower=lower_bound,
    )

    return bounds
get_prediction_interval
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/metrics_calibration.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
MIT
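Without a recalibration model, the interval is just a pair of Gaussian quantiles around each predicted mean; a self-contained equivalent of the computation (example values are arbitrary):

import numpy as np
from scipy import stats

y_pred = np.array([0.0, 1.0, 2.0])
y_std = np.array([1.0, 0.5, 2.0])
q = 0.90  # nominal coverage of the centered interval

lower = stats.norm.ppf(0.5 - q / 2, loc=y_pred, scale=y_std)
upper = stats.norm.ppf(0.5 + q / 2, loc=y_pred, scale=y_std)
print(np.round(lower, 3))  # [-1.645  0.178 -1.29 ]
print(np.round(upper, 3))  # [ 1.645  1.822  5.29 ]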
def get_quantile(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    quantile: np.ndarray,
    recal_model: Optional[IsotonicRegression] = None,
) -> np.ndarray:
    """Return the value corresponding with a quantile.

    For a specified quantile level q (must be a float, or a singleton),
    return the quantile prediction, i.e. the bound that has nominal coverage
    below the bound equal to q.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        quantile: The quantile level to check.
        recal_model: A recalibration model to apply before computing the interval.

    Returns:
        The value at which the quantile is achieved.
    """
    if isinstance(quantile, float):
        quantile = np.array([quantile])

    # Check that input arrays are flat
    assert_is_flat_same_shape(y_pred, y_std)
    assert_is_flat_same_shape(quantile)
    assert quantile.size == 1
    # Check that input std is positive
    assert_is_positive(y_std)

    if not np.logical_and((0.0 < quantile.item()), (quantile.item() < 1.0)):
        raise ValueError("Quantile must be greater than 0.0 and less than 1.0")

    # If recal_model is not None, calculate recalibrated quantile
    if recal_model is not None:
        quantile = recal_model.predict(quantile)

    # Compute quantile bound
    norm = stats.norm(loc=y_pred, scale=y_std)
    quantile_prediction = norm.ppf(quantile).flatten()

    return quantile_prediction
get_quantile
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/metrics_calibration.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_calibration.py
MIT
def nll_gaussian(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    scaled: bool = True,
) -> float:
    """Negative log likelihood for a gaussian.

    The negative log likelihood for held out data (y_true) given predictive
    uncertainty with mean (y_pred) and standard-deviation (y_std).

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        scaled: Whether to scale the negative log likelihood by size of held out set.

    Returns:
        The negative log likelihood for the heldout set.
    """
    # Check that input arrays are flat
    assert_is_flat_same_shape(y_pred, y_std, y_true)

    # Set residuals
    residuals = y_pred - y_true

    # Compute nll
    nll_list = stats.norm.logpdf(residuals, scale=y_std)
    nll = -1 * np.sum(nll_list)

    # Potentially scale so that sum becomes mean
    if scaled:
        nll = nll / len(nll_list)

    return nll
nll_gaussian
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/metrics_scoring_rule.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_scoring_rule.py
MIT
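The per-point term has the familiar closed form 0.5 log(2 pi sigma^2) + (y - mu)^2 / (2 sigma^2); a quick self-contained check that scipy's logpdf agrees (numbers are arbitrary):

import numpy as np
from scipy import stats

mu, sigma, y = 0.3, 0.8, 1.1
by_hand = 0.5 * np.log(2 * np.pi * sigma**2) + (y - mu) ** 2 / (2 * sigma**2)
by_scipy = -stats.norm.logpdf(y - mu, scale=sigma)
print(np.isclose(by_hand, by_scipy))  # True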
def crps_gaussian(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    scaled: bool = True,
) -> float:
    """The negatively oriented continuous ranked probability score for Gaussians.

    Computes CRPS for held out data (y_true) given predictive uncertainty with
    mean (y_pred) and standard-deviation (y_std). Each test point is given
    equal weight in the overall score over the test set.

    Negatively oriented means a smaller value is more desirable.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        scaled: Whether to scale the score by size of held out set.

    Returns:
        The crps for the heldout set.
    """
    # Check that input arrays are flat
    assert_is_flat_same_shape(y_pred, y_std, y_true)

    # Compute crps
    y_standardized = (y_true - y_pred) / y_std
    term_1 = 1 / np.sqrt(np.pi)
    term_2 = 2 * stats.norm.pdf(y_standardized, loc=0, scale=1)
    term_3 = y_standardized * (2 * stats.norm.cdf(y_standardized, loc=0, scale=1) - 1)

    crps_list = -1 * y_std * (term_1 - term_2 - term_3)
    crps = np.sum(crps_list)

    # Potentially scale so that sum becomes mean
    if scaled:
        crps = crps / len(crps_list)

    return crps
crps_gaussian
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/metrics_scoring_rule.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_scoring_rule.py
MIT
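The crps_list expression is the standard closed form for the Gaussian CRPS, sigma * [z (2 Phi(z) - 1) + 2 phi(z) - 1/sqrt(pi)] with z = (y - mu)/sigma. A self-contained check against the sampling definition E|X - y| - 0.5 E|X - X'| (sample size and seed are arbitrary):

import numpy as np
from scipy import stats

mu, sigma, y = 0.0, 1.0, 0.7
z = (y - mu) / sigma
closed = sigma * (
    z * (2 * stats.norm.cdf(z) - 1) + 2 * stats.norm.pdf(z) - 1 / np.sqrt(np.pi)
)

rng = np.random.default_rng(3)
x1 = rng.normal(mu, sigma, size=200_000)
x2 = rng.normal(mu, sigma, size=200_000)
mc = np.mean(np.abs(x1 - y)) - 0.5 * np.mean(np.abs(x1 - x2))
print(f"closed form {closed:.4f}, monte carlo {mc:.4f}")  # both ~0.42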
def check_score(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    scaled: bool = True,
    start_q: float = 0.01,
    end_q: float = 0.99,
    resolution: int = 99,
) -> float:
    """The negatively oriented check score.

    Computes the negatively oriented check score for held out data (y_true)
    given predictive uncertainty with mean (y_pred) and standard-deviation
    (y_std). Each test point and each quantile is given equal weight in the
    overall score over the test set and list of quantiles.

    The score is computed by scanning over a sequence of quantiles of the
    predicted distributions, starting at (start_q) and ending at (end_q).

    Negatively oriented means a smaller value is more desirable.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        scaled: Whether to scale the score by size of held out set.
        start_q: The lower bound of the quantiles to use for computation.
        end_q: The upper bound of the quantiles to use for computation.
        resolution: The number of quantiles to use for computation.

    Returns:
        The check score.
    """
    # Check that input arrays are flat
    assert_is_flat_same_shape(y_pred, y_std, y_true)

    test_qs = np.linspace(start_q, end_q, resolution)

    check_list = []
    for q in test_qs:
        q_level = stats.norm.ppf(q, loc=y_pred, scale=y_std)  # pred quantile
        diff = q_level - y_true
        mask = (diff >= 0).astype(float) - q
        score_per_q = np.mean(mask * diff)
        check_list.append(score_per_q)
    check_score = np.sum(check_list)

    if scaled:
        check_score = check_score / len(check_list)

    return check_score
check_score
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/metrics_scoring_rule.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_scoring_rule.py
MIT
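Each summand is the pinball (check) loss evaluated at a predicted quantile. A worked single-point example matching one pass of the loop (values are arbitrary):

import numpy as np
from scipy import stats

y_pred, y_std, y_true, q = 0.0, 1.0, 0.5, 0.9
q_level = stats.norm.ppf(q, loc=y_pred, scale=y_std)  # ~1.2816
diff = q_level - y_true                               # ~0.7816 (overshoot)
loss = (float(diff >= 0) - q) * diff                  # (1 - 0.9) * 0.7816
print(round(loss, 4))  # 0.0782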
def interval_score(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    scaled: bool = True,
    start_p: float = 0.01,
    end_p: float = 0.99,
    resolution: int = 99,
) -> float:
    """The negatively oriented interval score.

    Compute the negatively oriented interval score for held out data (y_true)
    given predictive uncertainty with mean (y_pred) and standard-deviation
    (y_std). Each test point and each percentile is given equal weight in the
    overall score over the test set and list of quantiles.

    Negatively oriented means a smaller value is more desirable.

    This metric is computed by scanning over a sequence of prediction intervals.
    Where p is the amount of probability captured from a centered prediction
    interval, intervals are formed starting at p=(start_p) and ending at
    p=(end_p).

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        scaled: Whether to scale the score by size of held out set.
        start_p: The lower bound of probability to capture in a prediction interval.
        end_p: The upper bound of probability to capture in a prediction interval.
        resolution: The number of prediction intervals to use to compute the metric.

    Returns:
        The interval score.
    """
    # Check that input arrays are flat
    assert_is_flat_same_shape(y_pred, y_std, y_true)

    test_ps = np.linspace(start_p, end_p, resolution)

    int_list = []
    for p in test_ps:
        low_p, high_p = 0.5 - (p / 2.0), 0.5 + (p / 2.0)  # p% PI
        pred_l = stats.norm.ppf(low_p, loc=y_pred, scale=y_std)
        pred_u = stats.norm.ppf(high_p, loc=y_pred, scale=y_std)

        below_l = ((pred_l - y_true) > 0).astype(float)
        above_u = ((y_true - pred_u) > 0).astype(float)

        score_per_p = (
            (pred_u - pred_l)
            + (2.0 / (1 - p)) * (pred_l - y_true) * below_l
            + (2.0 / (1 - p)) * (y_true - pred_u) * above_u
        )
        mean_score_per_p = np.mean(score_per_p)
        int_list.append(mean_score_per_p)
    int_score = np.sum(int_list)

    if scaled:
        int_score = int_score / len(int_list)

    return int_score
interval_score
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/metrics_scoring_rule.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/metrics_scoring_rule.py
MIT
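One iteration of the loop is the classic Winkler interval score: the interval width, plus 2/(1 - p) times any amount by which the observation falls outside the bounds. A worked single-point example (values are arbitrary):

import numpy as np
from scipy import stats

y_pred, y_std, y_true, p = 0.0, 1.0, 2.0, 0.8
lo = stats.norm.ppf(0.5 - p / 2, loc=y_pred, scale=y_std)  # ~-1.2816
hi = stats.norm.ppf(0.5 + p / 2, loc=y_pred, scale=y_std)  # ~+1.2816

score = hi - lo                               # width ~2.563
score += (2 / (1 - p)) * max(lo - y_true, 0)  # penalty if below the interval
score += (2 / (1 - p)) * max(y_true - hi, 0)  # penalty if above: 10 * ~0.718
print(round(score, 3))  # ~9.748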
def get_q_idx(exp_props: np.ndarray, q: float) -> int:
    """Utility function which outputs the array index of an element.

    Gets the (approximate) index of a specified probability value, q, in the
    expected proportions array. Used as a utility function in isotonic
    regression recalibration.

    Args:
        exp_props: 1D array of expected probabilities.
        q: a specified probability float.

    Returns:
        An index which specifies the (approximate) index of q in exp_props.
    """
    num_pts = exp_props.shape[0]
    target_idx = None
    for idx, x in enumerate(exp_props):
        if idx + 1 == num_pts:
            if round(q, 2) == round(float(exp_props[-1]), 2):
                target_idx = exp_props.shape[0] - 1
            break
        if x <= q < exp_props[idx + 1]:
            target_idx = idx
            break
    if target_idx is None:
        raise ValueError("q must be within exp_props")

    return target_idx
get_q_idx
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/recalibration.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/recalibration.py
MIT
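For a sorted grid, numpy.searchsorted finds the same half-open bin without a Python loop; a small self-contained check (grid and query values are illustrative):

import numpy as np

exp_props = np.linspace(0, 1, 5)  # [0.0, 0.25, 0.5, 0.75, 1.0]
for q in (0.6, 1.0):
    idx = int(np.searchsorted(exp_props, q, side="right") - 1)
    print(q, idx)  # 0.6 -> 2 (bin [0.5, 0.75)); 1.0 -> 4 (final element)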
def iso_recal(
    exp_props: np.ndarray,
    obs_props: np.ndarray,
) -> IsotonicRegression:
    """Recalibration algorithm based on isotonic regression.

    Fits and outputs an isotonic recalibration model that maps observed
    probabilities to expected probabilities. This mapping provides
    the necessary adjustments to produce better calibrated outputs.

    Args:
        exp_props: 1D array of expected probabilities (values must span [0, 1]).
        obs_props: 1D array of observed probabilities.

    Returns:
        An sklearn IsotonicRegression recalibration model.
    """
    # Flatten
    exp_props = exp_props.flatten()
    obs_props = obs_props.flatten()
    min_obs = np.min(obs_props)
    max_obs = np.max(obs_props)

    iso_model = IsotonicRegression(increasing=True, out_of_bounds="clip")
    # Just need observed prop values between 0 and 1;
    # problematic if min_obs_p > 0 and max_obs_p < 1
    if not (min_obs == 0.0 and max_obs == 1.0):
        print("Obs props not ideal: from {} to {}".format(min_obs, max_obs))

    exp_0_idx = get_q_idx(exp_props, 0.0)
    exp_1_idx = get_q_idx(exp_props, 1.0)
    within_01 = obs_props[exp_0_idx : exp_1_idx + 1]

    beg_idx, end_idx = None, None
    # Handle beg_idx
    if exp_0_idx != 0:
        min_obs_below = np.min(obs_props[:exp_0_idx])
        min_obs_within = np.min(within_01)
        if min_obs_below < min_obs_within:
            i = exp_0_idx - 1
            while obs_props[i] > min_obs_below:
                i -= 1
            beg_idx = i
        elif np.sum((within_01 == min_obs_within).astype(float)) > 1:
            # Multiple minima in within_01 ==> get last min idx
            i = exp_1_idx - 1
            while obs_props[i] > min_obs_within:
                i -= 1
            beg_idx = i
        elif np.sum((within_01 == min_obs_within).astype(float)) == 1:
            beg_idx = int(np.argmin(within_01) + exp_0_idx)
        else:
            raise RuntimeError("Inspect input arrays. Cannot set beginning index.")
    else:
        beg_idx = exp_0_idx

    # Handle end_idx
    if exp_1_idx < obs_props.shape[0] - 1:
        max_obs_above = np.max(obs_props[exp_1_idx + 1 :])
        max_obs_within = np.max(within_01)
        if max_obs_above > max_obs_within:
            i = exp_1_idx + 1
            while obs_props[i] < max_obs_above:
                i += 1
            end_idx = i + 1
        elif np.sum((within_01 == max_obs_within).astype(float)) > 1:
            # Multiple maxima in within_01 ==> get last max idx
            i = beg_idx
            while obs_props[i] < max_obs_within:
                i += 1
            end_idx = i + 1
        elif np.sum((within_01 == max_obs_within).astype(float)) == 1:
            end_idx = int(exp_0_idx + np.argmax(within_01) + 1)
        else:
            raise RuntimeError("Inspect input arrays. Cannot set ending index.")
    else:
        end_idx = exp_1_idx + 1

    if end_idx <= beg_idx:
        raise RuntimeError("Ending index before beginning index")

    filtered_obs_props = obs_props[beg_idx:end_idx]
    filtered_exp_props = exp_props[beg_idx:end_idx]

    try:
        iso_model = iso_model.fit(filtered_obs_props, filtered_exp_props)
    except Exception:
        raise RuntimeError("Failed to fit isotonic regression model")

    return iso_model
iso_recal
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/recalibration.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/recalibration.py
MIT
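A minimal end-to-end sketch of the isotonic step, assuming scikit-learn is available; it skips the boundary-index trimming performed above and fits directly on a clean monotone curve:

import numpy as np
from sklearn.isotonic import IsotonicRegression

exp_p = np.linspace(0, 1, 100)
obs_p = exp_p ** 2  # overconfident model: observed coverage below expected

recal = IsotonicRegression(increasing=True, out_of_bounds="clip")
recal.fit(obs_p, exp_p)  # learn the map observed -> expected

# To actually observe 50% coverage, request roughly the sqrt(0.5) ~ 0.707 level:
print(np.round(recal.predict([0.5]), 3))  # ~0.707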
def optimize_recalibration_ratio(
    y_mean: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    criterion: str = "ma_cal",
    optimizer_bounds: Tuple[float, float] = (1e-2, 1e2),
) -> float:
    """Scale factor which uniformly recalibrates predicted standard deviations.

    Searches via black-box optimization the standard deviation scale factor
    (opt_ratio) which produces the best recalibration, i.e. the updated
    standard deviation can be written as opt_ratio * y_std.

    Args:
        y_mean: 1D array of the predicted means for the recalibration dataset.
        y_std: 1D array of the predicted standard deviations for the recalibration dataset.
        y_true: 1D array of the true means for the recalibration dataset.
        criterion: calibration metric to optimize for during recalibration;
            must be one of {"ma_cal", "rms_cal", "miscal"}.
        optimizer_bounds: The bounds for the ratio given to the optimizer.

    Returns:
        A single scalar which optimally recalibrates the predicted standard deviations.
    """
    if criterion == "ma_cal":
        cal_fn = uct.metrics.mean_absolute_calibration_error
        worst_cal = 0.5
    elif criterion == "rms_cal":
        cal_fn = uct.metrics.root_mean_squared_calibration_error
        worst_cal = np.sqrt(1.0 / 3.0)
    elif criterion == "miscal":
        cal_fn = uct.metrics.miscalibration_area
        worst_cal = 0.5
    else:
        raise RuntimeError("Wrong criterion option")

    def obj(ratio):
        # If ratio is 0, return worst-possible calibration metric
        if ratio == 0:
            return worst_cal
        curr_cal = cal_fn(y_mean, ratio * y_std, y_true)
        return curr_cal

    # method="bounded" makes the bounds explicit across scipy versions
    result = minimize_scalar(fun=obj, bounds=optimizer_bounds, method="bounded")
    opt_ratio = result.x

    if not result.success:
        # Warn (requires `import warnings` at module level) rather than raise,
        # so the fallback logic below still runs
        warnings.warn("Optimization did not succeed")

    original_cal = cal_fn(y_mean, y_std, y_true)
    ratio_cal = cal_fn(y_mean, opt_ratio * y_std, y_true)
    if ratio_cal > original_cal:
        warnings.warn(
            "No better calibration found, no recalibration performed and returning original uncertainties"
        )
        opt_ratio = 1.0

    return opt_ratio
optimize_recalibration_ratio
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/recalibration.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/recalibration.py
MIT
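A self-contained sketch of the same std-scaling idea, using a simple mean-absolute calibration error rather than the toolbox metrics (names, sizes, and the seed are illustrative):

import numpy as np
from scipy import stats
from scipy.optimize import minimize_scalar

rng = np.random.default_rng(4)
n = 2000
y_true = rng.normal(size=n)
y_mean = np.zeros(n)
y_std = np.full(n, 2.0)  # twice too wide, so the optimal ratio is ~0.5

def ma_cal(ratio):
    # Mean absolute gap between expected and observed interval coverage.
    exp_p = np.linspace(0, 1, 50)
    z = (y_mean - y_true) / (ratio * y_std)
    lo, hi = stats.norm.ppf(0.5 - exp_p / 2), stats.norm.ppf(0.5 + exp_p / 2)
    obs_p = np.mean((z[:, None] >= lo) & (z[:, None] <= hi), axis=0)
    return np.mean(np.abs(exp_p - obs_p))

result = minimize_scalar(ma_cal, bounds=(1e-2, 1e2), method="bounded")
print(round(result.x, 2))  # typically close to 0.5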
def get_std_recalibrator(
    y_mean: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    criterion: str = "ma_cal",
    optimizer_bounds: Tuple[float, float] = (1e-2, 1e2),
) -> Callable[[np.ndarray], np.ndarray]:
    """Standard deviation recalibrator.

    Computes the standard deviation recalibration ratio and returns a function
    which takes in an array of uncalibrated standard deviations and returns an
    array of recalibrated standard deviations.

    Args:
        y_mean: 1D array of the predicted means for the recalibration dataset.
        y_std: 1D array of the predicted standard deviations for the recalibration dataset.
        y_true: 1D array of the true means for the recalibration dataset.
        criterion: calibration metric to optimize for during recalibration;
            must be one of {"ma_cal", "rms_cal", "miscal"}.
        optimizer_bounds: The bounds for the ratio given to the optimizer.

    Returns:
        A function which takes uncalibrated standard deviations as input and
        outputs the recalibrated standard deviations.
    """
    std_recal_ratio = optimize_recalibration_ratio(
        y_mean, y_std, y_true, criterion, optimizer_bounds=optimizer_bounds
    )

    def std_recalibrator(std_arr):
        return std_recal_ratio * std_arr

    return std_recalibrator
get_std_recalibrator
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/recalibration.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/recalibration.py
MIT
def get_quantile_recalibrator(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
) -> Callable[[np.ndarray, np.ndarray, Union[float, np.ndarray]], np.ndarray]:
    """Quantile recalibrator.

    Fits an isotonic regression recalibration model and returns a function
    which takes in the mean and standard deviation predictions and a specified
    quantile level, and returns the recalibrated quantile.

    Args:
        y_pred: 1D array of the predicted means for the recalibration dataset.
        y_std: 1D array of the predicted standard deviations for the recalibration dataset.
        y_true: 1D array of the true means for the recalibration dataset.

    Returns:
        A function which outputs the recalibrated quantile prediction.
    """
    exp_props, obs_props = uct.get_proportion_lists_vectorized(
        y_pred, y_std, y_true, prop_type="quantile"
    )
    iso_model = iso_recal(exp_props, obs_props)

    def quantile_recalibrator(y_pred, y_std, quantile):
        recal_quantile = uct.metrics_calibration.get_quantile(
            y_pred, y_std, quantile, recal_model=iso_model
        )
        return recal_quantile

    return quantile_recalibrator
get_quantile_recalibrator
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/recalibration.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/recalibration.py
MIT
def get_interval_recalibrator(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
) -> Callable[[np.ndarray, np.ndarray, Union[float, np.ndarray]], np.ndarray]:
    """Prediction interval recalibrator.

    Fits an isotonic regression recalibration model and returns a function
    which takes in the mean and standard deviation predictions and a specified
    centered interval coverage level, and returns the recalibrated interval.

    Args:
        y_pred: 1D array of the predicted means for the recalibration dataset.
        y_std: 1D array of the predicted standard deviations for the recalibration dataset.
        y_true: 1D array of the true means for the recalibration dataset.

    Returns:
        A function which outputs the recalibrated prediction interval.
    """
    exp_props, obs_props = uct.get_proportion_lists_vectorized(
        y_pred, y_std, y_true, prop_type="interval"
    )
    iso_model = iso_recal(exp_props, obs_props)

    def interval_recalibrator(y_pred, y_std, quantile):
        recal_bounds = uct.metrics_calibration.get_prediction_interval(
            y_pred, y_std, quantile, recal_model=iso_model
        )
        return recal_bounds

    return interval_recalibrator
get_interval_recalibrator
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/recalibration.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/recalibration.py
MIT
def assert_is_flat_same_shape(*args: Any) -> Union[bool, NoReturn]:
    """Check if inputs are all same-length 1d numpy.ndarray.

    Args:
        args: the numpy arrays to check.

    Returns:
        True if all arrays are flat and the same shape, or else raises assertion error.
    """
    assert len(args) > 0
    assert isinstance(args[0], np.ndarray), "All inputs must be of type numpy.ndarray"

    first_shape = args[0].shape
    for arr in args:
        assert isinstance(arr, np.ndarray), "All inputs must be of type numpy.ndarray"
        assert len(arr.shape) == 1, "All inputs must be 1d numpy.ndarray"
        assert arr.shape == first_shape, "All inputs must have the same length"

    return True
assert_is_flat_same_shape
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/utils.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/utils.py
MIT
def assert_is_positive(*args: Any) -> Union[bool, NoReturn]:
    """Assert that all numpy arrays are positive.

    Args:
        args: the numpy arrays to check.

    Returns:
        True if all elements in all arrays are positive values, or else raises
        assertion error.
    """
    assert len(args) > 0
    for arr in args:
        assert isinstance(arr, np.ndarray), "All inputs must be of type numpy.ndarray"
        assert np.all(arr > 0.0)

    return True
assert_is_positive
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/utils.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/utils.py
MIT
def trapezoid_area(
    xl: np.ndarray,
    al: np.ndarray,
    bl: np.ndarray,
    xr: np.ndarray,
    ar: np.ndarray,
    br: np.ndarray,
    absolute: bool = True,
) -> Numeric:
    """Calculate the area of a vertical-sided trapezoid, formed by connecting
    the following points: (xl, al) - (xl, bl) - (xr, br) - (xr, ar) - (xl, al).

    This function considers the case that the edges of the trapezoid might
    cross, and explicitly accounts for this.

    Args:
        xl: The x coordinate of the left-hand points of the trapezoid
        al: The y coordinate of the first left-hand point of the trapezoid
        bl: The y coordinate of the second left-hand point of the trapezoid
        xr: The x coordinate of the right-hand points of the trapezoid
        ar: The y coordinate of the first right-hand point of the trapezoid
        br: The y coordinate of the second right-hand point of the trapezoid
        absolute: Whether to calculate the absolute area, or allow a negative
            area (e.g. if a and b are swapped)

    Returns:
        The area of the given trapezoid.
    """
    # Differences
    dl = bl - al
    dr = br - ar

    # The ordering is the same for both sides iff they do not cross.
    cross = dl * dr < 0

    # Treat the degenerate case as a trapezoid
    cross = cross * (1 - ((dl == 0) * (dr == 0)))

    # Trapezoid for non-crossing lines
    area_trapezoid = (xr - xl) * 0.5 * ((bl - al) + (br - ar))
    if absolute:
        area_trapezoid = np.abs(area_trapezoid)

    # Hourglass for crossing lines.
    # NaNs should only appear in the degenerate and parallel cases.
    # Those NaNs won't get through the final multiplication so it's ok.
    with np.errstate(divide="ignore", invalid="ignore"):
        x_intersect = intersection((xl, bl), (xr, br), (xl, al), (xr, ar))[0]
    tl_area = 0.5 * (bl - al) * (x_intersect - xl)
    tr_area = 0.5 * (br - ar) * (xr - x_intersect)
    if absolute:
        area_hourglass = np.abs(tl_area) + np.abs(tr_area)
    else:
        area_hourglass = tl_area + tr_area

    # The nan_to_num function allows us to do 0 * nan = 0
    return (1 - cross) * area_trapezoid + cross * np.nan_to_num(area_hourglass)
trapezoid_area
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/utils.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/utils.py
MIT
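A small self-contained check of the hourglass branch: with left heights (0, 1) and right heights (1, 0), the edges cross at x = 0.5 and the absolute area is two triangles of 0.25 each. The helper name here is illustrative, not part of the module:

def hourglass_area(xl, al, bl, xr, ar, br):
    # Split at the crossing point, then sum the two triangle areas.
    t = (bl - al) / ((bl - al) - (br - ar))  # fractional position of the crossing
    x_cross = xl + (xr - xl) * t
    return 0.5 * abs(bl - al) * (x_cross - xl) + 0.5 * abs(br - ar) * (xr - x_cross)

print(hourglass_area(0.0, 0.0, 1.0, 1.0, 1.0, 0.0))  # 0.5
# Non-crossing comparison: (xr - xl) * 0.5 * ((bl - al) + (br - ar)) = 1.0 for a unit square.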
def intersection(
    p1: Tuple[Numeric, Numeric],
    p2: Tuple[Numeric, Numeric],
    p3: Tuple[Numeric, Numeric],
    p4: Tuple[Numeric, Numeric],
) -> Tuple[Numeric, Numeric]:
    """Calculate the intersection of two lines between four points, as defined
    in https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection.

    This function is vectorized, and can be used to calculate the intersections
    of entire arrays of points at the same time.

    Args:
        p1: The point (x1, y1), first point of Line 1
        p2: The point (x2, y2), second point of Line 1
        p3: The point (x3, y3), first point of Line 2
        p4: The point (x4, y4), second point of Line 2

    Returns:
        The point of intersection of the two lines, or (np.nan, np.nan) if the
        lines are parallel.
    """
    x1, y1 = p1
    x2, y2 = p2
    x3, y3 = p3
    x4, y4 = p4

    D = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
    x = ((x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)) / D
    y = ((x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)) / D

    return x, y
intersection
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/utils.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/utils.py
MIT
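A quick scalar check of the determinant formula with a known answer: y = x and y = -x + 2 cross at (1, 1). The same expressions broadcast unchanged over numpy arrays:

x1, y1, x2, y2 = 0.0, 0.0, 2.0, 2.0  # line 1 through (0, 0) and (2, 2)
x3, y3, x4, y4 = 0.0, 2.0, 2.0, 0.0  # line 2 through (0, 2) and (2, 0)

D = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
x = ((x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)) / D
y = ((x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)) / D
print(x, y)  # 1.0 1.0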
def plot_xy(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    x: np.ndarray,
    n_subset: Union[int, None] = None,
    ylims: Union[Tuple[float, float], None] = None,
    xlims: Union[Tuple[float, float], None] = None,
    num_stds_confidence_bound: int = 2,
    leg_loc: Union[int, str] = 3,
    ax: Union[matplotlib.axes.Axes, None] = None,
) -> matplotlib.axes.Axes:
    """Plot one-dimensional inputs with associated predicted values, predictive
    uncertainties, and true values.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        x: 1D array of input values for the held out dataset.
        n_subset: Number of points to plot after filtering.
        ylims: a tuple of y axis plotting bounds, given as (lower, upper).
        xlims: a tuple of x axis plotting bounds, given as (lower, upper).
        num_stds_confidence_bound: width of confidence band, in terms of number
            of standard deviations.
        leg_loc: location of legend as a str or legend code int.
        ax: matplotlib.axes.Axes object.

    Returns:
        matplotlib.axes.Axes object with plot added.
    """
    # Create ax if it doesn't exist
    if ax is None:
        fig, ax = plt.subplots(figsize=(5, 5))

    # Order points in order of increasing x
    order = np.argsort(x)
    y_pred, y_std, y_true, x = (
        y_pred[order],
        y_std[order],
        y_true[order],
        x[order],
    )

    # Optionally select a subset
    if n_subset is not None:
        [y_pred, y_std, y_true, x] = filter_subset([y_pred, y_std, y_true, x], n_subset)

    intervals = num_stds_confidence_bound * y_std

    h1 = ax.plot(x, y_true, ".", mec="#ff7f0e", mfc="None")
    h2 = ax.plot(x, y_pred, "-", c="#1f77b4", linewidth=2)
    h3 = ax.fill_between(
        x,
        y_pred - intervals,
        y_pred + intervals,
        color="lightsteelblue",
        alpha=0.4,
    )
    ax.legend(
        [h1[0], h2[0], h3],
        ["Observations", "Predictions", "$95\%$ Interval"],
        loc=leg_loc,
    )

    # Format plot
    if ylims is not None:
        ax.set_ylim(ylims)

    if xlims is not None:
        ax.set_xlim(xlims)

    ax.set_xlabel("$x$")
    ax.set_ylabel("$y$")
    ax.set_title("Confidence Band")
    ax.set_aspect(1.0 / ax.get_data_ratio(), adjustable="box")

    return ax
plot_xy
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/viz.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/viz.py
MIT
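A hypothetical usage sketch on synthetic sine data. It assumes the package is installed (pip install uncertainty-toolbox) and that plot_xy is exposed at the top level as uct.plot_xy; adjust the import path if your version differs:

import numpy as np
import matplotlib.pyplot as plt
import uncertainty_toolbox as uct

rng = np.random.default_rng(5)
x = np.linspace(0, 2 * np.pi, 100)
y_true = np.sin(x) + 0.2 * rng.normal(size=x.size)
y_pred = np.sin(x)
y_std = np.full(x.size, 0.2)  # the true noise scale, so the band is calibrated

uct.plot_xy(y_pred, y_std, y_true, x)
plt.show()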
def plot_intervals(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    n_subset: Union[int, None] = None,
    ylims: Union[Tuple[float, float], None] = None,
    num_stds_confidence_bound: int = 2,
    ax: Union[matplotlib.axes.Axes, None] = None,
) -> matplotlib.axes.Axes:
    """Plot predictions and predictive intervals versus true values.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        n_subset: Number of points to plot after filtering.
        ylims: a tuple of y axis plotting bounds, given as (lower, upper).
        num_stds_confidence_bound: width of intervals, in terms of number of
            standard deviations.
        ax: matplotlib.axes.Axes object.

    Returns:
        matplotlib.axes.Axes object with plot added.
    """
    # Create ax if it doesn't exist
    if ax is None:
        fig, ax = plt.subplots(figsize=(5, 5))

    # Optionally select a subset
    if n_subset is not None:
        [y_pred, y_std, y_true] = filter_subset([y_pred, y_std, y_true], n_subset)

    # Compute intervals
    intervals = num_stds_confidence_bound * y_std

    # Plot
    ax.errorbar(
        y_true,
        y_pred,
        intervals,
        fmt="o",
        ls="none",
        linewidth=1.5,
        c="#1f77b4",
        alpha=0.5,
    )
    h1 = ax.plot(y_true, y_pred, "o", c="#1f77b4")

    # Determine lims
    if ylims is None:
        intervals_lower_upper = [y_pred - intervals, y_pred + intervals]
        lims_ext = [
            int(np.floor(np.min(intervals_lower_upper[0]))),
            int(np.ceil(np.max(intervals_lower_upper[1]))),
        ]
    else:
        lims_ext = ylims

    # Plot 45-degree line
    h2 = ax.plot(lims_ext, lims_ext, "--", linewidth=1.5, c="#ff7f0e")

    # Legend
    ax.legend([h1[0], h2[0]], ["Predictions", "$f(x) = x$"], loc=4)

    # Format plot
    ax.set_xlim(lims_ext)
    ax.set_ylim(lims_ext)
    ax.set_xlabel("Observed Values")
    ax.set_ylabel("Predicted Values and Intervals")
    ax.set_title("Prediction Intervals")
    ax.set_aspect(1.0 / ax.get_data_ratio(), adjustable="box")

    return ax
plot_intervals
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/viz.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/viz.py
MIT
def plot_intervals_ordered( y_pred: np.ndarray, y_std: np.ndarray, y_true: np.ndarray, n_subset: Union[int, None] = None, ylims: Union[Tuple[float, float], None] = None, num_stds_confidence_bound: int = 2, ax: Union[matplotlib.axes.Axes, None] = None, ) -> matplotlib.axes.Axes: """Plot predictions and predictive intervals versus true values, with points ordered by true value along x-axis. Args: y_pred: 1D array of the predicted means for the held out dataset. y_std: 1D array of the predicted standard deviations for the held out dataset. y_true: 1D array of the true labels in the held out dataset. n_subset: Number of points to plot after filtering. ylims: a tuple of y axis plotting bounds, given as (lower, upper). num_stds_confidence_bound: width of intervals, in terms of number of standard deviations. ax: matplotlib.axes.Axes object. Returns: matplotlib.axes.Axes object with plot added. """ # Create ax if it doesn't exist if ax is None: fig, ax = plt.subplots(figsize=(5, 5)) # Optionally select a subset if n_subset is not None: [y_pred, y_std, y_true] = filter_subset([y_pred, y_std, y_true], n_subset) order = np.argsort(y_true.flatten()) y_pred, y_std, y_true = y_pred[order], y_std[order], y_true[order] xs = np.arange(len(order)) intervals = num_stds_confidence_bound * y_std # Plot ax.errorbar( xs, y_pred, intervals, fmt="o", ls="none", linewidth=1.5, c="#1f77b4", alpha=0.5, ) h1 = ax.plot(xs, y_pred, "o", c="#1f77b4") h2 = ax.plot(xs, y_true, "--", linewidth=2.0, c="#ff7f0e") # Legend ax.legend([h1[0], h2[0]], ["Predicted Values", "Observed Values"], loc=4) # Determine lims if ylims is None: intervals_lower_upper = [y_pred - intervals, y_pred + intervals] lims_ext = [ int(np.floor(np.min(intervals_lower_upper[0]))), int(np.ceil(np.max(intervals_lower_upper[1]))), ] else: lims_ext = ylims # Format plot ax.set_ylim(lims_ext) ax.set_xlabel("Index (Ordered by Observed Value)") ax.set_ylabel("Predicted Values and Intervals") ax.set_title("Ordered Prediction Intervals") ax.set_aspect(1.0 / ax.get_data_ratio(), adjustable="box") return ax
Plot predictions and predictive intervals versus true values, with points ordered by true value along x-axis. Args: y_pred: 1D array of the predicted means for the held out dataset. y_std: 1D array of the predicted standard deviations for the held out dataset. y_true: 1D array of the true labels in the held out dataset. n_subset: Number of points to plot after filtering. ylims: a tuple of y axis plotting bounds, given as (lower, upper). num_stds_confidence_bound: width of intervals, in terms of number of standard deviations. ax: matplotlib.axes.Axes object. Returns: matplotlib.axes.Axes object with plot added.
plot_intervals_ordered
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/viz.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/viz.py
MIT
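plot_intervals and plot_intervals_ordered take identical inputs and differ only in their x-axis; a sketch (synthetic, illustrative data) drawing both side by side:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(1)
y_true = rng.normal(0, 1, 200)
y_pred = y_true + rng.normal(0, 0.3, 200)
y_std = 0.3 * np.ones(200)

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
plot_intervals(y_pred, y_std, y_true, ax=ax1)          # x-axis: observed values
plot_intervals_ordered(y_pred, y_std, y_true, ax=ax2)  # x-axis: index, sorted by observed value
plt.show()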
def plot_calibration( y_pred: np.ndarray, y_std: np.ndarray, y_true: np.ndarray, n_subset: Union[int, None] = None, curve_label: Union[str, None] = None, vectorized: bool = True, exp_props: Union[np.ndarray, None] = None, obs_props: Union[np.ndarray, None] = None, ax: Union[matplotlib.axes.Axes, None] = None, prop_type: str = "interval", ) -> matplotlib.axes.Axes: """Plot the observed proportion vs prediction proportion of outputs falling into a range of intervals, and display miscalibration area. Args: y_pred: 1D array of the predicted means for the held out dataset. y_std: 1D array of the predicted standard deviations for the held out dataset. y_true: 1D array of the true labels in the held out dataset. n_subset: Number of points to plot after filtering. curve_label: legend label str for calibration curve. vectorized: plot using get_proportion_lists_vectorized. exp_props: plot using the given expected proportions. obs_props: plot using the given observed proportions. ax: matplotlib.axes.Axes object. prop_type: "interval" to measure observed proportions for centered prediction intervals, and "quantile" for observed proportions below a predicted quantile. Ignored if exp_props and obs_props are provided as inputs. Returns: matplotlib.axes.Axes object with plot added. """ # Create ax if it doesn't exist if ax is None: fig, ax = plt.subplots(figsize=(5, 5)) # Optionally select a subset if n_subset is not None: [y_pred, y_std, y_true] = filter_subset([y_pred, y_std, y_true], n_subset) if (exp_props is None) or (obs_props is None): # Compute exp_proportions and obs_proportions if vectorized: ( exp_proportions, obs_proportions, ) = get_proportion_lists_vectorized( y_pred, y_std, y_true, prop_type=prop_type ) else: (exp_proportions, obs_proportions) = get_proportion_lists( y_pred, y_std, y_true, prop_type=prop_type ) else: # If expected and observed proportions are given exp_proportions = np.array(exp_props).flatten() obs_proportions = np.array(obs_props).flatten() if exp_proportions.shape != obs_proportions.shape: raise RuntimeError("exp_props and obs_props shape mismatch") # Set label if curve_label is None: curve_label = "Predictor" # Plot ax.plot([0, 1], [0, 1], "--", label="Ideal", c="#ff7f0e") ax.plot(exp_proportions, obs_proportions, label=curve_label, c="#1f77b4") ax.fill_between(exp_proportions, exp_proportions, obs_proportions, alpha=0.2) # Format plot ax.set_xlabel("Predicted Proportion in Interval") ax.set_ylabel("Observed Proportion in Interval") ax.axis("square") buff = 0.01 ax.set_xlim([0 - buff, 1 + buff]) ax.set_ylim([0 - buff, 1 + buff]) ax.set_title("Average Calibration") # Compute miscalibration area miscalibration_area = miscalibration_area_from_proportions( exp_proportions=exp_proportions, obs_proportions=obs_proportions ) # Annotate plot with the miscalibration area ax.text( x=0.95, y=0.05, s="Miscalibration area = %.2f" % miscalibration_area, verticalalignment="bottom", horizontalalignment="right", fontsize="small", ) return ax
Plot the observed proportion vs prediction proportion of outputs falling into a range of intervals, and display miscalibration area. Args: y_pred: 1D array of the predicted means for the held out dataset. y_std: 1D array of the predicted standard deviations for the held out dataset. y_true: 1D array of the true labels in the held out dataset. n_subset: Number of points to plot after filtering. curve_label: legend label str for calibration curve. vectorized: plot using get_proportion_lists_vectorized. exp_props: plot using the given expected proportions. obs_props: plot using the given observed proportions. ax: matplotlib.axes.Axes object. prop_type: "interval" to measure observed proportions for centered prediction intervals, and "quantile" for observed proportions below a predicted quantile. Ignored if exp_props and obs_props are provided as inputs. Returns: matplotlib.axes.Axes object with plot added.
plot_calibration
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/viz.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/viz.py
MIT
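When expected and observed proportions are already available, plot_calibration skips the metric computation entirely and the y_* arrays go unused; a sketch with made-up proportions for a hypothetical overconfident model:

import numpy as np

exp_props = np.linspace(0, 1, 50)
obs_props = exp_props ** 1.5  # hypothetical curve below the diagonal (overconfident)

# y_pred / y_std / y_true are not touched on this code path, but passing the
# real arrays is harmless if they are available.
ax = plot_calibration(None, None, None, exp_props=exp_props, obs_props=obs_props)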
def plot_adversarial_group_calibration(
    y_pred: np.ndarray,
    y_std: np.ndarray,
    y_true: np.ndarray,
    n_subset: Union[int, None] = None,
    cali_type: str = "mean_abs",
    curve_label: Union[str, None] = None,
    group_size: Union[np.ndarray, None] = None,
    score_mean: Union[np.ndarray, None] = None,
    score_stderr: Union[np.ndarray, None] = None,
    ax: Union[matplotlib.axes.Axes, None] = None,
) -> matplotlib.axes.Axes:
    """Plot adversarial group calibration by varying group size from 0% to 100%
    of the dataset size and recording the worst calibration error observed for
    each group size.

    Args:
        y_pred: 1D array of the predicted means for the held out dataset.
        y_std: 1D array of the predicted standard deviations for the held out dataset.
        y_true: 1D array of the true labels in the held out dataset.
        n_subset: Number of points to plot after filtering.
        cali_type: Calibration type str.
        curve_label: legend label str for calibration curve.
        group_size: 1D array of group size ratios in [0, 1].
        score_mean: 1D array of metric means for group size ratios in group_size.
        score_stderr: 1D array of metric standard deviations for group size
            ratios in group_size.
        ax: matplotlib.axes.Axes object.

    Returns:
        matplotlib.axes.Axes object with plot added.
    """
    # Create ax if it doesn't exist
    if ax is None:
        fig, ax = plt.subplots(figsize=(7, 5))

    # Optionally select a subset
    if n_subset is not None:
        [y_pred, y_std, y_true] = filter_subset([y_pred, y_std, y_true], n_subset)

    # Compute group_size, score_mean, score_stderr
    if (group_size is None) or (score_mean is None):
        # Compute adversarial group calibration
        adv_group_cali_namespace = adversarial_group_calibration(
            y_pred, y_std, y_true, cali_type=cali_type
        )
        group_size = adv_group_cali_namespace.group_size
        score_mean = adv_group_cali_namespace.score_mean
        score_stderr = adv_group_cali_namespace.score_stderr
    else:
        # If group sizes and scores are given
        group_size = np.array(group_size).flatten()
        score_mean = np.array(score_mean).flatten()
        score_stderr = np.array(score_stderr).flatten()
        if (group_size.shape != score_mean.shape) or (
            group_size.shape != score_stderr.shape
        ):
            raise RuntimeError(
                "Input arrays for adversarial group calibration shape mismatch"
            )

    # Set label
    if curve_label is None:
        curve_label = "Predictor"

    # Plot
    ax.plot(group_size, score_mean, "-o", label=curve_label, c="#1f77b4")
    ax.fill_between(
        group_size,
        score_mean - score_stderr,
        score_mean + score_stderr,
        alpha=0.2,
    )

    # Format plot
    buff = 0.02
    ax.set_xlim([0 - buff, 1 + buff])
    ax.set_ylim([0 - buff, 0.5 + buff])
    ax.set_xlabel("Group size")
    ax.set_ylabel("Calibration Error of Worst Group")
    ax.set_title("Adversarial Group Calibration")

    return ax
Plot adversarial group calibration by varying group size from 0% to 100% of the dataset size and recording the worst calibration error observed for each group size. Args: y_pred: 1D array of the predicted means for the held out dataset. y_std: 1D array of the predicted standard deviations for the held out dataset. y_true: 1D array of the true labels in the held out dataset. n_subset: Number of points to plot after filtering. cali_type: Calibration type str. curve_label: legend label str for calibration curve. group_size: 1D array of group size ratios in [0, 1]. score_mean: 1D array of metric means for group size ratios in group_size. score_stderr: 1D array of metric standard deviations for group size ratios in group_size. ax: matplotlib.axes.Axes object. Returns: matplotlib.axes.Axes object with plot added.
plot_adversarial_group_calibration
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/viz.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/viz.py
MIT
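Because the underlying sweep re-evaluates the calibration metric over many random subgroups, it can be slow; precomputed curves can be passed in directly, as in this sketch with made-up values:

import numpy as np

group_size = np.linspace(0, 1, 10)
score_mean = 0.05 + 0.3 * np.exp(-5 * group_size)  # hypothetical decaying worst-group error
score_stderr = 0.02 * np.ones_like(group_size)

ax = plot_adversarial_group_calibration(
    None, None, None,
    group_size=group_size, score_mean=score_mean, score_stderr=score_stderr,
)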
def plot_sharpness( y_std: np.ndarray, n_subset: Union[int, None] = None, ax: Union[matplotlib.axes.Axes, None] = None, ) -> matplotlib.axes.Axes: """Plot sharpness of the predictive uncertainties. Args: y_std: 1D array of the predicted standard deviations for the held out dataset. n_subset: Number of points to plot after filtering. ax: matplotlib.axes.Axes object. Returns: matplotlib.axes.Axes object with plot added. """ # Create ax if it doesn't exist if ax is None: fig, ax = plt.subplots(figsize=(5, 5)) # Optionally select a subset if n_subset is not None: y_std = filter_subset([y_std], n_subset)[0] # Plot sharpness curve ax.hist(y_std, edgecolor="#1f77b4", color="#a5c8e1", density=True) # Format plot xlim = (y_std.min(), y_std.max()) ax.set_xlim(xlim) ax.set_xlabel("Predicted Standard Deviation") ax.set_ylabel("Normalized Frequency") ax.set_title("Sharpness") ax.set_yticklabels([]) ax.set_yticks([]) # Calculate and report sharpness sharpness = np.sqrt(np.mean(y_std**2)) ax.axvline(x=sharpness, label="sharpness", color="k", linewidth=2, ls="--") if sharpness < (xlim[0] + xlim[1]) / 2: text = "\n Sharpness = %.2f" % sharpness h_align = "left" else: text = "\nSharpness = %.2f " % sharpness h_align = "right" ax.text( x=sharpness, y=ax.get_ylim()[1], s=text, verticalalignment="top", horizontalalignment=h_align, fontsize="small", ) return ax
Plot sharpness of the predictive uncertainties. Args: y_std: 1D array of the predicted standard deviations for the held out dataset. n_subset: Number of points to plot after filtering. ax: matplotlib.axes.Axes object. Returns: matplotlib.axes.Axes object with plot added.
plot_sharpness
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/viz.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/viz.py
MIT
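The sharpness value reported on the plot is the root mean square of the predicted standard deviations; a quick numeric check of the same formula:

import numpy as np

y_std = np.array([0.1, 0.2, 0.4])
sharpness = np.sqrt(np.mean(y_std ** 2))  # RMS of the predictive stds
print(round(sharpness, 4))  # 0.2646 = sqrt((0.01 + 0.04 + 0.16) / 3)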
def plot_residuals_vs_stds( y_pred: np.ndarray, y_std: np.ndarray, y_true: np.ndarray, n_subset: Union[int, None] = None, ax: Union[matplotlib.axes.Axes, None] = None, ) -> matplotlib.axes.Axes: """Plot absolute value of the prediction residuals versus standard deviations of the predictive uncertainties. Args: y_pred: 1D array of the predicted means for the held out dataset. y_std: 1D array of the predicted standard deviations for the held out dataset. y_true: 1D array of the true labels in the held out dataset. n_subset: Number of points to plot after filtering. ax: matplotlib.axes.Axes object. Returns: matplotlib.axes.Axes object with plot added. """ # Create ax if it doesn't exist if ax is None: fig, ax = plt.subplots(figsize=(5, 5)) # Optionally select a subset if n_subset is not None: [y_pred, y_std, y_true] = filter_subset([y_pred, y_std, y_true], n_subset) # Compute residuals residuals = y_true - y_pred # Put stds on same scale as residuals residuals_sum = np.sum(np.abs(residuals)) y_std_scaled = (y_std / np.sum(y_std)) * residuals_sum # Plot residuals vs standard devs h1 = ax.plot(y_std_scaled, np.abs(residuals), "o", c="#1f77b4") # Plot 45-degree line xlims = ax.get_xlim() ylims = ax.get_ylim() lims = [np.min([xlims[0], ylims[0]]), np.max([xlims[1], ylims[1]])] h2 = ax.plot(lims, lims, "--", c="#ff7f0e") # Legend ax.legend([h1[0], h2[0]], ["Predictions", "$f(x) = x$"], loc=4) # Format plot ax.set_xlabel("Standard Deviations (Scaled)") ax.set_ylabel("Residuals (Absolute Value)") ax.set_title("Residuals vs. Predictive Standard Deviations") ax.set_xlim(lims) ax.set_ylim(lims) ax.axis("square") return ax
Plot absolute value of the prediction residuals versus standard deviations of the predictive uncertainties. Args: y_pred: 1D array of the predicted means for the held out dataset. y_std: 1D array of the predicted standard deviations for the held out dataset. y_true: 1D array of the true labels in the held out dataset. n_subset: Number of points to plot after filtering. ax: matplotlib.axes.Axes object. Returns: matplotlib.axes.Axes object with plot added.
plot_residuals_vs_stds
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/viz.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/viz.py
MIT
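The scaling step gives y_std the same total mass as the absolute residuals, which puts both axes of the scatter on a comparable scale; that step in isolation:

import numpy as np

residuals = np.array([0.5, -1.0, 0.5])
y_std = np.array([0.2, 0.6, 0.2])
y_std_scaled = (y_std / np.sum(y_std)) * np.sum(np.abs(residuals))
print(y_std_scaled)  # [0.4 1.2 0.4] -- sums to 2.0, like |residuals|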
def filter_subset(input_list: List[np.ndarray], n_subset: int) -> List[np.ndarray]:
    """Keep only n_subset random indices from all arrays given in input_list.

    Args:
        input_list: list of numpy arrays of equal length.
        n_subset: Number of points to plot after filtering.

    Returns:
        List of all input arrays with sizes reduced to n_subset.
    """
    assert isinstance(n_subset, int)
    n_total = len(input_list[0])
    idx = np.random.choice(range(n_total), n_subset, replace=False)
    idx = np.sort(idx)
    output_list = []
    for inp in input_list:
        outp = inp[idx]
        output_list.append(outp)
    return output_list
Keep only n_subset random indices from all arrays given in input_list. Args: input_list: list of numpy arrays of equal length. n_subset: Number of points to plot after filtering. Returns: List of all input arrays with sizes reduced to n_subset.
filter_subset
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/viz.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/viz.py
MIT
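Because filter_subset draws one shared index set and applies it to every array, point correspondence across arrays survives the subsampling; for example:

import numpy as np

np.random.seed(0)
a = np.arange(10)
b = np.arange(10) * 10
a_sub, b_sub = filter_subset([a, b], n_subset=4)
assert np.all(b_sub == a_sub * 10)  # the same 4 indices were kept in both arrays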
def set_style(style_str: str = "default") -> None:
    """Set the matplotlib plotting style.

    Args:
        style_str: string for style file.
    """
    if style_str == "default":
        plt.style.use((pathlib.Path(__file__).parent / "matplotlibrc").resolve())
Set the matplotlib plotting style. Args: style_str: string for style file.
set_style
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/viz.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/viz.py
MIT
def save_figure(
    file_name: str = "figure",
    ext_list: Union[list, str, None] = None,
    white_background: bool = True,
) -> None:
    """Save matplotlib figure for all extensions in ext_list.

    Args:
        file_name: name of saved image file.
        ext_list: list of strings (or single string) denoting file type.
        white_background: set background of image to white if True.
    """
    # Default ext_list
    if ext_list is None:
        ext_list = ["pdf", "png"]

    # If ext_list is a single str
    if isinstance(ext_list, str):
        ext_list = [ext_list]

    # Set facecolor and edgecolor
    (fc, ec) = ("w", "w") if white_background else ("none", "none")

    # Save each type in ext_list
    for ext in ext_list:
        save_str = file_name + "." + ext
        plt.savefig(save_str, bbox_inches="tight", facecolor=fc, edgecolor=ec)
        print(f"Saved figure {save_str}")
Save matplotlib figure for all extensions in ext_list. Args: file_name: name of saved image file. ext_list: list of strings (or single string) denoting file type. white_background: set background of image to white if True.
save_figure
python
uncertainty-toolbox/uncertainty-toolbox
uncertainty_toolbox/viz.py
https://github.com/uncertainty-toolbox/uncertainty-toolbox/blob/master/uncertainty_toolbox/viz.py
MIT
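A typical end-of-script flow with these helpers (the output file name is arbitrary):

import matplotlib.pyplot as plt

set_style()                      # load the bundled matplotlibrc
plt.plot([0, 1], [0, 1])
save_figure("calibration_demo")  # writes calibration_demo.pdf and calibration_demo.png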
def setup(self) -> None: """Load the model into memory to make running multiple predictions efficient""" self.enhancer = FaceEnhancement( base_dir="checkpoints", size=512, model="GPEN-BFR-512", use_sr=False, sr_model="rrdb_realesrnet_psnr", channel_multiplier=2, narrow=1, device="cuda", ) self.restorer = GFPGANer( model_path="checkpoints/GFPGANv1.3.pth", upscale=1, arch="clean", channel_multiplier=2, bg_upsampler=None, ) self.croper = Croper("checkpoints/shape_predictor_68_face_landmarks.dat") self.kp_extractor = KeypointExtractor() face3d_net_path = "checkpoints/face3d_pretrain_epoch_20.pth" self.net_recon = load_face3d_net(face3d_net_path, "cuda") self.lm3d_std = load_lm3d("checkpoints/BFM")
Load the model into memory to make running multiple predictions efficient
setup
python
OpenTalker/video-retalking
predict.py
https://github.com/OpenTalker/video-retalking/blob/master/predict.py
Apache-2.0
def predict( self, face: Path = Input(description="Input video file of a talking-head."), input_audio: Path = Input(description="Input audio file."), ) -> Path: """Run a single prediction on the model""" device = "cuda" args = argparse.Namespace( DNet_path="checkpoints/DNet.pt", LNet_path="checkpoints/LNet.pth", ENet_path="checkpoints/ENet.pth", face3d_net_path="checkpoints/face3d_pretrain_epoch_20.pth", face=str(face), audio=str(input_audio), exp_img="neutral", outfile=None, fps=25, pads=[0, 20, 0, 0], face_det_batch_size=4, LNet_batch_size=16, img_size=384, crop=[0, -1, 0, -1], box=[-1, -1, -1, -1], nosmooth=False, static=False, up_face="original", one_shot=False, without_rl1=False, tmp_dir="temp", re_preprocess=False, ) base_name = args.face.split("/")[-1] if args.face.split(".")[1] in ["jpg", "png", "jpeg"]: full_frames = [cv2.imread(args.face)] args.static = True fps = args.fps else: video_stream = cv2.VideoCapture(args.face) fps = video_stream.get(cv2.CAP_PROP_FPS) full_frames = [] while True: still_reading, frame = video_stream.read() if not still_reading: video_stream.release() break y1, y2, x1, x2 = args.crop if x2 == -1: x2 = frame.shape[1] if y2 == -1: y2 = frame.shape[0] frame = frame[y1:y2, x1:x2] full_frames.append(frame) full_frames_RGB = [ cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) for frame in full_frames ] full_frames_RGB, crop, quad = self.croper.crop(full_frames_RGB, xsize=512) clx, cly, crx, cry = crop lx, ly, rx, ry = quad lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry) oy1, oy2, ox1, ox2 = ( cly + ly, min(cly + ry, full_frames[0].shape[0]), clx + lx, min(clx + rx, full_frames[0].shape[1]), ) # original_size = (ox2 - ox1, oy2 - oy1) frames_pil = [ Image.fromarray(cv2.resize(frame, (256, 256))) for frame in full_frames_RGB ] # get the landmark according to the detected face. 
if ( not os.path.isfile("temp/" + base_name + "_landmarks.txt") or args.re_preprocess ): print("[Step 1] Landmarks Extraction in Video.") lm = self.kp_extractor.extract_keypoint( frames_pil, "./temp/" + base_name + "_landmarks.txt" ) else: print("[Step 1] Using saved landmarks.") lm = np.loadtxt("temp/" + base_name + "_landmarks.txt").astype(np.float32) lm = lm.reshape([len(full_frames), -1, 2]) if ( not os.path.isfile("temp/" + base_name + "_coeffs.npy") or args.exp_img is not None or args.re_preprocess ): video_coeffs = [] for idx in tqdm( range(len(frames_pil)), desc="[Step 2] 3DMM Extraction In Video:" ): frame = frames_pil[idx] W, H = frame.size lm_idx = lm[idx].reshape([-1, 2]) if np.mean(lm_idx) == -1: lm_idx = (self.lm3d_std[:, :2] + 1) / 2.0 lm_idx = np.concatenate([lm_idx[:, :1] * W, lm_idx[:, 1:2] * H], 1) else: lm_idx[:, -1] = H - 1 - lm_idx[:, -1] trans_params, im_idx, lm_idx, _ = align_img( frame, lm_idx, self.lm3d_std ) trans_params = np.array( [float(item) for item in np.hsplit(trans_params, 5)] ).astype(np.float32) im_idx_tensor = ( torch.tensor(np.array(im_idx) / 255.0, dtype=torch.float32) .permute(2, 0, 1) .to(device) .unsqueeze(0) ) with torch.no_grad(): coeffs = split_coeff(self.net_recon(im_idx_tensor)) pred_coeff = {key: coeffs[key].cpu().numpy() for key in coeffs} pred_coeff = np.concatenate( [ pred_coeff["id"], pred_coeff["exp"], pred_coeff["tex"], pred_coeff["angle"], pred_coeff["gamma"], pred_coeff["trans"], trans_params[None], ], 1, ) video_coeffs.append(pred_coeff) semantic_npy = np.array(video_coeffs)[:, 0] np.save("temp/" + base_name + "_coeffs.npy", semantic_npy) else: print("[Step 2] Using saved coeffs.") semantic_npy = np.load("temp/" + base_name + "_coeffs.npy").astype( np.float32 ) # generate the 3dmm coeff from a single image if args.exp_img == "smile": expression = torch.tensor( loadmat("checkpoints/expression.mat")["expression_mouth"] )[0] else: print("using expression center") expression = torch.tensor( loadmat("checkpoints/expression.mat")["expression_center"] )[0] # load DNet, model(LNet and ENet) D_Net, model = load_model(args, device) if ( not os.path.isfile("temp/" + base_name + "_stablized.npy") or args.re_preprocess ): imgs = [] for idx in tqdm( range(len(frames_pil)), desc="[Step 3] Stabilize the expression In Video:", ): if args.one_shot: source_img = trans_image(frames_pil[0]).unsqueeze(0).to(device) semantic_source_numpy = semantic_npy[0:1] else: source_img = trans_image(frames_pil[idx]).unsqueeze(0).to(device) semantic_source_numpy = semantic_npy[idx : idx + 1] ratio = find_crop_norm_ratio(semantic_source_numpy, semantic_npy) coeff = ( transform_semantic(semantic_npy, idx, ratio).unsqueeze(0).to(device) ) # hacking the new expression coeff[:, :64, :] = expression[None, :64, None].to(device) with torch.no_grad(): output = D_Net(source_img, coeff) img_stablized = np.uint8( ( output["fake_image"] .squeeze(0) .permute(1, 2, 0) .cpu() .clamp_(-1, 1) .numpy() + 1 ) / 2.0 * 255 ) imgs.append(cv2.cvtColor(img_stablized, cv2.COLOR_RGB2BGR)) np.save("temp/" + base_name + "_stablized.npy", imgs) del D_Net else: print("[Step 3] Using saved stabilized video.") imgs = np.load("temp/" + base_name + "_stablized.npy") torch.cuda.empty_cache() if not args.audio.endswith(".wav"): command = "ffmpeg -loglevel error -y -i {} -strict -2 {}".format( args.audio, "temp/{}/temp.wav".format(args.tmp_dir) ) subprocess.call(command, shell=True) args.audio = "temp/{}/temp.wav".format(args.tmp_dir) wav = audio.load_wav(args.audio, 16000) mel = audio.melspectrogram(wav) 
if np.isnan(mel.reshape(-1)).sum() > 0:
            raise ValueError(
                "Mel contains nan! Using a TTS voice? Add a small epsilon noise to the wav file and try again"
            )

        mel_step_size, mel_idx_multiplier, i, mel_chunks = 16, 80.0 / fps, 0, []
        while True:
            start_idx = int(i * mel_idx_multiplier)
            if start_idx + mel_step_size > len(mel[0]):
                mel_chunks.append(mel[:, len(mel[0]) - mel_step_size :])
                break
            mel_chunks.append(mel[:, start_idx : start_idx + mel_step_size])
            i += 1

        print("[Step 4] Load audio; Length of mel chunks: {}".format(len(mel_chunks)))

        imgs = imgs[: len(mel_chunks)]
        full_frames = full_frames[: len(mel_chunks)]
        lm = lm[: len(mel_chunks)]

        imgs_enhanced = []
        for idx in tqdm(range(len(imgs)), desc="[Step 5] Reference Enhancement"):
            img = imgs[idx]
            pred, _, _ = self.enhancer.process(
                img, img, face_enhance=True, possion_blending=False
            )
            imgs_enhanced.append(pred)
        gen = datagen(
            imgs_enhanced.copy(), mel_chunks, full_frames, args, (oy1, oy2, ox1, ox2)
        )

        frame_h, frame_w = full_frames[0].shape[:-1]
        out = cv2.VideoWriter(
            "temp/{}/result.mp4".format(args.tmp_dir),
            cv2.VideoWriter_fourcc(*"mp4v"),
            fps,
            (frame_w, frame_h),
        )

        if args.up_face != "original":
            instance = GANimationModel()
            instance.initialize()
            instance.setup()

        # kp_extractor = KeypointExtractor()
        for i, (
            img_batch,
            mel_batch,
            frames,
            coords,
            img_original,
            f_frames,
        ) in enumerate(
            tqdm(
                gen,
                desc="[Step 6] Lip Synthesis:",
                total=int(np.ceil(float(len(mel_chunks)) / args.LNet_batch_size)),
            )
        ):
            img_batch = torch.FloatTensor(np.transpose(img_batch, (0, 3, 1, 2))).to(
                device
            )
            mel_batch = torch.FloatTensor(np.transpose(mel_batch, (0, 3, 1, 2))).to(
                device
            )
            img_original = (
                torch.FloatTensor(np.transpose(img_original, (0, 3, 1, 2))).to(device)
                / 255.0
            )  # BGR -> RGB

            with torch.no_grad():
                incomplete, reference = torch.split(img_batch, 3, dim=1)
                pred, low_res = model(mel_batch, img_batch, reference)
                pred = torch.clamp(pred, 0, 1)

                if args.up_face in ["sad", "angry", "surprise"]:
                    tar_aus = exp_aus_dict[args.up_face]
                else:
                    pass

                if args.up_face == "original":
                    cur_gen_faces = img_original
                else:
                    test_batch = {
                        "src_img": torch.nn.functional.interpolate(
                            (img_original * 2 - 1), size=(128, 128), mode="bilinear"
                        ),
                        "tar_aus": tar_aus.repeat(len(incomplete), 1),
                    }
                    instance.feed_batch(test_batch)
                    instance.forward()
                    cur_gen_faces = torch.nn.functional.interpolate(
                        instance.fake_img / 2.0 + 0.5, size=(384, 384), mode="bilinear"
                    )

                if args.without_rl1:
                    incomplete, reference = torch.split(img_batch, 3, dim=1)
                    mask = torch.where(
                        incomplete == 0,
                        torch.ones_like(incomplete),
                        torch.zeros_like(incomplete),
                    )
                    # keep the synthesized region where the input was masked out,
                    # take the rest from cur_gen_faces
                    pred = pred * mask + cur_gen_faces * (1 - mask)

            pred = pred.cpu().numpy().transpose(0, 2, 3, 1) * 255.0

            torch.cuda.empty_cache()
            for p, f, xf, c in zip(pred, frames, f_frames, coords):
                y1, y2, x1, x2 = c
                p = cv2.resize(p.astype(np.uint8), (x2 - x1, y2 - y1))

                ff = xf.copy()
                ff[y1:y2, x1:x2] = p

                # mouth region enhancement by GFPGAN
                cropped_faces, restored_faces, restored_img = self.restorer.enhance(
                    ff, has_aligned=False, only_center_face=True, paste_back=True
                )
                # 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                mm = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0]
                mouse_mask = np.zeros_like(restored_img)
                tmp_mask = self.enhancer.faceparser.process(
                    restored_img[y1:y2, x1:x2], mm
                )[0]
                mouse_mask[y1:y2, x1:x2] = (
                    cv2.resize(tmp_mask, (x2 - x1, y2 - y1))[:, :, np.newaxis] / 255.0
                )

                height, width = ff.shape[:2]
                restored_img, ff, full_mask = [
                    cv2.resize(x, (512, 512))
                    for x in (restored_img, ff, np.float32(mouse_mask))
                ]
                img = Laplacian_Pyramid_Blending_with_mask(
                    restored_img, ff, full_mask[:, :, 0], 10
                )
                pp = np.uint8(cv2.resize(np.clip(img, 0, 255), (width, height)))

                pp, orig_faces, enhanced_faces = self.enhancer.process(
                    pp, xf, bbox=c, face_enhance=False, possion_blending=True
                )
                out.write(pp)
        out.release()

        output_file = "/tmp/output.mp4"
        command = "ffmpeg -loglevel error -y -i {} -i {} -strict -2 -q:v 1 {}".format(
            args.audio, "temp/{}/result.mp4".format(args.tmp_dir), output_file
        )
        subprocess.call(command, shell=True)

        return Path(output_file)
Run a single prediction on the model
predict
python
OpenTalker/video-retalking
predict.py
https://github.com/OpenTalker/video-retalking/blob/master/predict.py
Apache-2.0
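In Step 4 above, the loop slides a 16-frame mel window forward by 80/fps mel frames per video frame, so each chunk lines up with one output frame; the same indexing in isolation, on a synthetic mel array (fps=25 is an assumed value):

import numpy as np

fps = 25
mel = np.zeros((80, 200))  # synthetic (n_mels x T) mel spectrogram
mel_step_size, mel_idx_multiplier = 16, 80.0 / fps
mel_chunks, i = [], 0
while True:
    start_idx = int(i * mel_idx_multiplier)
    if start_idx + mel_step_size > mel.shape[1]:
        mel_chunks.append(mel[:, -mel_step_size:])  # final chunk clamped to the end
        break
    mel_chunks.append(mel[:, start_idx:start_idx + mel_step_size])
    i += 1
print(len(mel_chunks))  # roughly T / (80 / fps) chunks, one per video frame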
def image_transform(self, images, lm):
    """Align an image with its landmarks.

    Parameters:
        images -- PIL.Image
        lm     -- numpy array of landmarks
    """
    W, H = images.size
    if np.mean(lm) == -1:
        lm = (self.lm3d_std[:, :2] + 1) / 2.
        lm = np.concatenate([lm[:, :1] * W, lm[:, 1:2] * H], 1)
    else:
        lm[:, -1] = H - 1 - lm[:, -1]

    trans_params, img, lm, _ = align_img(images, lm, self.lm3d_std)
    img = torch.tensor(np.array(img) / 255., dtype=torch.float32).permute(2, 0, 1)
    trans_params = np.array([float(item) for item in np.hsplit(trans_params, 5)])
    trans_params = torch.tensor(trans_params.astype(np.float32))

    return img, trans_params
Align an image with its landmarks. Parameters: images -- PIL.Image lm -- numpy array of landmarks
image_transform
python
OpenTalker/video-retalking
third_part/face3d/coeff_detector.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/coeff_detector.py
Apache-2.0
def __init__(self, opt): """Initialize the class; save the options in the class Parameters: opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions """ self.opt = opt # self.root = opt.dataroot self.current_epoch = 0
Initialize the class; save the options in the class Parameters: opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
__init__
python
OpenTalker/video-retalking
third_part/face3d/data/base_dataset.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/data/base_dataset.py
Apache-2.0
def default_flist_reader(flist):
    """
    flist format: one image path per line; the whole stripped line is kept,
    so caffe-style "impath label" lines are stored verbatim rather than split
    """
    imlist = []
    with open(flist, 'r') as rf:
        for line in rf.readlines():
            impath = line.strip()
            imlist.append(impath)

    return imlist
flist format: one image path per line; the whole stripped line is kept, so caffe-style "impath label" lines are stored verbatim rather than split
default_flist_reader
python
OpenTalker/video-retalking
third_part/face3d/data/flist_dataset.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/data/flist_dataset.py
Apache-2.0
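A quick round-trip of the reader (the temporary file is only for illustration):

import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.flist', delete=False) as f:
    f.write('images/a.png\nimages/b.png\n')
    flist_path = f.name
print(default_flist_reader(flist_path))  # ['images/a.png', 'images/b.png']
os.remove(flist_path)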
def __init__(self, opt): """Initialize this dataset class. Parameters: opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions """ BaseDataset.__init__(self, opt) self.lm3d_std = load_lm3d(opt.bfm_folder) msk_names = default_flist_reader(opt.flist) self.msk_paths = [os.path.join(opt.data_root, i) for i in msk_names] self.size = len(self.msk_paths) self.opt = opt self.name = 'train' if opt.isTrain else 'val' if '_' in opt.flist: self.name += '_' + opt.flist.split(os.sep)[-1].split('_')[0]
Initialize this dataset class. Parameters: opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
__init__
python
OpenTalker/video-retalking
third_part/face3d/data/flist_dataset.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/data/flist_dataset.py
Apache-2.0
def __getitem__(self, index):
    """Return a data point and its metadata information.

    Parameters:
        index (int) -- a random integer for data indexing

    Returns a dictionary that contains imgs, lms, msks, M, im_paths, aug_flag and dataset
        imgs (tensor)   -- an image in the input domain
        msks (tensor)   -- its corresponding attention mask
        lms (tensor)    -- its corresponding 3d landmarks
        M (tensor)      -- its corresponding normalization matrix
        im_paths (str)  -- image path
        aug_flag (bool) -- a flag used to tell whether the data is raw or augmented
        dataset (str)   -- dataset name
    """
    msk_path = self.msk_paths[index % self.size]  # make sure index is within the range
    img_path = msk_path.replace('mask/', '')
    lm_path = '.'.join(msk_path.replace('mask', 'landmarks').split('.')[:-1]) + '.txt'

    raw_img = Image.open(img_path).convert('RGB')
    raw_msk = Image.open(msk_path).convert('RGB')
    raw_lm = np.loadtxt(lm_path).astype(np.float32)

    _, img, lm, msk = align_img(raw_img, raw_lm, self.lm3d_std, raw_msk)

    aug_flag = self.opt.use_aug and self.opt.isTrain
    if aug_flag:
        img, lm, msk = self._augmentation(img, lm, self.opt, msk)

    _, H = img.size
    M = estimate_norm(lm, H)
    transform = get_transform()
    img_tensor = transform(img)
    msk_tensor = transform(msk)[:1, ...]
    lm_tensor = parse_label(lm)
    M_tensor = parse_label(M)

    return {'imgs': img_tensor, 'lms': lm_tensor, 'msks': msk_tensor, 'M': M_tensor,
            'im_paths': img_path, 'aug_flag': aug_flag, 'dataset': self.name}
Return a data point and its metadata information. Parameters: index (int) -- a random integer for data indexing Returns a dictionary that contains imgs, lms, msks, M, im_paths, aug_flag and dataset imgs (tensor) -- an image in the input domain msks (tensor) -- its corresponding attention mask lms (tensor) -- its corresponding 3d landmarks M (tensor) -- its corresponding normalization matrix im_paths (str) -- image path aug_flag (bool) -- a flag used to tell whether the data is raw or augmented dataset (str) -- dataset name
__getitem__
python
OpenTalker/video-retalking
third_part/face3d/data/flist_dataset.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/data/flist_dataset.py
Apache-2.0
def modify_commandline_options(parser, is_train): """Add new dataset-specific options, and rewrite default values for existing options. Parameters: parser -- original option parser is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. Returns: the modified parser. """ parser.add_argument('--new_dataset_option', type=float, default=1.0, help='new dataset option') parser.set_defaults(max_dataset_size=10, new_dataset_option=2.0) # specify dataset-specific default values return parser
Add new dataset-specific options, and rewrite default values for existing options. Parameters: parser -- original option parser is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. Returns: the modified parser.
modify_commandline_options
python
OpenTalker/video-retalking
third_part/face3d/data/template_dataset.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/data/template_dataset.py
Apache-2.0
def __init__(self, opt): """Initialize this dataset class. Parameters: opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions A few things can be done here. - save the options (have been done in BaseDataset) - get image paths and meta information of the dataset. - define the image transformation. """ # save the option and dataset root BaseDataset.__init__(self, opt) # get the image paths of your dataset; self.image_paths = [] # You can call sorted(make_dataset(self.root, opt.max_dataset_size)) to get all the image paths under the directory self.root # define the default transform function. You can use <base_dataset.get_transform>; You can also define your custom transform function self.transform = get_transform(opt)
Initialize this dataset class. Parameters: opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions A few things can be done here. - save the options (have been done in BaseDataset) - get image paths and meta information of the dataset. - define the image transformation.
__init__
python
OpenTalker/video-retalking
third_part/face3d/data/template_dataset.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/data/template_dataset.py
Apache-2.0
def __getitem__(self, index):
    """Return a data point and its metadata information.

    Parameters:
        index -- a random integer for data indexing

    Returns:
        a dictionary of data with their names. It usually contains the data itself and its metadata information.

    Step 1: get a random image path: e.g., path = self.image_paths[index]
    Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB').
    Step 3: convert your data to a PyTorch tensor. You can use helper functions such as self.transform. e.g., data = self.transform(image)
    Step 4: return a data point as a dictionary.
    """
    path = 'temp'    # needs to be a string
    data_A = None    # needs to be a tensor
    data_B = None    # needs to be a tensor
    return {'data_A': data_A, 'data_B': data_B, 'path': path}
Return a data point and its metadata information. Parameters: index -- a random integer for data indexing Returns: a dictionary of data with their names. It usually contains the data itself and its metadata information. Step 1: get a random image path: e.g., path = self.image_paths[index] Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB'). Step 3: convert your data to a PyTorch tensor. You can use helper functions such as self.transform. e.g., data = self.transform(image) Step 4: return a data point as a dictionary.
__getitem__
python
OpenTalker/video-retalking
third_part/face3d/data/template_dataset.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/data/template_dataset.py
Apache-2.0
def find_dataset_using_name(dataset_name): """Import the module "data/[dataset_name]_dataset.py". In the file, the class called DatasetNameDataset() will be instantiated. It has to be a subclass of BaseDataset, and it is case-insensitive. """ dataset_filename = "data." + dataset_name + "_dataset" datasetlib = importlib.import_module(dataset_filename) dataset = None target_dataset_name = dataset_name.replace('_', '') + 'dataset' for name, cls in datasetlib.__dict__.items(): if name.lower() == target_dataset_name.lower() \ and issubclass(cls, BaseDataset): dataset = cls if dataset is None: raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name)) return dataset
Import the module "data/[dataset_name]_dataset.py". In the file, the class called DatasetNameDataset() will be instantiated. It has to be a subclass of BaseDataset, and it is case-insensitive.
find_dataset_using_name
python
OpenTalker/video-retalking
third_part/face3d/data/__init__.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/data/__init__.py
Apache-2.0
def create_dataset(opt, rank=0): """Create a dataset given the option. This function wraps the class CustomDatasetDataLoader. This is the main interface between this package and 'train.py'/'test.py' Example: >>> from data import create_dataset >>> dataset = create_dataset(opt) """ data_loader = CustomDatasetDataLoader(opt, rank=rank) dataset = data_loader.load_data() return dataset
Create a dataset given the option. This function wraps the class CustomDatasetDataLoader. This is the main interface between this package and 'train.py'/'test.py' Example: >>> from data import create_dataset >>> dataset = create_dataset(opt)
create_dataset
python
OpenTalker/video-retalking
third_part/face3d/data/__init__.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/data/__init__.py
Apache-2.0
def __init__(self, opt, rank=0): """Initialize this class Step 1: create a dataset instance given the name [dataset_mode] Step 2: create a multi-threaded data loader. """ self.opt = opt dataset_class = find_dataset_using_name(opt.dataset_mode) self.dataset = dataset_class(opt) self.sampler = None print("rank %d %s dataset [%s] was created" % (rank, self.dataset.name, type(self.dataset).__name__)) if opt.use_ddp and opt.isTrain: world_size = opt.world_size self.sampler = torch.utils.data.distributed.DistributedSampler( self.dataset, num_replicas=world_size, rank=rank, shuffle=not opt.serial_batches ) self.dataloader = torch.utils.data.DataLoader( self.dataset, sampler=self.sampler, num_workers=int(opt.num_threads / world_size), batch_size=int(opt.batch_size / world_size), drop_last=True) else: self.dataloader = torch.utils.data.DataLoader( self.dataset, batch_size=opt.batch_size, shuffle=(not opt.serial_batches) and opt.isTrain, num_workers=int(opt.num_threads), drop_last=True )
Initialize this class Step 1: create a dataset instance given the name [dataset_mode] Step 2: create a multi-threaded data loader.
__init__
python
OpenTalker/video-retalking
third_part/face3d/data/__init__.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/data/__init__.py
Apache-2.0
def __init__(self, opt):
    """Initialize the BaseModel class.

    Parameters:
        opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions

    When creating your custom class, you need to implement your own initialization.
    In this function, you should first call <BaseModel.__init__(self, opt)>
    Then, you need to define four lists:
        -- self.loss_names (str list):       specify the training losses that you want to plot and save.
        -- self.model_names (str list):      define networks used in our training.
        -- self.visual_names (str list):     specify the images that you want to display and save.
        -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
    """
    self.opt = opt
    self.isTrain = opt.isTrain
    self.device = torch.device('cpu')
    self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)  # save all the checkpoints to save_dir
    self.loss_names = []
    self.model_names = []
    self.visual_names = []
    self.parallel_names = []
    self.optimizers = []
    self.image_paths = []
    self.metric = 0  # used for learning rate policy 'plateau'
Initialize the BaseModel class. Parameters: opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions When creating your custom class, you need to implement your own initialization. In this function, you should first call <BaseModel.__init__(self, opt)> Then, you need to define four lists: -- self.loss_names (str list): specify the training losses that you want to plot and save. -- self.model_names (str list): define networks used in our training. -- self.visual_names (str list): specify the images that you want to display and save. -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
__init__
python
OpenTalker/video-retalking
third_part/face3d/models/base_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/base_model.py
Apache-2.0
def setup(self, opt): """Load and print networks; create schedulers Parameters: opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions """ if self.isTrain: self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers] if not self.isTrain or opt.continue_train: load_suffix = opt.epoch self.load_networks(load_suffix) # self.print_networks(opt.verbose)
Load and print networks; create schedulers Parameters: opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
setup
python
OpenTalker/video-retalking
third_part/face3d/models/base_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/base_model.py
Apache-2.0
def update_learning_rate(self): """Update learning rates for all the networks; called at the end of every epoch""" for scheduler in self.schedulers: if self.opt.lr_policy == 'plateau': scheduler.step(self.metric) else: scheduler.step() lr = self.optimizers[0].param_groups[0]['lr'] print('learning rate = %.7f' % lr)
Update learning rates for all the networks; called at the end of every epoch
update_learning_rate
python
OpenTalker/video-retalking
third_part/face3d/models/base_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/base_model.py
Apache-2.0
def get_current_visuals(self):
    """Return visualization images. train.py will display these images with visdom, and save the images to an HTML file"""
    visual_ret = OrderedDict()
    for name in self.visual_names:
        if isinstance(name, str):
            visual_ret[name] = getattr(self, name)[:, :3, ...]
    return visual_ret
Return visualization images. train.py will display these images with visdom, and save the images to an HTML file
get_current_visuals
python
OpenTalker/video-retalking
third_part/face3d/models/base_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/base_model.py
Apache-2.0
def get_current_losses(self):
    """Return training losses / errors. train.py will print out these errors on console, and save them to a file"""
    errors_ret = OrderedDict()
    for name in self.loss_names:
        if isinstance(name, str):
            errors_ret[name] = float(getattr(self, 'loss_' + name))  # float(...) works for both scalar tensor and float number
    return errors_ret
Return training losses / errors. train.py will print out these errors on console, and save them to a file
get_current_losses
python
OpenTalker/video-retalking
third_part/face3d/models/base_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/base_model.py
Apache-2.0
def save_networks(self, epoch): """Save all the networks to the disk. Parameters: epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) """ if not os.path.isdir(self.save_dir): os.makedirs(self.save_dir) save_filename = 'epoch_%s.pth' % (epoch) save_path = os.path.join(self.save_dir, save_filename) save_dict = {} for name in self.model_names: if isinstance(name, str): net = getattr(self, name) if isinstance(net, torch.nn.DataParallel) or isinstance(net, torch.nn.parallel.DistributedDataParallel): net = net.module save_dict[name] = net.state_dict() for i, optim in enumerate(self.optimizers): save_dict['opt_%02d'%i] = optim.state_dict() for i, sched in enumerate(self.schedulers): save_dict['sched_%02d'%i] = sched.state_dict() torch.save(save_dict, save_path)
Save all the networks to the disk. Parameters: epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
save_networks
python
OpenTalker/video-retalking
third_part/face3d/models/base_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/base_model.py
Apache-2.0
def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0): """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)""" key = keys[i] if i + 1 == len(keys): # at the end, pointing to a parameter/buffer if module.__class__.__name__.startswith('InstanceNorm') and \ (key == 'running_mean' or key == 'running_var'): if getattr(module, key) is None: state_dict.pop('.'.join(keys)) if module.__class__.__name__.startswith('InstanceNorm') and \ (key == 'num_batches_tracked'): state_dict.pop('.'.join(keys)) else: self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
Fix InstanceNorm checkpoints incompatibility (prior to 0.4)
__patch_instance_norm_state_dict
python
OpenTalker/video-retalking
third_part/face3d/models/base_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/base_model.py
Apache-2.0
def load_networks(self, epoch):
    """Load all the networks from the disk.

    Parameters:
        epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
    """
    if self.opt.isTrain and self.opt.pretrained_name is not None:
        load_dir = os.path.join(self.opt.checkpoints_dir, self.opt.pretrained_name)
    else:
        load_dir = self.save_dir
    load_filename = 'epoch_%s.pth' % (epoch)
    load_path = os.path.join(load_dir, load_filename)
    state_dict = torch.load(load_path, map_location=self.device)
    print('loading the model from %s' % load_path)

    for name in self.model_names:
        if isinstance(name, str):
            net = getattr(self, name)
            if isinstance(net, torch.nn.DataParallel):
                net = net.module
            net.load_state_dict(state_dict[name])

    if self.opt.phase != 'test':
        if self.opt.continue_train:
            print('loading the optim from %s' % load_path)
            for i, optim in enumerate(self.optimizers):
                optim.load_state_dict(state_dict['opt_%02d' % i])

            try:
                print('loading the sched from %s' % load_path)
                for i, sched in enumerate(self.schedulers):
                    sched.load_state_dict(state_dict['sched_%02d' % i])
            except Exception:
                print('Failed to load schedulers; setting schedulers according to epoch count manually')
                for i, sched in enumerate(self.schedulers):
                    sched.last_epoch = self.opt.epoch_count - 1
Load all the networks from the disk. Parameters: epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
load_networks
python
OpenTalker/video-retalking
third_part/face3d/models/base_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/base_model.py
Apache-2.0
def print_networks(self, verbose): """Print the total number of parameters in the network and (if verbose) network architecture Parameters: verbose (bool) -- if verbose: print the network architecture """ print('---------- Networks initialized -------------') for name in self.model_names: if isinstance(name, str): net = getattr(self, name) num_params = 0 for param in net.parameters(): num_params += param.numel() if verbose: print(net) print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6)) print('-----------------------------------------------')
Print the total number of parameters in the network and (if verbose) network architecture Parameters: verbose (bool) -- if verbose: print the network architecture
print_networks
python
OpenTalker/video-retalking
third_part/face3d/models/base_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/base_model.py
Apache-2.0
def set_requires_grad(self, nets, requires_grad=False):
    """Set requires_grad=False for all the networks to avoid unnecessary computations

    Parameters:
        nets (network list)  -- a list of networks
        requires_grad (bool) -- whether the networks require gradients or not
    """
    if not isinstance(nets, list):
        nets = [nets]
    for net in nets:
        if net is not None:
            for param in net.parameters():
                param.requires_grad = requires_grad
Set requires_grad=False for all the networks to avoid unnecessary computations Parameters: nets (network list) -- a list of networks requires_grad (bool) -- whether the networks require gradients or not
set_requires_grad
python
OpenTalker/video-retalking
third_part/face3d/models/base_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/base_model.py
Apache-2.0
def compute_shape(self, id_coeff, exp_coeff): """ Return: face_shape -- torch.tensor, size (B, N, 3) Parameters: id_coeff -- torch.tensor, size (B, 80), identity coeffs exp_coeff -- torch.tensor, size (B, 64), expression coeffs """ batch_size = id_coeff.shape[0] id_part = torch.einsum('ij,aj->ai', self.id_base, id_coeff) exp_part = torch.einsum('ij,aj->ai', self.exp_base, exp_coeff) face_shape = id_part + exp_part + self.mean_shape.reshape([1, -1]) return face_shape.reshape([batch_size, -1, 3])
Return: face_shape -- torch.tensor, size (B, N, 3) Parameters: id_coeff -- torch.tensor, size (B, 80), identity coeffs exp_coeff -- torch.tensor, size (B, 64), expression coeffs
compute_shape
python
OpenTalker/video-retalking
third_part/face3d/models/bfm.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/bfm.py
Apache-2.0
def compute_texture(self, tex_coeff, normalize=True): """ Return: face_texture -- torch.tensor, size (B, N, 3), in RGB order, range (0, 1.) Parameters: tex_coeff -- torch.tensor, size (B, 80) """ batch_size = tex_coeff.shape[0] face_texture = torch.einsum('ij,aj->ai', self.tex_base, tex_coeff) + self.mean_tex if normalize: face_texture = face_texture / 255. return face_texture.reshape([batch_size, -1, 3])
Return: face_texture -- torch.tensor, size (B, N, 3), in RGB order, range (0, 1.) Parameters: tex_coeff -- torch.tensor, size (B, 80)
compute_texture
python
OpenTalker/video-retalking
third_part/face3d/models/bfm.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/bfm.py
Apache-2.0
def compute_norm(self, face_shape): """ Return: vertex_norm -- torch.tensor, size (B, N, 3) Parameters: face_shape -- torch.tensor, size (B, N, 3) """ v1 = face_shape[:, self.face_buf[:, 0]] v2 = face_shape[:, self.face_buf[:, 1]] v3 = face_shape[:, self.face_buf[:, 2]] e1 = v1 - v2 e2 = v2 - v3 face_norm = torch.cross(e1, e2, dim=-1) face_norm = F.normalize(face_norm, dim=-1, p=2) face_norm = torch.cat([face_norm, torch.zeros(face_norm.shape[0], 1, 3).to(self.device)], dim=1) vertex_norm = torch.sum(face_norm[:, self.point_buf], dim=2) vertex_norm = F.normalize(vertex_norm, dim=-1, p=2) return vertex_norm
Return: vertex_norm -- torch.tensor, size (B, N, 3) Parameters: face_shape -- torch.tensor, size (B, N, 3)
compute_norm
python
OpenTalker/video-retalking
third_part/face3d/models/bfm.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/bfm.py
Apache-2.0
def compute_color(self, face_texture, face_norm, gamma): """ Return: face_color -- torch.tensor, size (B, N, 3), range (0, 1.) Parameters: face_texture -- torch.tensor, size (B, N, 3), from texture model, range (0, 1.) face_norm -- torch.tensor, size (B, N, 3), rotated face normal gamma -- torch.tensor, size (B, 27), SH coeffs """ batch_size = gamma.shape[0] v_num = face_texture.shape[1] a, c = self.SH.a, self.SH.c gamma = gamma.reshape([batch_size, 3, 9]) gamma = gamma + self.init_lit gamma = gamma.permute(0, 2, 1) Y = torch.cat([ a[0] * c[0] * torch.ones_like(face_norm[..., :1]).to(self.device), -a[1] * c[1] * face_norm[..., 1:2], a[1] * c[1] * face_norm[..., 2:], -a[1] * c[1] * face_norm[..., :1], a[2] * c[2] * face_norm[..., :1] * face_norm[..., 1:2], -a[2] * c[2] * face_norm[..., 1:2] * face_norm[..., 2:], 0.5 * a[2] * c[2] / np.sqrt(3.) * (3 * face_norm[..., 2:] ** 2 - 1), -a[2] * c[2] * face_norm[..., :1] * face_norm[..., 2:], 0.5 * a[2] * c[2] * (face_norm[..., :1] ** 2 - face_norm[..., 1:2] ** 2) ], dim=-1) r = Y @ gamma[..., :1] g = Y @ gamma[..., 1:2] b = Y @ gamma[..., 2:] face_color = torch.cat([r, g, b], dim=-1) * face_texture return face_color
Return: face_color -- torch.tensor, size (B, N, 3), range (0, 1.) Parameters: face_texture -- torch.tensor, size (B, N, 3), from texture model, range (0, 1.) face_norm -- torch.tensor, size (B, N, 3), rotated face normal gamma -- torch.tensor, size (B, 27), SH coeffs
compute_color
python
OpenTalker/video-retalking
third_part/face3d/models/bfm.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/bfm.py
Apache-2.0
def compute_rotation(self, angles):
    """
    Return:
        rot              -- torch.tensor, size (B, 3, 3), applied on the right: pts @ rot

    Parameters:
        angles           -- torch.tensor, size (B, 3), radian
    """
    batch_size = angles.shape[0]
    ones = torch.ones([batch_size, 1]).to(self.device)
    zeros = torch.zeros([batch_size, 1]).to(self.device)
    x, y, z = angles[:, :1], angles[:, 1:2], angles[:, 2:]

    rot_x = torch.cat([
        ones, zeros, zeros,
        zeros, torch.cos(x), -torch.sin(x),
        zeros, torch.sin(x), torch.cos(x)
    ], dim=1).reshape([batch_size, 3, 3])

    rot_y = torch.cat([
        torch.cos(y), zeros, torch.sin(y),
        zeros, ones, zeros,
        -torch.sin(y), zeros, torch.cos(y)
    ], dim=1).reshape([batch_size, 3, 3])

    rot_z = torch.cat([
        torch.cos(z), -torch.sin(z), zeros,
        torch.sin(z), torch.cos(z), zeros,
        zeros, zeros, ones
    ], dim=1).reshape([batch_size, 3, 3])

    rot = rot_z @ rot_y @ rot_x
    return rot.permute(0, 2, 1)
Return: rot -- torch.tensor, size (B, 3, 3), applied on the right: pts @ rot Parameters: angles -- torch.tensor, size (B, 3), radian
compute_rotation
python
OpenTalker/video-retalking
third_part/face3d/models/bfm.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/bfm.py
Apache-2.0
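A standalone numpy sketch of the same Euler convention (Rz @ Ry @ Rx, returned transposed so that points transform as pts @ rot), with an orthonormality check:

import numpy as np

def euler_to_rot(x, y, z):
    # Single-sample version of the batched construction above.
    cx, sx = np.cos(x), np.sin(x)
    cy, sy = np.cos(y), np.sin(y)
    cz, sz = np.cos(z), np.sin(z)
    rot_x = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
    rot_y = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    rot_z = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
    return (rot_z @ rot_y @ rot_x).T  # transpose -> row-vector convention

R = euler_to_rot(0.1, 0.2, 0.3)
assert np.allclose(R @ R.T, np.eye(3))  # rotations are orthonormal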
def to_image(self, face_shape): """ Return: face_proj -- torch.tensor, size (B, N, 2), y direction is opposite to v direction Parameters: face_shape -- torch.tensor, size (B, N, 3) """ # to image_plane face_proj = face_shape @ self.persc_proj face_proj = face_proj[..., :2] / face_proj[..., 2:] return face_proj
Return: face_proj -- torch.tensor, size (B, N, 2), y direction is opposite to v direction Parameters: face_shape -- torch.tensor, size (B, N, 3)
to_image
python
OpenTalker/video-retalking
third_part/face3d/models/bfm.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/bfm.py
Apache-2.0
def split_coeff(self, coeffs):
    """
    Return:
        coeffs_dict     -- a dict of torch.tensors

    Parameters:
        coeffs          -- torch.tensor, size (B, 257)
    """
    id_coeffs = coeffs[:, :80]
    exp_coeffs = coeffs[:, 80: 144]
    tex_coeffs = coeffs[:, 144: 224]
    angles = coeffs[:, 224: 227]
    gammas = coeffs[:, 227: 254]
    translations = coeffs[:, 254:]
    return {
        'id': id_coeffs,
        'exp': exp_coeffs,
        'tex': tex_coeffs,
        'angle': angles,
        'gamma': gammas,
        'trans': translations
    }
Return: coeffs_dict -- a dict of torch.tensors Parameters: coeffs -- torch.tensor, size (B, 257)
split_coeff
python
OpenTalker/video-retalking
third_part/face3d/models/bfm.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/bfm.py
Apache-2.0
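The slice boundaries in split_coeff imply a fixed 257-dimensional coefficient layout; a small sanity sketch of the sizes:

import torch

# 80 id + 64 exp + 80 tex + 3 angle + 27 gamma + 3 trans = 257
sizes = {'id': 80, 'exp': 64, 'tex': 80, 'angle': 3, 'gamma': 27, 'trans': 3}
assert sum(sizes.values()) == 257

coeffs = torch.zeros(2, 257)
trans = coeffs[:, 254:]
assert trans.shape == (2, 3)  # matches the 'trans' slice above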
def compute_for_render(self, coeffs): """ Return: face_vertex -- torch.tensor, size (B, N, 3), in camera coordinate face_color -- torch.tensor, size (B, N, 3), in RGB order landmark -- torch.tensor, size (B, 68, 2), y direction is opposite to v direction Parameters: coeffs -- torch.tensor, size (B, 257) """ coef_dict = self.split_coeff(coeffs) face_shape = self.compute_shape(coef_dict['id'], coef_dict['exp']) rotation = self.compute_rotation(coef_dict['angle']) face_shape_transformed = self.transform(face_shape, rotation, coef_dict['trans']) face_vertex = self.to_camera(face_shape_transformed) face_proj = self.to_image(face_vertex) landmark = self.get_landmarks(face_proj) face_texture = self.compute_texture(coef_dict['tex']) face_norm = self.compute_norm(face_shape) face_norm_roted = face_norm @ rotation face_color = self.compute_color(face_texture, face_norm_roted, coef_dict['gamma']) return face_vertex, face_texture, face_color, landmark
Return: face_vertex -- torch.tensor, size (B, N, 3), in camera coordinate face_color -- torch.tensor, size (B, N, 3), in RGB order landmark -- torch.tensor, size (B, 68, 2), y direction is opposite to v direction Parameters: coeffs -- torch.tensor, size (B, 257)
compute_for_render
python
OpenTalker/video-retalking
third_part/face3d/models/bfm.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/bfm.py
Apache-2.0
def modify_commandline_options(parser, is_train=True):
    """Configure options specific for the face reconstruction model
    """
    # net structure and parameters
    parser.add_argument('--net_recon', type=str, default='resnet50', choices=['resnet18', 'resnet34', 'resnet50'], help='network structure')
    parser.add_argument('--init_path', type=str, default='checkpoints/init_model/resnet50-0676ba61.pth')
    parser.add_argument('--use_last_fc', type=util.str2bool, nargs='?', const=True, default=False, help='zero initialize the last fc')
    parser.add_argument('--bfm_folder', type=str, default='BFM')
    parser.add_argument('--bfm_model', type=str, default='BFM_model_front.mat', help='bfm model')

    # renderer parameters
    parser.add_argument('--focal', type=float, default=1015.)
    parser.add_argument('--center', type=float, default=112.)
    parser.add_argument('--camera_d', type=float, default=10.)
    parser.add_argument('--z_near', type=float, default=5.)
    parser.add_argument('--z_far', type=float, default=15.)

    if is_train:
        # training parameters
        parser.add_argument('--net_recog', type=str, default='r50', choices=['r18', 'r34', 'r50'], help='face recog network structure')
        parser.add_argument('--net_recog_path', type=str, default='checkpoints/recog_model/ms1mv3_arcface_r50_fp16/backbone.pth')
        parser.add_argument('--use_crop_face', type=util.str2bool, nargs='?', const=True, default=False, help='use crop mask for photo loss')
        parser.add_argument('--use_predef_M', type=util.str2bool, nargs='?', const=True, default=False, help='use predefined M for predicted face')

        # augmentation parameters
        parser.add_argument('--shift_pixs', type=float, default=10., help='shift pixels')
        parser.add_argument('--scale_delta', type=float, default=0.1, help='delta scale factor')
        parser.add_argument('--rot_angle', type=float, default=10., help='rot angles, degree')

        # loss weights
        parser.add_argument('--w_feat', type=float, default=0.2, help='weight for feat loss')
        parser.add_argument('--w_color', type=float, default=1.92, help='weight for color loss')
        parser.add_argument('--w_reg', type=float, default=3.0e-4, help='weight for reg loss')
        parser.add_argument('--w_id', type=float, default=1.0, help='weight for id_reg loss')
        parser.add_argument('--w_exp', type=float, default=0.8, help='weight for exp_reg loss')
        parser.add_argument('--w_tex', type=float, default=1.7e-2, help='weight for tex_reg loss')
        parser.add_argument('--w_gamma', type=float, default=10.0, help='weight for gamma loss')
        parser.add_argument('--w_lm', type=float, default=1.6e-3, help='weight for lm loss')
        parser.add_argument('--w_reflc', type=float, default=5.0, help='weight for reflc loss')

    opt, _ = parser.parse_known_args()
    parser.set_defaults(
        focal=1015., center=112., camera_d=10., use_last_fc=False, z_near=5., z_far=15.
    )
    if is_train:
        parser.set_defaults(
            use_crop_face=True, use_predef_M=False
        )
    return parser
Configure options specific for the face reconstruction model
modify_commandline_options
python
OpenTalker/video-retalking
third_part/face3d/models/facerecon_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/facerecon_model.py
Apache-2.0
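With the defaults above (focal=1015, center=112), the rasterization field of view computed in the next record works out to roughly 12.6 degrees for a 224x224 crop (rasterize_size = 2 * center). A quick check:

import numpy as np

focal, center = 1015., 112.
fov = 2 * np.arctan(center / focal) * 180 / np.pi
print(round(fov, 2))  # ~12.59 degrees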
def __init__(self, opt):
    """Initialize this model class.

    Parameters:
        opt -- training/test options

    A few things can be done here.
    - (required) call the initialization function of BaseModel
    - define loss function, visualization images, model names, and optimizers
    """
    BaseModel.__init__(self, opt)  # call the initialization method of BaseModel

    self.visual_names = ['output_vis']
    self.model_names = ['net_recon']
    self.parallel_names = self.model_names + ['renderer']

    self.net_recon = networks.define_net_recon(
        net_recon=opt.net_recon, use_last_fc=opt.use_last_fc, init_path=opt.init_path
    )

    self.facemodel = ParametricFaceModel(
        bfm_folder=opt.bfm_folder, camera_distance=opt.camera_d, focal=opt.focal,
        center=opt.center, is_train=self.isTrain, default_name=opt.bfm_model
    )

    fov = 2 * np.arctan(opt.center / opt.focal) * 180 / np.pi
    self.renderer = MeshRenderer(
        rasterize_fov=fov, znear=opt.z_near, zfar=opt.z_far, rasterize_size=int(2 * opt.center)
    )

    if self.isTrain:
        self.loss_names = ['all', 'feat', 'color', 'lm', 'reg', 'gamma', 'reflc']

        self.net_recog = networks.define_net_recog(
            net_recog=opt.net_recog, pretrained_path=opt.net_recog_path
        )
        # loss func name: (compute_%s_loss) % loss_name
        self.compute_feat_loss = perceptual_loss
        self.compute_color_loss = photo_loss
        self.compute_lm_loss = landmark_loss
        self.compute_reg_loss = reg_loss
        self.compute_reflc_loss = reflectance_loss

        self.optimizer = torch.optim.Adam(self.net_recon.parameters(), lr=opt.lr)
        self.optimizers = [self.optimizer]
        self.parallel_names += ['net_recog']
    # Our program will automatically call <model.setup> to define schedulers, load networks, and print networks
Initialize this model class. Parameters: opt -- training/test options A few things can be done here. - (required) call the initialization function of BaseModel - define loss function, visualization images, model names, and optimizers
__init__
python
OpenTalker/video-retalking
third_part/face3d/models/facerecon_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/facerecon_model.py
Apache-2.0
def set_input(self, input):
    """Unpack input data from the dataloader and perform necessary pre-processing steps.

    Parameters:
        input: a dictionary that contains the data itself and its metadata information.
    """
    self.input_img = input['imgs'].to(self.device)
    self.atten_mask = input['msks'].to(self.device) if 'msks' in input else None
    self.gt_lm = input['lms'].to(self.device) if 'lms' in input else None
    self.trans_m = input['M'].to(self.device) if 'M' in input else None
    self.image_paths = input['im_paths'] if 'im_paths' in input else None
Unpack input data from the dataloader and perform necessary pre-processing steps. Parameters: input: a dictionary that contains the data itself and its metadata information.
set_input
python
OpenTalker/video-retalking
third_part/face3d/models/facerecon_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/facerecon_model.py
Apache-2.0
def compute_losses(self):
    """Calculate losses; called in every training iteration"""
    assert self.net_recog.training == False
    trans_m = self.trans_m
    if not self.opt.use_predef_M:
        trans_m = estimate_norm_torch(self.pred_lm, self.input_img.shape[-2])

    pred_feat = self.net_recog(self.pred_face, trans_m)
    gt_feat = self.net_recog(self.input_img, self.trans_m)
    self.loss_feat = self.opt.w_feat * self.compute_feat_loss(pred_feat, gt_feat)

    face_mask = self.pred_mask
    if self.opt.use_crop_face:
        face_mask, _, _ = self.renderer(self.pred_vertex, self.facemodel.front_face_buf)

    face_mask = face_mask.detach()
    self.loss_color = self.opt.w_color * self.compute_color_loss(
        self.pred_face, self.input_img, self.atten_mask * face_mask)

    loss_reg, loss_gamma = self.compute_reg_loss(self.pred_coeffs_dict, self.opt)
    self.loss_reg = self.opt.w_reg * loss_reg
    self.loss_gamma = self.opt.w_gamma * loss_gamma

    self.loss_lm = self.opt.w_lm * self.compute_lm_loss(self.pred_lm, self.gt_lm)

    self.loss_reflc = self.opt.w_reflc * self.compute_reflc_loss(self.pred_tex, self.facemodel.skin_mask)

    self.loss_all = self.loss_feat + self.loss_color + self.loss_reg + self.loss_gamma \
                    + self.loss_lm + self.loss_reflc
Calculate losses; called in every training iteration
compute_losses
python
OpenTalker/video-retalking
third_part/face3d/models/facerecon_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/facerecon_model.py
Apache-2.0
def forward(self, imageA, imageB, M):
    """
    1 - cosine distance
    Parameters:
        imageA       --torch.tensor (B, 3, H, W), range (0, 1), RGB order
        imageB       --same as imageA
    """
    imageA = self.preprocess(resize_n_crop(imageA, M, self.input_size))
    imageB = self.preprocess(resize_n_crop(imageB, M, self.input_size))

    # freeze bn
    self.recog_net.eval()

    id_featureA = F.normalize(self.recog_net(imageA), dim=-1, p=2)
    id_featureB = F.normalize(self.recog_net(imageB), dim=-1, p=2)
    cosine_d = torch.sum(id_featureA * id_featureB, dim=-1)
    # assert torch.sum((cosine_d > 1).float()) == 0
    return torch.sum(1 - cosine_d) / cosine_d.shape[0]
1 - cosine distance Parameters: imageA --torch.tensor (B, 3, H, W), range (0, 1) , RGB order imageB --same as imageA
forward
python
OpenTalker/video-retalking
third_part/face3d/models/losses.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/losses.py
Apache-2.0
def photo_loss(imageA, imageB, mask, eps=1e-6):
    """
    l2 norm (with sqrt; to ensure backward stability, use eps, otherwise NaN may occur)
    Parameters:
        imageA       --torch.tensor (B, 3, H, W), range (0, 1), RGB order
        imageB       --same as imageA
    """
    loss = torch.sqrt(eps + torch.sum((imageA - imageB) ** 2, dim=1, keepdims=True)) * mask
    loss = torch.sum(loss) / torch.max(torch.sum(mask), torch.tensor(1.0).to(mask.device))
    return loss
l2 norm (with sqrt; to ensure backward stability, use eps, otherwise NaN may occur) Parameters: imageA --torch.tensor (B, 3, H, W), range (0, 1), RGB order imageB --same as imageA
photo_loss
python
OpenTalker/video-retalking
third_part/face3d/models/losses.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/losses.py
Apache-2.0
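Assuming the photo_loss above is in scope, a minimal usage sketch with illustrative shapes shows that the result is a scalar (the masked mean of per-pixel l2 distances):

import torch

imageA = torch.rand(2, 3, 224, 224)
imageB = torch.rand(2, 3, 224, 224)
mask = torch.ones(2, 1, 224, 224)   # hypothetical all-ones skin mask

loss = photo_loss(imageA, imageB, mask)
assert loss.ndim == 0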
def landmark_loss(predict_lm, gt_lm, weight=None):
    """
    weighted mse loss
    Parameters:
        predict_lm    --torch.tensor (B, 68, 2)
        gt_lm         --torch.tensor (B, 68, 2)
        weight        --numpy.array (1, 68)
    """
    # "if not weight" raises on multi-element arrays; test for None explicitly
    if weight is None:
        weight = np.ones([68])
        weight[28:31] = 20
        weight[-8:] = 20
        weight = np.expand_dims(weight, 0)
        weight = torch.tensor(weight).to(predict_lm.device)
    loss = torch.sum((predict_lm - gt_lm) ** 2, dim=-1) * weight
    loss = torch.sum(loss) / (predict_lm.shape[0] * predict_lm.shape[1])
    return loss
weighted mse loss Parameters: predict_lm --torch.tensor (B, 68, 2) gt_lm --torch.tensor (B, 68, 2) weight --numpy.array (1, 68)
landmark_loss
python
OpenTalker/video-retalking
third_part/face3d/models/losses.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/losses.py
Apache-2.0
def reg_loss(coeffs_dict, opt=None):
    """
    l2 norm without the sqrt, from yu's implementation (mse)
    tf.nn.l2_loss https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss
    Parameters:
        coeffs_dict     -- a dict of torch.tensors, keys: id, exp, tex, angle, gamma, trans
    """
    # coefficient regularization to ensure plausible 3d faces
    if opt:
        w_id, w_exp, w_tex = opt.w_id, opt.w_exp, opt.w_tex
    else:
        w_id, w_exp, w_tex = 1, 1, 1
    creg_loss = w_id * torch.sum(coeffs_dict['id'] ** 2) + \
                w_exp * torch.sum(coeffs_dict['exp'] ** 2) + \
                w_tex * torch.sum(coeffs_dict['tex'] ** 2)
    creg_loss = creg_loss / coeffs_dict['id'].shape[0]

    # gamma regularization to ensure a nearly-monochromatic light
    gamma = coeffs_dict['gamma'].reshape([-1, 3, 9])
    gamma_mean = torch.mean(gamma, dim=1, keepdims=True)
    gamma_loss = torch.mean((gamma - gamma_mean) ** 2)
    return creg_loss, gamma_loss
l2 norm without the sqrt, from yu's implementation (mse) tf.nn.l2_loss https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss Parameters: coeffs_dict -- a dict of torch.tensors , keys: id, exp, tex, angle, gamma, trans
reg_loss
python
OpenTalker/video-retalking
third_part/face3d/models/losses.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/losses.py
Apache-2.0
def reflectance_loss(texture, mask):
    """
    minimize texture variance (mse), albedo regularization to ensure a uniform skin albedo
    Parameters:
        texture       --torch.tensor, (B, N, 3)
        mask          --torch.tensor, (N), 1 or 0
    """
    mask = mask.reshape([1, mask.shape[0], 1])
    texture_mean = torch.sum(mask * texture, dim=1, keepdims=True) / torch.sum(mask)
    loss = torch.sum(((texture - texture_mean) * mask) ** 2) / (texture.shape[0] * torch.sum(mask))
    return loss
minimize texture variance (mse), albedo regularization to ensure a uniform skin albedo Parameters: texture --torch.tensor, (B, N, 3) mask --torch.tensor, (N), 1 or 0
reflectance_loss
python
OpenTalker/video-retalking
third_part/face3d/models/losses.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/losses.py
Apache-2.0
def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNeXt-50 32x4d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 4
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)
ResNeXt-50 32x4d model from `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
resnext50_32x4d
python
OpenTalker/video-retalking
third_part/face3d/models/networks.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/networks.py
Apache-2.0
def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNeXt-101 32x8d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 8
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
ResNeXt-101 32x8d model from `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
resnext101_32x8d
python
OpenTalker/video-retalking
third_part/face3d/models/networks.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/networks.py
Apache-2.0
def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Wide ResNet-50-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.

    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)
Wide ResNet-50-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_. The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
wide_resnet50_2
python
OpenTalker/video-retalking
third_part/face3d/models/networks.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/networks.py
Apache-2.0
def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.

    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
Wide ResNet-101-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_. The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
wide_resnet101_2
python
OpenTalker/video-retalking
third_part/face3d/models/networks.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/networks.py
Apache-2.0
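The four factories above mirror torchvision's resnet module. As a hedged usage sketch against the public torchvision API (the pretrained= form is the older interface; recent releases use weights= instead):

import torch
import torchvision.models as models

model = models.resnext50_32x4d(pretrained=False)
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(x)
assert logits.shape == (1, 1000)  # ImageNet classification head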
def modify_commandline_options(parser, is_train=True):
    """Add new model-specific options and rewrite default values for existing options.

    Parameters:
        parser -- the option parser
        is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options.

    Returns:
        the modified parser.
    """
    parser.set_defaults(dataset_mode='aligned')  # You can rewrite default values for this model. For example, this model usually uses aligned dataset as its dataset.
    if is_train:
        parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss')  # You can define new arguments for this model.

    return parser
Add new model-specific options and rewrite default values for existing options. Parameters: parser -- the option parser is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options. Returns: the modified parser.
modify_commandline_options
python
OpenTalker/video-retalking
third_part/face3d/models/template_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/template_model.py
Apache-2.0
def __init__(self, opt):
    """Initialize this model class.

    Parameters:
        opt -- training/test options

    A few things can be done here.
    - (required) call the initialization function of BaseModel
    - define loss function, visualization images, model names, and optimizers
    """
    BaseModel.__init__(self, opt)  # call the initialization method of BaseModel
    # specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
    self.loss_names = ['loss_G']
    # specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
    self.visual_names = ['data_A', 'data_B', 'output']
    # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
    # you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
    self.model_names = ['G']
    # define networks; you can use opt.isTrain to specify different behaviors for training and test.
    self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids)
    if self.isTrain:  # only defined during training time
        # define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss.
        # We also provide a GANLoss class "networks.GANLoss".
        self.criterionGAN = networks.GANLoss().to(self.device)
        self.criterionLoss = torch.nn.L1Loss()
        # define and initialize optimizers. You can define one optimizer for each network.
        # If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
        self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
        self.optimizers = [self.optimizer]
    # Our program will automatically call <model.setup> to define schedulers, load networks, and print networks
Initialize this model class. Parameters: opt -- training/test options A few things can be done here. - (required) call the initialization function of BaseModel - define loss function, visualization images, model names, and optimizers
__init__
python
OpenTalker/video-retalking
third_part/face3d/models/template_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/template_model.py
Apache-2.0
def set_input(self, input):
    """Unpack input data from the dataloader and perform necessary pre-processing steps.

    Parameters:
        input: a dictionary that contains the data itself and its metadata information.
    """
    AtoB = self.opt.direction == 'AtoB'  # use <direction> to swap data_A and data_B
    self.data_A = input['A' if AtoB else 'B'].to(self.device)  # get image data A
    self.data_B = input['B' if AtoB else 'A'].to(self.device)  # get image data B
    self.image_paths = input['A_paths' if AtoB else 'B_paths']  # get image paths
Unpack input data from the dataloader and perform necessary pre-processing steps. Parameters: input: a dictionary that contains the data itself and its metadata information.
set_input
python
OpenTalker/video-retalking
third_part/face3d/models/template_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/template_model.py
Apache-2.0
def backward(self):
    """Calculate losses, gradients, and update network weights; called in every training iteration"""
    # calculate the intermediate results if necessary; here self.output has been computed during function <forward>
    # calculate loss given the input and intermediate results
    self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression
    self.loss_G.backward()  # calculate gradients of network G w.r.t. loss_G
Calculate losses, gradients, and update network weights; called in every training iteration
backward
python
OpenTalker/video-retalking
third_part/face3d/models/template_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/template_model.py
Apache-2.0
def optimize_parameters(self):
    """Update network weights; it will be called in every training iteration."""
    self.forward()               # first call forward to calculate intermediate results
    self.optimizer.zero_grad()   # clear network G's existing gradients
    self.backward()              # calculate gradients for network G
    self.optimizer.step()        # update gradients for network G
Update network weights; it will be called in every training iteration.
optimize_parameters
python
OpenTalker/video-retalking
third_part/face3d/models/template_model.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/template_model.py
Apache-2.0
def find_model_using_name(model_name):
    """Import the module "models/[model_name]_model.py".

    In the file, the class called DatasetNameModel() will
    be instantiated. It has to be a subclass of BaseModel,
    and it is case-insensitive.
    """
    model_filename = "face3d.models." + model_name + "_model"
    modellib = importlib.import_module(model_filename)
    model = None
    target_model_name = model_name.replace('_', '') + 'model'
    for name, cls in modellib.__dict__.items():
        if name.lower() == target_model_name.lower() \
           and issubclass(cls, BaseModel):
            model = cls

    if model is None:
        print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
        exit(0)

    return model
Import the module "models/[model_name]_model.py". In the file, the class called DatasetNameModel() will be instantiated. It has to be a subclass of BaseModel, and it is case-insensitive.
find_model_using_name
python
OpenTalker/video-retalking
third_part/face3d/models/__init__.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/__init__.py
Apache-2.0
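The lookup relies on a naming convention: model name 'facerecon' maps to module face3d.models.facerecon_model and to a class whose lowercased name equals 'facereconmodel'. A generic standalone variant of the same pattern is sketched below (names illustrative, not from the repo); the added isinstance guard avoids issubclass raising on non-class module attributes:

import importlib

def find_class_by_convention(package, model_name, base_cls):
    # "facerecon" -> module "<package>.facerecon_model", class "FaceReconModel"
    module = importlib.import_module("%s.%s_model" % (package, model_name))
    target = model_name.replace('_', '') + 'model'
    for name, cls in module.__dict__.items():
        if name.lower() == target and isinstance(cls, type) and issubclass(cls, base_cls):
            return cls
    raise ImportError("no subclass of %s named %s in %s"
                      % (base_cls.__name__, target, module.__name__))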
def create_model(opt):
    """Create a model given the option.

    This function wraps the chosen model class.
    This is the main interface between this package and 'train.py'/'test.py'

    Example:
        >>> from models import create_model
        >>> model = create_model(opt)
    """
    model = find_model_using_name(opt.model)
    instance = model(opt)
    print("model [%s] was created" % type(instance).__name__)
    return instance
Create a model given the option. This function wraps the chosen model class. This is the main interface between this package and 'train.py'/'test.py' Example: >>> from models import create_model >>> model = create_model(opt)
create_model
python
OpenTalker/video-retalking
third_part/face3d/models/__init__.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/__init__.py
Apache-2.0
def __init__(self, rank, local_rank, world_size, batch_size, resume,
             margin_softmax, num_classes, sample_rate=1.0, embedding_size=512, prefix="./"):
    """
    rank: int
        Unique process(GPU) ID from 0 to world_size - 1.
    local_rank: int
        Unique process(GPU) ID within the server from 0 to 7.
    world_size: int
        Number of GPU.
    batch_size: int
        Batch size on current rank(GPU).
    resume: bool
        Select whether to restore the weight of softmax.
    margin_softmax: callable
        A function of margin softmax, eg: cosface, arcface.
    num_classes: int
        The number of class center storage in current rank(CPU/GPU), usually is total_classes // world_size, required.
    sample_rate: float
        The partial fc sampling rate, when the number of classes increases to more than 2 millions, Sampling
        can greatly speed up training, and reduce a lot of GPU memory, default is 1.0.
    embedding_size: int
        The feature dimension, default is 512.
    prefix: str
        Path for save checkpoint, default is './'.
    """
    super(PartialFC, self).__init__()
    #
    self.num_classes: int = num_classes
    self.rank: int = rank
    self.local_rank: int = local_rank
    self.device: torch.device = torch.device("cuda:{}".format(self.local_rank))
    self.world_size: int = world_size
    self.batch_size: int = batch_size
    self.margin_softmax: callable = margin_softmax
    self.sample_rate: float = sample_rate
    self.embedding_size: int = embedding_size
    self.prefix: str = prefix
    self.num_local: int = num_classes // world_size + int(rank < num_classes % world_size)
    self.class_start: int = num_classes // world_size * rank + min(rank, num_classes % world_size)
    self.num_sample: int = int(self.sample_rate * self.num_local)

    self.weight_name = os.path.join(self.prefix, "rank_{}_softmax_weight.pt".format(self.rank))
    self.weight_mom_name = os.path.join(self.prefix, "rank_{}_softmax_weight_mom.pt".format(self.rank))

    if resume:
        try:
            self.weight: torch.Tensor = torch.load(self.weight_name)
            self.weight_mom: torch.Tensor = torch.load(self.weight_mom_name)
            if self.weight.shape[0] != self.num_local or self.weight_mom.shape[0] != self.num_local:
                raise IndexError
            logging.info("softmax weight resume successfully!")
            logging.info("softmax weight mom resume successfully!")
        except (FileNotFoundError, KeyError, IndexError):
            self.weight = torch.normal(0, 0.01, (self.num_local, self.embedding_size), device=self.device)
            self.weight_mom: torch.Tensor = torch.zeros_like(self.weight)
            logging.info("softmax weight init!")
            logging.info("softmax weight mom init!")
    else:
        self.weight = torch.normal(0, 0.01, (self.num_local, self.embedding_size), device=self.device)
        self.weight_mom: torch.Tensor = torch.zeros_like(self.weight)
        logging.info("softmax weight init successfully!")
        logging.info("softmax weight mom init successfully!")
    self.stream: torch.cuda.Stream = torch.cuda.Stream(local_rank)

    self.index = None
    if int(self.sample_rate) == 1:
        self.update = lambda: 0
        self.sub_weight = Parameter(self.weight)
        self.sub_weight_mom = self.weight_mom
    else:
        self.sub_weight = Parameter(torch.empty((0, 0)).cuda(local_rank))
rank: int Unique process(GPU) ID from 0 to world_size - 1. local_rank: int Unique process(GPU) ID within the server from 0 to 7. world_size: int Number of GPU. batch_size: int Batch size on current rank(GPU). resume: bool Select whether to restore the weight of softmax. margin_softmax: callable A function of margin softmax, eg: cosface, arcface. num_classes: int The number of class center storage in current rank(CPU/GPU), usually is total_classes // world_size, required. sample_rate: float The partial fc sampling rate, when the number of classes increases to more than 2 millions, Sampling can greatly speed up training, and reduce a lot of GPU memory, default is 1.0. embedding_size: int The feature dimension, default is 512. prefix: str Path for save checkpoint, default is './'.
__init__
python
OpenTalker/video-retalking
third_part/face3d/models/arcface_torch/partial_fc.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/arcface_torch/partial_fc.py
Apache-2.0
def sample(self, total_label):
    """
    Sample all positive class centers in each rank, and randomly select negative class centers
    to fill a fixed `num_sample`.

    total_label: tensor
        Label after all gather, which crosses all GPUs.
    """
    index_positive = (self.class_start <= total_label) & (total_label < self.class_start + self.num_local)
    total_label[~index_positive] = -1
    total_label[index_positive] -= self.class_start
    if int(self.sample_rate) != 1:
        positive = torch.unique(total_label[index_positive], sorted=True)
        if self.num_sample - positive.size(0) >= 0:
            perm = torch.rand(size=[self.num_local], device=self.device)
            perm[positive] = 2.0
            index = torch.topk(perm, k=self.num_sample)[1]
            index = index.sort()[0]
        else:
            index = positive
        self.index = index
        total_label[index_positive] = torch.searchsorted(index, total_label[index_positive])
        self.sub_weight = Parameter(self.weight[index])
        self.sub_weight_mom = self.weight_mom[index]
Sample all positive class centers in each rank, and randomly select negative class centers to fill a fixed `num_sample`. total_label: tensor Label after all gather, which crosses all GPUs.
sample
python
OpenTalker/video-retalking
third_part/face3d/models/arcface_torch/partial_fc.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/arcface_torch/partial_fc.py
Apache-2.0
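The sampling trick above assigns every positive class a score of 2.0 in a uniform(0, 1) vector, so topk keeps all positives and fills the remaining slots with uniformly random negatives. A standalone illustration (values here are hypothetical):

import torch

num_local, num_sample = 10, 5
positive = torch.tensor([1, 4, 7])      # classes present in the batch
perm = torch.rand(num_local)
perm[positive] = 2.0                    # positives outrank any random negative
index = torch.topk(perm, k=num_sample)[1].sort()[0]
assert all(p in index for p in positive.tolist())  # every positive center is kept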
def update(self):
    """ Set updated weight and weight_mom to memory bank.
    """
    self.weight_mom[self.index] = self.sub_weight_mom
    self.weight[self.index] = self.sub_weight
Set updated weight and weight_mom to memory bank.
update
python
OpenTalker/video-retalking
third_part/face3d/models/arcface_torch/partial_fc.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/arcface_torch/partial_fc.py
Apache-2.0
def prepare(self, label, optimizer):
    """
    Get sampled class centers for calculating softmax.

    label: tensor
        Label tensor on each rank.
    optimizer: opt
        Optimizer for partial fc, which needs to get weight mom.
    """
    with torch.cuda.stream(self.stream):
        total_label = torch.zeros(
            size=[self.batch_size * self.world_size], device=self.device, dtype=torch.long)
        dist.all_gather(list(total_label.chunk(self.world_size, dim=0)), label)
        self.sample(total_label)
        optimizer.state.pop(optimizer.param_groups[-1]['params'][0], None)
        optimizer.param_groups[-1]['params'][0] = self.sub_weight
        optimizer.state[self.sub_weight]['momentum_buffer'] = self.sub_weight_mom
        norm_weight = normalize(self.sub_weight)
        return total_label, norm_weight
Get sampled class centers for calculating softmax. label: tensor Label tensor on each rank. optimizer: opt Optimizer for partial fc, which needs to get weight mom.
prepare
python
OpenTalker/video-retalking
third_part/face3d/models/arcface_torch/partial_fc.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/arcface_torch/partial_fc.py
Apache-2.0
def forward_backward(self, label, features, optimizer):
    """
    Partial fc forward and backward with model parallel

    label: tensor
        Label tensor on each rank(GPU)
    features: tensor
        Features tensor on each rank(GPU)
    optimizer: optimizer
        Optimizer for partial fc

    Returns:
    --------
    x_grad: tensor
        The gradient of features.
    loss_v: tensor
        Loss value for cross entropy.
    """
    total_label, norm_weight = self.prepare(label, optimizer)
    total_features = torch.zeros(
        size=[self.batch_size * self.world_size, self.embedding_size], device=self.device)
    dist.all_gather(list(total_features.chunk(self.world_size, dim=0)), features.data)
    total_features.requires_grad = True

    logits = self.forward(total_features, norm_weight)
    logits = self.margin_softmax(logits, total_label)

    with torch.no_grad():
        max_fc = torch.max(logits, dim=1, keepdim=True)[0]
        dist.all_reduce(max_fc, dist.ReduceOp.MAX)

        # calculate exp(logits) and all-reduce
        logits_exp = torch.exp(logits - max_fc)
        logits_sum_exp = logits_exp.sum(dim=1, keepdims=True)
        dist.all_reduce(logits_sum_exp, dist.ReduceOp.SUM)

        # calculate prob
        logits_exp.div_(logits_sum_exp)

        # get one-hot
        grad = logits_exp
        index = torch.where(total_label != -1)[0]
        one_hot = torch.zeros(size=[index.size()[0], grad.size()[1]], device=grad.device)
        one_hot.scatter_(1, total_label[index, None], 1)

        # calculate loss
        loss = torch.zeros(grad.size()[0], 1, device=grad.device)
        loss[index] = grad[index].gather(1, total_label[index, None])
        dist.all_reduce(loss, dist.ReduceOp.SUM)
        loss_v = loss.clamp_min_(1e-30).log_().mean() * (-1)

        # calculate grad
        grad[index] -= one_hot
        grad.div_(self.batch_size * self.world_size)

    logits.backward(grad)
    if total_features.grad is not None:
        total_features.grad.detach_()
    x_grad: torch.Tensor = torch.zeros_like(features, requires_grad=True)
    # feature gradient all-reduce
    dist.reduce_scatter(x_grad, list(total_features.grad.chunk(self.world_size, dim=0)))
    x_grad = x_grad * self.world_size
    # backward backbone
    return x_grad, loss_v
Partial fc forward and backward with model parallel label: tensor Label tensor on each rank(GPU) features: tensor Features tensor on each rank(GPU) optimizer: optimizer Optimizer for partial fc Returns: -------- x_grad: tensor The gradient of features. loss_v: tensor Loss value for cross entropy.
forward_backward
python
OpenTalker/video-retalking
third_part/face3d/models/arcface_torch/partial_fc.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/arcface_torch/partial_fc.py
Apache-2.0
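forward_backward implements a model-parallel, numerically stable softmax cross entropy: subtract the global max, exponentiate, divide by the global sum, and the gradient is the probability matrix minus a one-hot at the label. A single-process sketch of the same math, checked against PyTorch's built-in (the all-reduce steps collapse to local max/sum here):

import torch

logits = torch.randn(4, 10)
labels = torch.randint(0, 10, (4,))

max_fc = logits.max(dim=1, keepdim=True)[0]    # all_reduce(MAX) in the parallel version
e = torch.exp(logits - max_fc)
probs = e / e.sum(dim=1, keepdim=True)         # all_reduce(SUM) in the parallel version

loss = -probs.gather(1, labels[:, None]).clamp_min(1e-30).log().mean()
ref = torch.nn.functional.cross_entropy(logits, labels)
assert torch.allclose(loss, ref, atol=1e-5)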
def scale(self, outputs):
    """
    Multiplies ('scales') a tensor or list of tensors by the scale factor.

    Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled,
    outputs are returned unmodified.

    Arguments:
        outputs (Tensor or iterable of Tensors): Outputs to scale.
    """
    if not self._enabled:
        return outputs

    self.scale_clip()
    # Short-circuit for the common case.
    if isinstance(outputs, torch.Tensor):
        assert outputs.is_cuda
        if self._scale is None:
            self._lazy_init_scale_growth_tracker(outputs.device)
        assert self._scale is not None
        return outputs * self._scale.to(device=outputs.device, non_blocking=True)

    # Invoke the more complex machinery only if we're treating multiple outputs.
    stash: List[_MultiDeviceReplicator] = []  # holds a reference that can be overwritten by apply_scale

    def apply_scale(val):
        if isinstance(val, torch.Tensor):
            assert val.is_cuda
            if len(stash) == 0:
                if self._scale is None:
                    self._lazy_init_scale_growth_tracker(val.device)
                assert self._scale is not None
                stash.append(_MultiDeviceReplicator(self._scale))
            return val * stash[0].get(val.device)
        elif isinstance(val, Iterable):
            iterable = map(apply_scale, val)
            if isinstance(val, list) or isinstance(val, tuple):
                return type(val)(iterable)
            else:
                return iterable
        else:
            raise ValueError("outputs must be a Tensor or an iterable of Tensors")

    return apply_scale(outputs)
Multiplies ('scales') a tensor or list of tensors by the scale factor. Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned unmodified. Arguments: outputs (Tensor or iterable of Tensors): Outputs to scale.
scale
python
OpenTalker/video-retalking
third_part/face3d/models/arcface_torch/utils/utils_amp.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/arcface_torch/utils/utils_amp.py
Apache-2.0
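This method mirrors torch.cuda.amp.GradScaler.scale (plus the repo's scale_clip hook). The usual training-loop pattern with the stock PyTorch scaler looks like this; a sketch that requires a CUDA device:

import torch

model = torch.nn.Linear(8, 2).cuda()
opt = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = torch.cuda.amp.GradScaler()

x, y = torch.randn(4, 8).cuda(), torch.randint(0, 2, (4,)).cuda()
with torch.cuda.amp.autocast():
    loss = torch.nn.functional.cross_entropy(model(x), y)
scaler.scale(loss).backward()   # scaled loss -> scaled grads
scaler.step(opt)                # unscales grads, skips the step on inf/nan
scaler.update()                 # adjusts the scale factor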
def __init__(self, cmd_line=None):
    """Reset the class; indicates the class hasn't been initialized"""
    self.initialized = False
    self.cmd_line = None
    if cmd_line is not None:
        self.cmd_line = cmd_line.split()
Reset the class; indicates the class hasn't been initialized
__init__
python
OpenTalker/video-retalking
third_part/face3d/options/base_options.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/options/base_options.py
Apache-2.0
def initialize(self, parser):
    """Define the common options that are used in both training and test."""
    # basic parameters
    parser.add_argument('--name', type=str, default='face_recon', help='name of the experiment. It decides where to store samples and models')
    parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0  0,1,2, 0,2. use -1 for CPU')
    parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
    parser.add_argument('--vis_batch_nums', type=float, default=1, help='batch nums of images for visualization')
    parser.add_argument('--eval_batch_nums', type=float, default=float('inf'), help='batch nums of images for evaluation')
    parser.add_argument('--use_ddp', type=util.str2bool, nargs='?', const=True, default=True, help='whether use distributed data parallel')
    parser.add_argument('--ddp_port', type=str, default='12355', help='ddp port')
    parser.add_argument('--display_per_batch', type=util.str2bool, nargs='?', const=True, default=True, help='whether use batch to show losses')
    parser.add_argument('--add_image', type=util.str2bool, nargs='?', const=True, default=True, help='whether add image to tensorboard')
    parser.add_argument('--world_size', type=int, default=1, help='number of processes for distributed training')

    # model parameters
    parser.add_argument('--model', type=str, default='facerecon', help='chooses which model to use.')

    # additional parameters
    parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
    parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
    parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')

    self.initialized = True
    return parser
Define the common options that are used in both training and test.
initialize
python
OpenTalker/video-retalking
third_part/face3d/options/base_options.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/options/base_options.py
Apache-2.0
def gather_options(self):
    """Initialize our parser with basic options (only once).
    Add additional model-specific and dataset-specific options.
    These options are defined in the <modify_commandline_options> function
    in model and dataset classes.
    """
    if not self.initialized:  # check if it has been initialized
        parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser = self.initialize(parser)

    # get the basic options
    if self.cmd_line is None:
        opt, _ = parser.parse_known_args()
    else:
        opt, _ = parser.parse_known_args(self.cmd_line)

    # set cuda visible devices
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_ids

    # modify model-related parser options
    model_name = opt.model
    model_option_setter = models.get_option_setter(model_name)
    parser = model_option_setter(parser, self.isTrain)
    if self.cmd_line is None:
        opt, _ = parser.parse_known_args()  # parse again with new defaults
    else:
        opt, _ = parser.parse_known_args(self.cmd_line)  # parse again with new defaults

    # modify dataset-related parser options
    if opt.dataset_mode:
        dataset_name = opt.dataset_mode
        dataset_option_setter = data.get_option_setter(dataset_name)
        parser = dataset_option_setter(parser, self.isTrain)

    # save and return the parser
    self.parser = parser
    if self.cmd_line is None:
        return parser.parse_args()
    else:
        return parser.parse_args(self.cmd_line)
Initialize our parser with basic options(only once). Add additional model-specific and dataset-specific options. These options are defined in the <modify_commandline_options> function in model and dataset classes.
gather_options
python
OpenTalker/video-retalking
third_part/face3d/options/base_options.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/options/base_options.py
Apache-2.0
def print_options(self, opt):
    """Print and save options

    It will print both current options and default values (if different).
    It will save options into a text file / [checkpoints_dir] / opt.txt
    """
    message = ''
    message += '----------------- Options ---------------\n'
    for k, v in sorted(vars(opt).items()):
        comment = ''
        default = self.parser.get_default(k)
        if v != default:
            comment = '\t[default: %s]' % str(default)
        message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
    message += '----------------- End -------------------'
    print(message)

    # save to the disk
    expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
    util.mkdirs(expr_dir)
    file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
    try:
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')
    except PermissionError as error:
        print("permission error {}".format(error))
        pass
Print and save options It will print both current options and default values(if different). It will save options into a text file / [checkpoints_dir] / opt.txt
print_options
python
OpenTalker/video-retalking
third_part/face3d/options/base_options.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/options/base_options.py
Apache-2.0
def parse(self):
    """Parse our options, create checkpoints directory suffix, and set up gpu device."""
    opt = self.gather_options()
    opt.isTrain = self.isTrain  # train or test

    # process opt.suffix
    if opt.suffix:
        suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
        opt.name = opt.name + suffix

    # set gpu ids
    str_ids = opt.gpu_ids.split(',')
    gpu_ids = []
    for str_id in str_ids:
        id = int(str_id)
        if id >= 0:
            gpu_ids.append(id)
    opt.world_size = len(gpu_ids)
    # if len(opt.gpu_ids) > 0:
    #     torch.cuda.set_device(gpu_ids[0])
    if opt.world_size == 1:
        opt.use_ddp = False

    if opt.phase != 'test':
        # set continue_train automatically
        if opt.pretrained_name is None:
            model_dir = os.path.join(opt.checkpoints_dir, opt.name)
        else:
            model_dir = os.path.join(opt.checkpoints_dir, opt.pretrained_name)
        if os.path.isdir(model_dir):
            model_pths = [i for i in os.listdir(model_dir) if i.endswith('pth')]
            if os.path.isdir(model_dir) and len(model_pths) != 0:
                opt.continue_train = True

        # update the latest epoch count
        if opt.continue_train:
            if opt.epoch == 'latest':
                epoch_counts = [int(i.split('.')[0].split('_')[-1]) for i in model_pths if 'latest' not in i]
                if len(epoch_counts) != 0:
                    opt.epoch_count = max(epoch_counts) + 1
            else:
                opt.epoch_count = int(opt.epoch) + 1
        self.print_options(opt)

    self.opt = opt
    return self.opt
Parse our options, create checkpoints directory suffix, and set up gpu device.
parse
python
OpenTalker/video-retalking
third_part/face3d/options/base_options.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/options/base_options.py
Apache-2.0
def __init__(self, web_dir, title, refresh=0):
    """Initialize the HTML classes

    Parameters:
        web_dir (str) -- a directory that stores the webpage. HTML file will be created at <web_dir>/index.html; images will be saved at <web_dir>/images/
        title (str)   -- the webpage name
        refresh (int) -- how often the website refresh itself; if 0, no refreshing
    """
    self.title = title
    self.web_dir = web_dir
    self.img_dir = os.path.join(self.web_dir, 'images')
    if not os.path.exists(self.web_dir):
        os.makedirs(self.web_dir)
    if not os.path.exists(self.img_dir):
        os.makedirs(self.img_dir)

    self.doc = dominate.document(title=title)
    if refresh > 0:
        with self.doc.head:
            meta(http_equiv="refresh", content=str(refresh))
Initialize the HTML classes Parameters: web_dir (str) -- a directory that stores the webpage. HTML file will be created at <web_dir>/index.html; images will be saved at <web_dir>/images/ title (str) -- the webpage name refresh (int) -- how often the website refresh itself; if 0, no refreshing
__init__
python
OpenTalker/video-retalking
third_part/face3d/util/html.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/html.py
Apache-2.0
def add_images(self, ims, txts, links, width=400):
    """add images to the HTML file

    Parameters:
        ims (str list)   -- a list of image paths
        txts (str list)  -- a list of image names shown on the website
        links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page
    """
    self.t = table(border=1, style="table-layout: fixed;")  # Insert a table
    self.doc.add(self.t)
    with self.t:
        with tr():
            for im, txt, link in zip(ims, txts, links):
                with td(style="word-wrap: break-word;", halign="center", valign="top"):
                    with p():
                        with a(href=os.path.join('images', link)):
                            img(style="width:%dpx" % width, src=os.path.join('images', im))
                        br()
                        p(txt)
add images to the HTML file Parameters: ims (str list) -- a list of image paths txts (str list) -- a list of image names shown on the website links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page
add_images
python
OpenTalker/video-retalking
third_part/face3d/util/html.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/html.py
Apache-2.0
def save(self):
    """save the current content to the HTML file"""
    html_file = '%s/index.html' % self.web_dir
    f = open(html_file, 'wt')
    f.write(self.doc.render())
    f.close()
save the current content to the HTML file
save
python
OpenTalker/video-retalking
third_part/face3d/util/html.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/html.py
Apache-2.0
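Taken together, the three methods above form a tiny report generator built on the dominate library: the constructor creates the directory layout, add_images appends a one-row table of linked thumbnails, and save renders index.html. A hedged usage sketch (paths and file names are illustrative; the image files need not exist for the page to be written):

page = HTML('./web', 'reconstruction results')
page.add_images(
    ims=['epoch001_input.png', 'epoch001_output.png'],
    txts=['input', 'output'],
    links=['epoch001_input.png', 'epoch001_output.png'],
    width=256,
)
page.save()  # writes ./web/index.html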
def forward(self, vertex, tri, feat=None):
    """
    Return:
        mask               -- torch.tensor, size (B, 1, H, W)
        depth              -- torch.tensor, size (B, 1, H, W)
        features(optional) -- torch.tensor, size (B, C, H, W) if feat is not None

    Parameters:
        vertex          -- torch.tensor, size (B, N, 3)
        tri             -- torch.tensor, size (B, M, 3) or (M, 3), triangles
        feat(optional)  -- torch.tensor, size (B, C), features
    """
    device = vertex.device
    rsize = int(self.rasterize_size)
    ndc_proj = self.ndc_proj.to(device)
    # trans to homogeneous coordinates of 3d vertices, the direction of y is the same as v
    if vertex.shape[-1] == 3:
        vertex = torch.cat([vertex, torch.ones([*vertex.shape[:2], 1]).to(device)], dim=-1)
        vertex[..., 1] = -vertex[..., 1]

    vertex_ndc = vertex @ ndc_proj.t()
    if self.glctx is None:
        self.glctx = dr.RasterizeGLContext(device=device)
        print("create glctx on device cuda:%d" % device.index)

    ranges = None
    if isinstance(tri, List) or len(tri.shape) == 3:
        vum = vertex_ndc.shape[1]
        fnum = torch.tensor([f.shape[0] for f in tri]).unsqueeze(1).to(device)
        fstartidx = torch.cumsum(fnum, dim=0) - fnum
        ranges = torch.cat([fstartidx, fnum], axis=1).type(torch.int32).cpu()
        for i in range(tri.shape[0]):
            tri[i] = tri[i] + i * vum
        vertex_ndc = torch.cat(vertex_ndc, dim=0)
        tri = torch.cat(tri, dim=0)

    # for range_mode vertex: [B*N, 4], tri: [B*M, 3]; for instance_mode vertex: [B, N, 4], tri: [M, 3]
    tri = tri.type(torch.int32).contiguous()
    rast_out, _ = dr.rasterize(self.glctx, vertex_ndc.contiguous(), tri, resolution=[rsize, rsize], ranges=ranges)

    depth, _ = dr.interpolate(vertex.reshape([-1, 4])[..., 2].unsqueeze(1).contiguous(), rast_out, tri)
    depth = depth.permute(0, 3, 1, 2)
    mask = (rast_out[..., 3] > 0).float().unsqueeze(1)
    depth = mask * depth

    image = None
    if feat is not None:
        image, _ = dr.interpolate(feat, rast_out, tri)
        image = image.permute(0, 3, 1, 2)
        image = mask * image

    return mask, depth, image
Return: mask -- torch.tensor, size (B, 1, H, W) depth -- torch.tensor, size (B, 1, H, W) features(optional) -- torch.tensor, size (B, C, H, W) if feat is not None Parameters: vertex -- torch.tensor, size (B, N, 3) tri -- torch.tensor, size (B, M, 3) or (M, 3), triangles feat(optional) -- torch.tensor, size (B, C), features
forward
python
OpenTalker/video-retalking
third_part/face3d/util/nvdiffrast.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/nvdiffrast.py
Apache-2.0
def align_img(img, lm, lm3D, mask=None, target_size=224., rescale_factor=102.):
    """
    Return:
        trans_params        --numpy.array (raw_W, raw_H, scale, tx, ty)
        img_new             --PIL.Image (target_size, target_size, 3)
        lm_new              --numpy.array (68, 2), y direction is opposite to v direction
        mask_new            --PIL.Image (target_size, target_size)

    Parameters:
        img                 --PIL.Image (raw_H, raw_W, 3)
        lm                  --numpy.array (68, 2), y direction is opposite to v direction
        lm3D                --numpy.array (5, 3)
        mask                --PIL.Image (raw_H, raw_W, 3)
    """
    w0, h0 = img.size
    if lm.shape[0] != 5:
        lm5p = extract_5p(lm)
    else:
        lm5p = lm

    # calculate translation and scale factors using 5 facial landmarks and standard landmarks of a 3D face
    t, s = POS(lm5p.transpose(), lm3D.transpose())
    s = rescale_factor / s

    # processing the image
    img_new, lm_new, mask_new = resize_n_crop_img(img, lm, t, s, target_size=target_size, mask=mask)
    trans_params = np.array([w0, h0, s, t[0], t[1]])

    return trans_params, img_new, lm_new, mask_new
Return: transparams --numpy.array (raw_W, raw_H, scale, tx, ty) img_new --PIL.Image (target_size, target_size, 3) lm_new --numpy.array (68, 2), y direction is opposite to v direction mask_new --PIL.Image (target_size, target_size) Parameters: img --PIL.Image (raw_H, raw_W, 3) lm --numpy.array (68, 2), y direction is opposite to v direction lm3D --numpy.array (5, 3) mask --PIL.Image (raw_H, raw_W, 3)
align_img
python
OpenTalker/video-retalking
third_part/face3d/util/preprocess.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/preprocess.py
Apache-2.0
def estimate_norm(lm_68p, H):
    # from https://github.com/deepinsight/insightface/blob/c61d3cd208a603dfa4a338bd743b320ce3e94730/recognition/common/face_align.py#L68
    """
    Return:
        trans_m            --numpy.array (2, 3)

    Parameters:
        lm                 --numpy.array (68, 2), y direction is opposite to v direction
        H                  --int/float, image height
    """
    lm = extract_5p(lm_68p)
    lm[:, -1] = H - 1 - lm[:, -1]
    tform = trans.SimilarityTransform()
    src = np.array(
        [[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366],
         [41.5493, 92.3655], [70.7299, 92.2041]], dtype=np.float32)
    tform.estimate(lm, src)
    M = tform.params
    if np.linalg.det(M) == 0:
        M = np.eye(3)

    return M[0:2, :]
Return: trans_m --numpy.array (2, 3) Parameters: lm --numpy.array (68, 2), y direction is opposite to v direction H --int/float , image height
estimate_norm
python
OpenTalker/video-retalking
third_part/face3d/util/preprocess.py
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/preprocess.py
Apache-2.0
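The five destination points above are the standard ArcFace 112x112 alignment template; skimage's SimilarityTransform solves for scale, rotation, and translation in a least-squares sense. A standalone sketch (the detected landmarks here are synthetic, generated from the template itself, so the estimated transform recovers them exactly):

import numpy as np
from skimage import transform as trans

src = np.array([[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366],
                [41.5493, 92.3655], [70.7299, 92.2041]], dtype=np.float32)

# Hypothetical 5-point landmarks: the template scaled and shifted.
lm = src * 1.5 + np.array([10.0, 20.0], dtype=np.float32)

tform = trans.SimilarityTransform()
tform.estimate(lm, src)
M = tform.params[0:2, :]                     # 2x3 affine used for warping
aligned = lm @ M[:, :2].T + M[:, 2]
assert np.allclose(aligned, src, atol=1e-3)  # maps landmarks back onto the template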