Dataset columns:
index: int64 (values 0 to 731k)
package: string (length 2 to 98)
name: string (length 1 to 76)
docstring: string (length 0 to 281k)
code: string (length 4 to 1.07M)
signature: string (length 2 to 42.8k)
16,614
imodels.experimental.bartpy.sklearnmodel
predict
Predict the target corresponding to the provided covariate matrix If X is None, will predict based on training covariates Prediction is based on the mean of all samples Parameters ---------- X: pd.DataFrame covariates to predict from Returns ------- np.ndarray predictions for the X covariates
def predict(self, X: np.ndarray = None) -> np.ndarray: """ Predict the target corresponding to the provided covariate matrix If X is None, will predict based on training covariates Prediction is based on the mean of all samples Parameters ---------- X: pd.DataFrame covariates to predict from Returns ------- np.ndarray predictions for the X covariates """ if X is None and self.store_in_sample_predictions: return self.data.y.unnormalize_y(np.mean(self._prediction_samples, axis=0)) elif X is None and not self.store_in_sample_predictions: raise ValueError( "In sample predictions only possible if model.store_in_sample_predictions is `True`. Either set the parameter to True or pass a non-None X parameter") else: predictions = self._out_of_sample_predict(X) if self.classification: return np.round(predictions, 0) return predictions
(self, X: Optional[numpy.ndarray] = None) -> numpy.ndarray
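A minimal usage sketch for the predict method above, assuming the surrounding class is the BART SklearnModel wrapper in this module; the constructor keyword names are illustrative and may differ between versions:

import numpy as np
from imodels.experimental.bartpy.sklearnmodel import SklearnModel  # class name assumed

rng = np.random.default_rng(0)
X_train = rng.normal(size=(100, 3))
y_train = X_train[:, 0] + 0.1 * rng.normal(size=100)

# n_trees / n_samples / n_burn are assumed keyword names, kept small for a quick run
model = SklearnModel(n_trees=10, n_samples=50, n_burn=50,
                     store_in_sample_predictions=True)
model.fit(X_train, y_train)

in_sample = model.predict()                 # mean of posterior samples on the training covariates
out_of_sample = model.predict(X_train[:5])  # predictions for explicitly supplied covariates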
16,615
imodels.experimental.bartpy.sklearnmodel
predict_chain
null
def predict_chain(self, X, chain_number): predictions_transformed = self._chain_pred_arr(X, chain_number) predictions = self.data.y.unnormalize_y(np.mean(predictions_transformed, axis=0)) if self.classification: predictions = scipy.stats.norm.cdf(predictions) return predictions
(self, X, chain_number)
16,616
imodels.experimental.bartpy.sklearnmodel
predict_proba
null
def predict_proba(self, X: np.ndarray = None) -> np.ndarray: preds = self._out_of_sample_predict(X) return np.stack([preds, 1 - preds], axis=1)
(self, X: Optional[numpy.ndarray] = None) -> numpy.ndarray
16,617
imodels.experimental.bartpy.sklearnmodel
residuals
Array of error for each observation Parameters ---------- X: np.ndarray Covariate matrix y: np.ndarray Target array Returns ------- np.ndarray Error for each observation
def residuals(self, X=None, y=None) -> np.ndarray: """ Array of error for each observation Parameters ---------- X: np.ndarray Covariate matrix y: np.ndarray Target array Returns ------- np.ndarray Error for each observation """ if y is None: return self.model.data.y.unnormalized_y - self.predict(X) else: return y - self.predict(X)
(self, X=None, y=None) -> numpy.ndarray
16,618
imodels.experimental.bartpy.sklearnmodel
rmse
The total RMSE error of the model The sum of squared errors over all observations Parameters ---------- X: np.ndarray Covariate matrix y: np.ndarray Target array Returns ------- float The total summed L2 error for the model
def rmse(self, X, y) -> float: """ The total RMSE error of the model The sum of squared errors over all observations Parameters ---------- X: np.ndarray Covariate matrix y: np.ndarray Target array Returns ------- float The total summed L2 error for the model """ return np.sqrt(np.sum(self.l2_error(X, y)))
(self, X, y) -> float
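The residuals and rmse helpers above compose as follows. Note that, as implemented, rmse returns the square root of the summed (not mean) squared error, so it grows with the number of observations; the docstring says as much ("total summed L2 error"). A standalone NumPy illustration of that arithmetic, assuming l2_error returns per-observation squared errors:

import numpy as np

y_true = np.array([3.0, 1.0, 4.0, 1.0])
y_pred = np.array([2.5, 1.5, 4.0, 0.0])

residuals = y_true - y_pred                       # what residuals() returns when y is given
l2_error = residuals ** 2                         # assumed per-observation squared error
rmse_as_implemented = np.sqrt(np.sum(l2_error))   # sqrt of the sum, as in rmse() above
conventional_rmse = np.sqrt(np.mean(l2_error))    # sqrt of the mean, for comparison
print(rmse_as_implemented, conventional_rmse)     # ~1.2247 vs ~0.6124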
16,622
imodels.experimental.bartpy.sklearnmodel
sub_forest
null
@staticmethod def sub_forest(trees, n_nodes): nodes = 0 for i, tree in enumerate(trees): nodes += len(tree.decision_nodes) if nodes >= n_nodes: return trees[0:i + 1]
(trees, n_nodes)
16,623
imodels.experimental.bartpy.sklearnmodel
update_complexity
null
def update_complexity(self, i): samples_complexity = [self._get_n_nodes(t) for t in self.trees] # complexity_sum = 0 arg_sort_complexity = np.argsort(samples_complexity) self._model_samples = self._model_samples[arg_sort_complexity[:i + 1]] return self
(self, i)
16,624
imodels.discretization.mdlp
BRLDiscretizer
null
class BRLDiscretizer: def __init__(self, feature_labels, verbose=False): self.feature_labels_original = feature_labels self.verbose = verbose def fit(self, X, y, undiscretized_features=[]): # check which features are numeric (to be discretized) self.discretized_features = [] X_str_disc = self._encode_strings(X) for fi in range(X_str_disc.shape[1]): # if not string, has values other than 0 and 1, and not specified as undiscretized if ( isinstance(X_str_disc[0][fi], numbers.Number) and (not set(np.unique(X_str_disc[:, fi])).issubset({0, 1})) and (len(self.feature_labels) == 0 or len(undiscretized_features) == 0 or self.feature_labels[fi] not in undiscretized_features ) ): self.discretized_features.append(self.feature_labels[fi]) if len(self.discretized_features) > 0: if self.verbose: print( "Warning: non-categorical data found. Trying to discretize. (Please convert categorical values to " "strings, and/or specify the argument 'undiscretized_features', to avoid this.)") X_str_and_num_disc = self.discretize(X_str_disc, y) self.discretized_X = X_str_and_num_disc else: self.discretizer = None return def discretize(self, X, y): '''Discretize the features specified in self.discretized_features ''' if self.verbose: print("Discretizing ", self.discretized_features, "...") D = pd.DataFrame(np.hstack((X, np.expand_dims(y, axis=1))), columns=list(self.feature_labels) + ["y"]) self.discretizer = MDLPDiscretizer(dataset=D, class_label="y", features=self.discretized_features) cat_data = pd.DataFrame(np.zeros_like(X)) for i in range(len(self.feature_labels)): label = self.feature_labels[i] if label in self.discretized_features: new_column = label + " : " + self.discretizer._data[label].astype(str) cat_data.iloc[:, i] = new_column else: cat_data.iloc[:, i] = D[label] return np.array(cat_data).tolist() def _encode_strings(self, X): # handle string data X_str_disc = pd.DataFrame([]) for fi in range(X.shape[1]): if issubclass(type(X[0][fi]), str): new_columns = pd.get_dummies(X[:, fi]) new_columns.columns = [self.feature_labels_original[fi] + '_' + value for value in new_columns.columns] new_columns_colon_format = new_columns.apply(lambda s: s.name + ' : ' + s.astype(str)) X_str_disc = pd.concat([X_str_disc, new_columns_colon_format], axis=1) else: X_str_disc = pd.concat([X_str_disc, pd.Series(X[:, fi], name=self.feature_labels_original[fi])], axis=1) self.feature_labels = list(X_str_disc.columns) return X_str_disc.values def transform(self, X, return_onehot=True): if type(X) in [pd.DataFrame, pd.Series]: X = X.values if self.discretizer is None: return pd.DataFrame(X, columns=self.feature_labels_original) self.data = pd.DataFrame(self._encode_strings(X), columns=self.feature_labels) self._apply_cutpoints() D = np.array(self.data) # prepend feature labels Dl = np.copy(D).astype(str).tolist() for i in range(len(Dl)): for j in range(len(Dl[0])): Dl[i][j] = self.feature_labels[j] + " : " + Dl[i][j] if not return_onehot: return Dl else: return self.get_onehot_df(Dl) @property def onehot_df(self): return self.get_onehot_df(self.discretized_X) def get_onehot_df(self, discretized_X): '''Create readable one-hot encoded DataFrame from discretized features ''' data = list(discretized_X[:]) X_colname_removed = data.copy() replace_str_entries_func = lambda s: s.split(' : ')[1] if type(s) is str else s for i in range(len(data)): X_colname_removed[i] = list(map(replace_str_entries_func, X_colname_removed[i])) X_df_categorical = pd.DataFrame(X_colname_removed, columns=self.feature_labels) X_df_onehot = 
pd.get_dummies(X_df_categorical) return X_df_onehot @property def data(self): return self.discretizer._data @data.setter def data(self, value): self.discretizer._data = value def _apply_cutpoints(self): return self.discretizer._apply_cutpoints()
(feature_labels, verbose=False)
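A hedged end-to-end sketch of the fit/transform flow shown above. The import path mirrors the module name in these rows, and the toy data are chosen so the MDLP step has a signal to find cut points; exact bin labels depend on the data:

import numpy as np
from imodels.discretization.mdlp import BRLDiscretizer  # path taken from the module shown

rng = np.random.default_rng(0)
age = rng.normal(50, 10, size=200)
bmi = rng.normal(25, 4, size=200)
X = np.column_stack([age, bmi])
y = ((age > 50) & (bmi > 25)).astype(int)   # target correlated with both features

disc = BRLDiscretizer(feature_labels=["age", "bmi"])
disc.fit(X, y)                    # numeric columns are MDLP-discretized against y
X_onehot = disc.transform(X)      # readable one-hot frame of "feature : bin" columns
print(X_onehot.columns[:5].tolist())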
16,625
imodels.discretization.mdlp
__init__
null
def __init__(self, feature_labels, verbose=False): self.feature_labels_original = feature_labels self.verbose = verbose
(self, feature_labels, verbose=False)
16,626
imodels.discretization.mdlp
_apply_cutpoints
null
def _apply_cutpoints(self): return self.discretizer._apply_cutpoints()
(self)
16,627
imodels.discretization.mdlp
_encode_strings
null
def _encode_strings(self, X): # handle string data X_str_disc = pd.DataFrame([]) for fi in range(X.shape[1]): if issubclass(type(X[0][fi]), str): new_columns = pd.get_dummies(X[:, fi]) new_columns.columns = [self.feature_labels_original[fi] + '_' + value for value in new_columns.columns] new_columns_colon_format = new_columns.apply(lambda s: s.name + ' : ' + s.astype(str)) X_str_disc = pd.concat([X_str_disc, new_columns_colon_format], axis=1) else: X_str_disc = pd.concat([X_str_disc, pd.Series(X[:, fi], name=self.feature_labels_original[fi])], axis=1) self.feature_labels = list(X_str_disc.columns) return X_str_disc.values
(self, X)
16,628
imodels.discretization.mdlp
discretize
Discretize the features specified in self.discretized_features
def discretize(self, X, y): '''Discretize the features specified in self.discretized_features ''' if self.verbose: print("Discretizing ", self.discretized_features, "...") D = pd.DataFrame(np.hstack((X, np.expand_dims(y, axis=1))), columns=list(self.feature_labels) + ["y"]) self.discretizer = MDLPDiscretizer(dataset=D, class_label="y", features=self.discretized_features) cat_data = pd.DataFrame(np.zeros_like(X)) for i in range(len(self.feature_labels)): label = self.feature_labels[i] if label in self.discretized_features: new_column = label + " : " + self.discretizer._data[label].astype(str) cat_data.iloc[:, i] = new_column else: cat_data.iloc[:, i] = D[label] return np.array(cat_data).tolist()
(self, X, y)
16,629
imodels.discretization.mdlp
fit
null
def fit(self, X, y, undiscretized_features=[]): # check which features are numeric (to be discretized) self.discretized_features = [] X_str_disc = self._encode_strings(X) for fi in range(X_str_disc.shape[1]): # if not string, has values other than 0 and 1, and not specified as undiscretized if ( isinstance(X_str_disc[0][fi], numbers.Number) and (not set(np.unique(X_str_disc[:, fi])).issubset({0, 1})) and (len(self.feature_labels) == 0 or len(undiscretized_features) == 0 or self.feature_labels[fi] not in undiscretized_features ) ): self.discretized_features.append(self.feature_labels[fi]) if len(self.discretized_features) > 0: if self.verbose: print( "Warning: non-categorical data found. Trying to discretize. (Please convert categorical values to " "strings, and/or specify the argument 'undiscretized_features', to avoid this.)") X_str_and_num_disc = self.discretize(X_str_disc, y) self.discretized_X = X_str_and_num_disc else: self.discretizer = None return
(self, X, y, undiscretized_features=[])
16,630
imodels.discretization.mdlp
get_onehot_df
Create readable one-hot encoded DataFrame from discretized features
def get_onehot_df(self, discretized_X): '''Create readable one-hot encoded DataFrame from discretized features ''' data = list(discretized_X[:]) X_colname_removed = data.copy() replace_str_entries_func = lambda s: s.split(' : ')[1] if type(s) is str else s for i in range(len(data)): X_colname_removed[i] = list(map(replace_str_entries_func, X_colname_removed[i])) X_df_categorical = pd.DataFrame(X_colname_removed, columns=self.feature_labels) X_df_onehot = pd.get_dummies(X_df_categorical) return X_df_onehot
(self, discretized_X)
16,631
imodels.discretization.mdlp
transform
null
def transform(self, X, return_onehot=True): if type(X) in [pd.DataFrame, pd.Series]: X = X.values if self.discretizer is None: return pd.DataFrame(X, columns=self.feature_labels_original) self.data = pd.DataFrame(self._encode_strings(X), columns=self.feature_labels) self._apply_cutpoints() D = np.array(self.data) # prepend feature labels Dl = np.copy(D).astype(str).tolist() for i in range(len(Dl)): for j in range(len(Dl[0])): Dl[i][j] = self.feature_labels[j] + " : " + Dl[i][j] if not return_onehot: return Dl else: return self.get_onehot_df(Dl)
(self, X, return_onehot=True)
16,632
sklearn.base
BaseEstimator
Base class for all estimators in scikit-learn. Inheriting from this class provides default implementations of: - setting and getting parameters used by `GridSearchCV` and friends; - textual and HTML representation displayed in terminals and IDEs; - estimator serialization; - parameters validation; - data validation; - feature names validation. Read more in the :ref:`User Guide <rolling_your_own_estimator>`. Notes ----- All estimators should specify all the parameters that can be set at the class level in their ``__init__`` as explicit keyword arguments (no ``*args`` or ``**kwargs``). Examples -------- >>> import numpy as np >>> from sklearn.base import BaseEstimator >>> class MyEstimator(BaseEstimator): ... def __init__(self, *, param=1): ... self.param = param ... def fit(self, X, y=None): ... self.is_fitted_ = True ... return self ... def predict(self, X): ... return np.full(shape=X.shape[0], fill_value=self.param) >>> estimator = MyEstimator(param=2) >>> estimator.get_params() {'param': 2} >>> X = np.array([[1, 2], [2, 3], [3, 4]]) >>> y = np.array([1, 0, 1]) >>> estimator.fit(X, y).predict(X) array([2, 2, 2]) >>> estimator.set_params(param=3).fit(X, y).predict(X) array([3, 3, 3])
class BaseEstimator(_HTMLDocumentationLinkMixin, _MetadataRequester): """Base class for all estimators in scikit-learn. Inheriting from this class provides default implementations of: - setting and getting parameters used by `GridSearchCV` and friends; - textual and HTML representation displayed in terminals and IDEs; - estimator serialization; - parameters validation; - data validation; - feature names validation. Read more in the :ref:`User Guide <rolling_your_own_estimator>`. Notes ----- All estimators should specify all the parameters that can be set at the class level in their ``__init__`` as explicit keyword arguments (no ``*args`` or ``**kwargs``). Examples -------- >>> import numpy as np >>> from sklearn.base import BaseEstimator >>> class MyEstimator(BaseEstimator): ... def __init__(self, *, param=1): ... self.param = param ... def fit(self, X, y=None): ... self.is_fitted_ = True ... return self ... def predict(self, X): ... return np.full(shape=X.shape[0], fill_value=self.param) >>> estimator = MyEstimator(param=2) >>> estimator.get_params() {'param': 2} >>> X = np.array([[1, 2], [2, 3], [3, 4]]) >>> y = np.array([1, 0, 1]) >>> estimator.fit(X, y).predict(X) array([2, 2, 2]) >>> estimator.set_params(param=3).fit(X, y).predict(X) array([3, 3, 3]) """ @classmethod def _get_param_names(cls): """Get parameter names for the estimator""" # fetch the constructor or the original constructor before # deprecation wrapping if any init = getattr(cls.__init__, "deprecated_original", cls.__init__) if init is object.__init__: # No explicit constructor to introspect return [] # introspect the constructor arguments to find the model parameters # to represent init_signature = inspect.signature(init) # Consider the constructor parameters excluding 'self' parameters = [ p for p in init_signature.parameters.values() if p.name != "self" and p.kind != p.VAR_KEYWORD ] for p in parameters: if p.kind == p.VAR_POSITIONAL: raise RuntimeError( "scikit-learn estimators should always " "specify their parameters in the signature" " of their __init__ (no varargs)." " %s with constructor %s doesn't " " follow this convention." % (cls, init_signature) ) # Extract and sort argument names excluding 'self' return sorted([p.name for p in parameters]) def get_params(self, deep=True): """ Get parameters for this estimator. Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : dict Parameter names mapped to their values. """ out = dict() for key in self._get_param_names(): value = getattr(self, key) if deep and hasattr(value, "get_params") and not isinstance(value, type): deep_items = value.get_params().items() out.update((key + "__" + k, val) for k, val in deep_items) out[key] = value return out def set_params(self, **params): """Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as :class:`~sklearn.pipeline.Pipeline`). The latter have parameters of the form ``<component>__<parameter>`` so that it's possible to update each component of a nested object. Parameters ---------- **params : dict Estimator parameters. Returns ------- self : estimator instance Estimator instance. 
""" if not params: # Simple optimization to gain speed (inspect is slow) return self valid_params = self.get_params(deep=True) nested_params = defaultdict(dict) # grouped by prefix for key, value in params.items(): key, delim, sub_key = key.partition("__") if key not in valid_params: local_valid_params = self._get_param_names() raise ValueError( f"Invalid parameter {key!r} for estimator {self}. " f"Valid parameters are: {local_valid_params!r}." ) if delim: nested_params[key][sub_key] = value else: setattr(self, key, value) valid_params[key] = value for key, sub_params in nested_params.items(): valid_params[key].set_params(**sub_params) return self def __sklearn_clone__(self): return _clone_parametrized(self) def __repr__(self, N_CHAR_MAX=700): # N_CHAR_MAX is the (approximate) maximum number of non-blank # characters to render. We pass it as an optional parameter to ease # the tests. from .utils._pprint import _EstimatorPrettyPrinter N_MAX_ELEMENTS_TO_SHOW = 30 # number of elements to show in sequences # use ellipsis for sequences with a lot of elements pp = _EstimatorPrettyPrinter( compact=True, indent=1, indent_at_name=True, n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW, ) repr_ = pp.pformat(self) # Use bruteforce ellipsis when there are a lot of non-blank characters n_nonblank = len("".join(repr_.split())) if n_nonblank > N_CHAR_MAX: lim = N_CHAR_MAX // 2 # apprx number of chars to keep on both ends regex = r"^(\s*\S){%d}" % lim # The regex '^(\s*\S){%d}' % n # matches from the start of the string until the nth non-blank # character: # - ^ matches the start of string # - (pattern){n} matches n repetitions of pattern # - \s*\S matches a non-blank char following zero or more blanks left_lim = re.match(regex, repr_).end() right_lim = re.match(regex, repr_[::-1]).end() if "\n" in repr_[left_lim:-right_lim]: # The left side and right side aren't on the same line. # To avoid weird cuts, e.g.: # categoric...ore', # we need to start the right side with an appropriate newline # character so that it renders properly as: # categoric... # handle_unknown='ignore', # so we add [^\n]*\n which matches until the next \n regex += r"[^\n]*\n" right_lim = re.match(regex, repr_[::-1]).end() ellipsis = "..." if left_lim + len(ellipsis) < len(repr_) - right_lim: # Only add ellipsis if it results in a shorter repr repr_ = repr_[:left_lim] + "..." + repr_[-right_lim:] return repr_ def __getstate__(self): if getattr(self, "__slots__", None): raise TypeError( "You cannot use `__slots__` in objects inheriting from " "`sklearn.base.BaseEstimator`." ) try: state = super().__getstate__() if state is None: # For Python 3.11+, empty instance (no `__slots__`, # and `__dict__`) will return a state equal to `None`. 
state = self.__dict__.copy() except AttributeError: # Python < 3.11 state = self.__dict__.copy() if type(self).__module__.startswith("sklearn."): return dict(state.items(), _sklearn_version=__version__) else: return state def __setstate__(self, state): if type(self).__module__.startswith("sklearn."): pickle_version = state.pop("_sklearn_version", "pre-0.18") if pickle_version != __version__: warnings.warn( InconsistentVersionWarning( estimator_name=self.__class__.__name__, current_sklearn_version=__version__, original_sklearn_version=pickle_version, ), ) try: super().__setstate__(state) except AttributeError: self.__dict__.update(state) def _more_tags(self): return _DEFAULT_TAGS def _get_tags(self): collected_tags = {} for base_class in reversed(inspect.getmro(self.__class__)): if hasattr(base_class, "_more_tags"): # need the if because mixins might not have _more_tags # but might do redundant work in estimators # (i.e. calling more tags on BaseEstimator multiple times) more_tags = base_class._more_tags(self) collected_tags.update(more_tags) return collected_tags def _check_n_features(self, X, reset): """Set the `n_features_in_` attribute, or check against it. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) The input samples. reset : bool If True, the `n_features_in_` attribute is set to `X.shape[1]`. If False and the attribute exists, then check that it is equal to `X.shape[1]`. If False and the attribute does *not* exist, then the check is skipped. .. note:: It is recommended to call reset=True in `fit` and in the first call to `partial_fit`. All other methods that validate `X` should set `reset=False`. """ try: n_features = _num_features(X) except TypeError as e: if not reset and hasattr(self, "n_features_in_"): raise ValueError( "X does not contain any features, but " f"{self.__class__.__name__} is expecting " f"{self.n_features_in_} features" ) from e # If the number of features is not defined and reset=True, # then we skip this check return if reset: self.n_features_in_ = n_features return if not hasattr(self, "n_features_in_"): # Skip this check if the expected number of expected input features # was not recorded by calling fit first. This is typically the case # for stateless transformers. return if n_features != self.n_features_in_: raise ValueError( f"X has {n_features} features, but {self.__class__.__name__} " f"is expecting {self.n_features_in_} features as input." ) def _check_feature_names(self, X, *, reset): """Set or check the `feature_names_in_` attribute. .. versionadded:: 1.0 Parameters ---------- X : {ndarray, dataframe} of shape (n_samples, n_features) The input samples. reset : bool Whether to reset the `feature_names_in_` attribute. If False, the input will be checked for consistency with feature names of data provided when reset was last True. .. note:: It is recommended to call `reset=True` in `fit` and in the first call to `partial_fit`. All other methods that validate `X` should set `reset=False`. """ if reset: feature_names_in = _get_feature_names(X) if feature_names_in is not None: self.feature_names_in_ = feature_names_in elif hasattr(self, "feature_names_in_"): # Delete the attribute when the estimator is fitted on a new dataset # that has no feature names. 
delattr(self, "feature_names_in_") return fitted_feature_names = getattr(self, "feature_names_in_", None) X_feature_names = _get_feature_names(X) if fitted_feature_names is None and X_feature_names is None: # no feature names seen in fit and in X return if X_feature_names is not None and fitted_feature_names is None: warnings.warn( f"X has feature names, but {self.__class__.__name__} was fitted without" " feature names" ) return if X_feature_names is None and fitted_feature_names is not None: warnings.warn( "X does not have valid feature names, but" f" {self.__class__.__name__} was fitted with feature names" ) return # validate the feature names against the `feature_names_in_` attribute if len(fitted_feature_names) != len(X_feature_names) or np.any( fitted_feature_names != X_feature_names ): message = ( "The feature names should match those that were passed during fit.\n" ) fitted_feature_names_set = set(fitted_feature_names) X_feature_names_set = set(X_feature_names) unexpected_names = sorted(X_feature_names_set - fitted_feature_names_set) missing_names = sorted(fitted_feature_names_set - X_feature_names_set) def add_names(names): output = "" max_n_names = 5 for i, name in enumerate(names): if i >= max_n_names: output += "- ...\n" break output += f"- {name}\n" return output if unexpected_names: message += "Feature names unseen at fit time:\n" message += add_names(unexpected_names) if missing_names: message += "Feature names seen at fit time, yet now missing:\n" message += add_names(missing_names) if not missing_names and not unexpected_names: message += ( "Feature names must be in the same order as they were in fit.\n" ) raise ValueError(message) def _validate_data( self, X="no_validation", y="no_validation", reset=True, validate_separately=False, cast_to_ndarray=True, **check_params, ): """Validate input data and set or check the `n_features_in_` attribute. Parameters ---------- X : {array-like, sparse matrix, dataframe} of shape \ (n_samples, n_features), default='no validation' The input samples. If `'no_validation'`, no validation is performed on `X`. This is useful for meta-estimator which can delegate input validation to their underlying estimator(s). In that case `y` must be passed and the only accepted `check_params` are `multi_output` and `y_numeric`. y : array-like of shape (n_samples,), default='no_validation' The targets. - If `None`, `check_array` is called on `X`. If the estimator's requires_y tag is True, then an error will be raised. - If `'no_validation'`, `check_array` is called on `X` and the estimator's requires_y tag is ignored. This is a default placeholder and is never meant to be explicitly set. In that case `X` must be passed. - Otherwise, only `y` with `_check_y` or both `X` and `y` are checked with either `check_array` or `check_X_y` depending on `validate_separately`. reset : bool, default=True Whether to reset the `n_features_in_` attribute. If False, the input will be checked for consistency with data provided when reset was last True. .. note:: It is recommended to call reset=True in `fit` and in the first call to `partial_fit`. All other methods that validate `X` should set `reset=False`. validate_separately : False or tuple of dicts, default=False Only used if y is not None. If False, call validate_X_y(). Else, it must be a tuple of kwargs to be used for calling check_array() on X and y respectively. `estimator=self` is automatically added to these dicts to generate more informative error message in case of invalid input data. 
cast_to_ndarray : bool, default=True Cast `X` and `y` to ndarray with checks in `check_params`. If `False`, `X` and `y` are unchanged and only `feature_names_in_` and `n_features_in_` are checked. **check_params : kwargs Parameters passed to :func:`sklearn.utils.check_array` or :func:`sklearn.utils.check_X_y`. Ignored if validate_separately is not False. `estimator=self` is automatically added to these params to generate more informative error message in case of invalid input data. Returns ------- out : {ndarray, sparse matrix} or tuple of these The validated input. A tuple is returned if both `X` and `y` are validated. """ self._check_feature_names(X, reset=reset) if y is None and self._get_tags()["requires_y"]: raise ValueError( f"This {self.__class__.__name__} estimator " "requires y to be passed, but the target y is None." ) no_val_X = isinstance(X, str) and X == "no_validation" no_val_y = y is None or isinstance(y, str) and y == "no_validation" if no_val_X and no_val_y: raise ValueError("Validation should be done on X, y or both.") default_check_params = {"estimator": self} check_params = {**default_check_params, **check_params} if not cast_to_ndarray: if not no_val_X and no_val_y: out = X elif no_val_X and not no_val_y: out = y else: out = X, y elif not no_val_X and no_val_y: out = check_array(X, input_name="X", **check_params) elif no_val_X and not no_val_y: out = _check_y(y, **check_params) else: if validate_separately: # We need this because some estimators validate X and y # separately, and in general, separately calling check_array() # on X and y isn't equivalent to just calling check_X_y() # :( check_X_params, check_y_params = validate_separately if "estimator" not in check_X_params: check_X_params = {**default_check_params, **check_X_params} X = check_array(X, input_name="X", **check_X_params) if "estimator" not in check_y_params: check_y_params = {**default_check_params, **check_y_params} y = check_array(y, input_name="y", **check_y_params) else: X, y = check_X_y(X, y, **check_params) out = X, y if not no_val_X and check_params.get("ensure_2d", True): self._check_n_features(X, reset=reset) return out def _validate_params(self): """Validate types and values of constructor parameters The expected type and values must be defined in the `_parameter_constraints` class attribute, which is a dictionary `param_name: list of constraints`. See the docstring of `validate_parameter_constraints` for a description of the accepted constraints. """ validate_parameter_constraints( self._parameter_constraints, self.get_params(deep=False), caller_name=self.__class__.__name__, ) @property def _repr_html_(self): """HTML representation of estimator. This is redundant with the logic of `_repr_mimebundle_`. The latter should be favorted in the long term, `_repr_html_` is only implemented for consumers who do not interpret `_repr_mimbundle_`. """ if get_config()["display"] != "diagram": raise AttributeError( "_repr_html_ is only defined when the " "'display' configuration option is set to " "'diagram'" ) return self._repr_html_inner def _repr_html_inner(self): """This function is returned by the @property `_repr_html_` to make `hasattr(estimator, "_repr_html_") return `True` or `False` depending on `get_config()["display"]`. """ return estimator_html_repr(self) def _repr_mimebundle_(self, **kwargs): """Mime bundle used by jupyter kernels to display estimator""" output = {"text/plain": repr(self)} if get_config()["display"] == "diagram": output["text/html"] = estimator_html_repr(self) return output
()
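The get_params/set_params machinery above is what powers the <component>__<parameter> syntax used throughout scikit-learn. A short, self-contained illustration of that routing:

from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

pipe = Pipeline([("scale", StandardScaler()), ("clf", LogisticRegression())])

# set_params splits "clf__C" on the first "__", looks up the "clf" step via
# get_params(deep=True), and forwards C=10.0 to the nested estimator.
pipe.set_params(clf__C=10.0)
print(pipe.get_params(deep=True)["clf__C"])   # 10.0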
16,650
imodels.discretization.discretizer
BasicDiscretizer
Discretize numeric data into bins. Provides a wrapper around KBinsDiscretizer from sklearn Params ------ n_bins : int or array-like of shape (len(dcols),), default=2 Number of bins to discretize each feature into. dcols : list of strings The names of the columns to be discretized; by default, discretize all float and int columns in X. encode : {'onehot', 'ordinal'}, default='onehot' Method used to encode the transformed result. onehot Encode the transformed result with one-hot encoding and return a dense array. ordinal Return the bin identifier encoded as an integer value. strategy : {'uniform', 'quantile', 'kmeans'}, default='quantile' Strategy used to define the widths of the bins. uniform All bins in each feature have identical widths. quantile All bins in each feature have the same number of points. kmeans Values in each bin have the same nearest center of a 1D k-means cluster. onehot_drop : {‘first’, ‘if_binary’} or a array-like of shape (len(dcols),), default='if_binary' Specifies a methodology to use to drop one of the categories per feature when encode = "onehot". None Retain all features (the default). ‘first’ Drop the first y_str in each feature. If only one y_str is present, the feature will be dropped entirely. ‘if_binary’ Drop the first y_str in each feature with two categories. Features with 1 or more than 2 categories are left intact. Attributes ---------- discretizer_ : object of class KBinsDiscretizer() Primary discretization method used to bin numeric data manual_discretizer_ : dictionary Provides bin_edges to feed into _quantile_discretization() and do quantile discretization manually for features where KBinsDiscretizer() failed. Ignored if strategy != 'quantile' or no errors in KBinsDiscretizer(). onehot_ : object of class OneHotEncoder() One hot encoding fit. Ignored if encode != 'onehot' Examples --------
class BasicDiscretizer(AbstractDiscretizer): """ Discretize numeric data into bins. Provides a wrapper around KBinsDiscretizer from sklearn Params ------ n_bins : int or array-like of shape (len(dcols),), default=2 Number of bins to discretize each feature into. dcols : list of strings The names of the columns to be discretized; by default, discretize all float and int columns in X. encode : {'onehot', 'ordinal'}, default='onehot' Method used to encode the transformed result. onehot Encode the transformed result with one-hot encoding and return a dense array. ordinal Return the bin identifier encoded as an integer value. strategy : {'uniform', 'quantile', 'kmeans'}, default='quantile' Strategy used to define the widths of the bins. uniform All bins in each feature have identical widths. quantile All bins in each feature have the same number of points. kmeans Values in each bin have the same nearest center of a 1D k-means cluster. onehot_drop : {‘first’, ‘if_binary’} or a array-like of shape (len(dcols),), default='if_binary' Specifies a methodology to use to drop one of the categories per feature when encode = "onehot". None Retain all features (the default). ‘first’ Drop the first y_str in each feature. If only one y_str is present, the feature will be dropped entirely. ‘if_binary’ Drop the first y_str in each feature with two categories. Features with 1 or more than 2 categories are left intact. Attributes ---------- discretizer_ : object of class KBinsDiscretizer() Primary discretization method used to bin numeric data manual_discretizer_ : dictionary Provides bin_edges to feed into _quantile_discretization() and do quantile discretization manually for features where KBinsDiscretizer() failed. Ignored if strategy != 'quantile' or no errors in KBinsDiscretizer(). onehot_ : object of class OneHotEncoder() One hot encoding fit. Ignored if encode != 'onehot' Examples -------- """ def __init__(self, n_bins=2, dcols=[], encode='onehot', strategy='quantile', onehot_drop='if_binary'): super().__init__(n_bins=n_bins, dcols=dcols, encode=encode, strategy=strategy, onehot_drop=onehot_drop) def fit(self, X, y=None): """ Fit the estimator. Parameters ---------- X : data frame of shape (n_samples, n_features) (Training) data to be discretized. y : Ignored. 
This parameter exists only for compatibility with :class:`~sklearn.pipeline.Pipeline` and fit_transform method Returns ------- self """ # initialization and error checking self._fit_preprocessing(X) # apply KBinsDiscretizer to the selected columns discretizer = KBinsDiscretizer(n_bins=self.n_bins, encode='ordinal', strategy=self.strategy) discretizer.fit(X[self.dcols_]) self.discretizer_ = discretizer if (self.encode == 'onehot') | (self.strategy == 'quantile'): discretized_df = discretizer.transform(X[self.dcols_]) discretized_df = pd.DataFrame(discretized_df, columns=self.dcols_, index=X.index).astype(int) # fix KBinsDiscretizer errors if any when strategy = "quantile" if self.strategy == "quantile": err_idx = np.where(discretized_df.nunique() != self.n_bins)[0] self.manual_discretizer_ = dict() for idx in err_idx: col = self.dcols_[idx] if X[col].nunique() > 1: q_values = np.linspace(0, 1, self.n_bins[idx] + 1) bin_edges = np.quantile(X[col], q_values) discretized_df[col] = self._discretize_to_bins(X[col], bin_edges, keep_pointwise_bins=True) self.manual_discretizer_[col] = bin_edges # fit onehot encoded X if specified if self.encode == "onehot": onehot = OneHotEncoder(drop=self.onehot_drop) # , sparse=False) onehot.fit(discretized_df.astype(str)) self.onehot_ = onehot return self def transform(self, X): """ Discretize the data. Parameters ---------- X : data frame of shape (n_samples, n_features) Data to be discretized. Returns ------- X_discretized : data frame Data with features in dcols transformed to the binned space. All other features remain unchanged. """ check_is_fitted(self) # transform using KBinsDiscretizer discretized_df = self.discretizer_.transform( X[self.dcols_]).astype(int) discretized_df = pd.DataFrame(discretized_df, columns=self.dcols_, index=X.index) # fix KBinsDiscretizer errors (if any) when strategy = "quantile" if self.strategy == "quantile": for col in self.manual_discretizer_.keys(): bin_edges = self.manual_discretizer_[col] discretized_df[col] = self._discretize_to_bins(X[col], bin_edges, keep_pointwise_bins=True) # return onehot encoded data if specified and # join discretized columns with rest of X X_discretized = self._transform_postprocessing(discretized_df, X) return X_discretized
(n_bins=2, dcols=[], encode='onehot', strategy='quantile', onehot_drop='if_binary')
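A minimal usage sketch of BasicDiscretizer, with the import path following the module shown in these rows. Ordinal encoding is used here to keep the output easy to inspect; encode='onehot' would instead expand each bin into indicator columns:

import numpy as np
import pandas as pd
from imodels.discretization.discretizer import BasicDiscretizer

rng = np.random.default_rng(0)
X = pd.DataFrame({
    "age": rng.normal(50, 10, size=300),
    "income": rng.lognormal(10, 0.5, size=300),
    "sex": rng.choice(["M", "F"], size=300),   # non-numeric, passed through unchanged
})

disc = BasicDiscretizer(n_bins=3, encode="ordinal", strategy="quantile")
disc.fit(X)                    # discretizes all numeric columns by default
X_binned = disc.transform(X)   # quantile-bin ids for age/income, "sex" untouched
print(X_binned.head())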
16,652
imodels.discretization.discretizer
__init__
null
def __init__(self, n_bins=2, dcols=[], encode='onehot', strategy='quantile', onehot_drop='if_binary'): super().__init__(n_bins=n_bins, dcols=dcols, encode=encode, strategy=strategy, onehot_drop=onehot_drop)
(self, n_bins=2, dcols=[], encode='onehot', strategy='quantile', onehot_drop='if_binary')
16,658
imodels.discretization.discretizer
_discretize_to_bins
Discretize data into bins of the form [a, b) given bin edges/boundaries Parameters ---------- x : array-like of shape (n_samples,) Data vector to be discretized. bin_edges : array-like Values to serve as bin edges; should include min and max values for the range of x keep_pointwise_bins : boolean If True, treat duplicate bin_edges as a pointwise bin, i.e., [a, a]. If False, these bins are in effect ignored. Returns ------- xd: array of shape (n_samples,) where x has been transformed to the binned space
def _discretize_to_bins(self, x, bin_edges, keep_pointwise_bins=False): """ Discretize data into bins of the form [a, b) given bin edges/boundaries Parameters ---------- x : array-like of shape (n_samples,) Data vector to be discretized. bin_edges : array-like Values to serve as bin edges; should include min and max values for the range of x keep_pointwise_bins : boolean If True, treat duplicate bin_edges as a pointwise bin, i.e., [a, a]. If False, these bins are in effect ignored. Returns ------- xd: array of shape (n_samples,) where x has been transformed to the binned space """ # ignore min and max values in bin generation unique_edges = np.unique(bin_edges[1:-1]) if keep_pointwise_bins: # note: min and max values are used to define pointwise bins pointwise_bins = np.unique( bin_edges[pd.Series(bin_edges).duplicated()]) else: pointwise_bins = np.array([]) xd = np.zeros_like(x) i = 1 for idx, split in enumerate(unique_edges): if idx == (len(unique_edges) - 1): # uppermost bin if (idx == 0) & (split in pointwise_bins): # two bins total: (-inf, a], (a, inf) indicator = x > split else: indicator = x >= split # uppermost bin: [a, inf) else: if split in pointwise_bins: # create two bins: [a, a], (a, b) indicator = (x > split) & (x < unique_edges[idx + 1]) # if idx != 0: xd[x == split] = i i += 1 else: # create bin: [a, b) indicator = (x >= split) & (x < unique_edges[idx + 1]) xd[indicator] = i i += 1 return xd.astype(int)
(self, x, bin_edges, keep_pointwise_bins=False)
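For the common case with no duplicated edges, the half-open [a, b) binning documented above matches NumPy's digitize applied to the interior edges. An independent illustration of that contract (not a call into the private method itself):

import numpy as np

x = np.array([1.0, 2.5, 4.0, 7.5, 9.9])
bin_edges = np.array([0.0, 2.5, 5.0, 10.0])        # includes the min/max of the range
# Interior edges only, mirroring how the method ignores the outermost edges.
xd = np.digitize(x, bin_edges[1:-1], right=False)  # 0-based bin ids with [a, b) bins
print(xd)                                          # [0 1 1 2 2]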
16,659
imodels.discretization.discretizer
_fit_preprocessing
Initial checks before fitting the estimator. Parameters ---------- X : data frame of shape (n_samples, n_features) (Training) data to be discretized. Returns ------- self
def _fit_preprocessing(self, X): """ Initial checks before fitting the estimator. Parameters ---------- X : data frame of shape (n_samples, n_features) (Training) data to be discretized. Returns ------- self """ # by default, discretize all numeric columns if len(self.dcols) == 0: numeric_cols = [ col for col in X.columns if is_numeric_dtype(X[col].dtype)] self.dcols_ = numeric_cols # error checking self._validate_n_bins() self._validate_args() self._validate_dcols(X)
(self, X)
16,666
imodels.discretization.discretizer
_transform_postprocessing
Final processing in transform method. Does one-hot encoding (if specified) and joins discretized columns to the un-transformed columns in X. Parameters ---------- discretized_df : data frame of shape (n_sample, len(dcols)) Discretized data in the transformed bin space. X : data frame of shape (n_samples, n_features) Data to be discretized. Returns ------- X_discretized : data frame Data with features in dcols transformed to the binned space. All other features remain unchanged. Encoded either as ordinal or one-hot.
def _transform_postprocessing(self, discretized_df, X): """ Final processing in transform method. Does one-hot encoding (if specified) and joins discretized columns to the un-transformed columns in X. Parameters ---------- discretized_df : data frame of shape (n_sample, len(dcols)) Discretized data in the transformed bin space. X : data frame of shape (n_samples, n_features) Data to be discretized. Returns ------- X_discretized : data frame Data with features in dcols transformed to the binned space. All other features remain unchanged. Encoded either as ordinal or one-hot. """ discretized_df = discretized_df[self.dcols_] # return onehot encoded X if specified if self.encode == "onehot": colnames = [str(col) for col in self.dcols_] try: onehot_col_names = self.onehot_.get_feature_names_out(colnames) except: onehot_col_names = self.onehot_.get_feature_names( colnames) # older versions of sklearn discretized_df = self.onehot_.transform(discretized_df.astype(str)) discretized_df = pd.DataFrame(discretized_df, columns=onehot_col_names, index=X.index).astype(int) # join discretized columns with rest of X cols = [col for col in X.columns if col not in self.dcols_] X_discretized = pd.concat([discretized_df, X[cols]], axis=1) return X_discretized
(self, discretized_df, X)
16,667
imodels.discretization.discretizer
_validate_args
Check if encode, strategy arguments are valid.
def _validate_args(self): """ Check if encode, strategy arguments are valid. """ valid_encode = ('onehot', 'ordinal') if self.encode not in valid_encode: raise ValueError("Valid options for 'encode' are {}. Got encode={!r} instead." .format(valid_encode, self.encode)) valid_strategy = ('uniform', 'quantile', 'kmeans') if (self.strategy not in valid_strategy): raise ValueError("Valid options for 'strategy' are {}. Got strategy={!r} instead." .format(valid_strategy, self.strategy))
(self)
16,669
imodels.discretization.discretizer
_validate_dcols
Check if dcols argument is valid.
def _validate_dcols(self, X): """ Check if dcols argument is valid. """ for col in self.dcols_: if col not in X.columns: raise ValueError("{} is not a column in X.".format(col)) if not is_numeric_dtype(X[col].dtype): raise ValueError("Cannot discretize non-numeric columns.")
(self, X)
16,670
imodels.discretization.discretizer
_validate_n_bins
Check if n_bins argument is valid.
def _validate_n_bins(self): """ Check if n_bins argument is valid. """ orig_bins = self.n_bins n_features = len(self.dcols_) if isinstance(orig_bins, numbers.Number): if not isinstance(orig_bins, numbers.Integral): raise ValueError( "{} received an invalid n_bins type. " "Received {}, expected int.".format( AbstractDiscretizer.__name__, type(orig_bins).__name__ ) ) if orig_bins < 2: raise ValueError( "{} received an invalid number " "of bins. Received {}, expected at least 2.".format( AbstractDiscretizer.__name__, orig_bins ) ) self.n_bins = np.full(n_features, orig_bins, dtype=int) else: n_bins = check_array(orig_bins, dtype=int, copy=True, ensure_2d=False) if n_bins.ndim > 1 or n_bins.shape[0] != n_features: raise ValueError( "n_bins must be a scalar or array of shape (n_features,).") bad_nbins_value = (n_bins < 2) | (n_bins != orig_bins) violating_indices = np.where(bad_nbins_value)[0] if violating_indices.shape[0] > 0: indices = ", ".join(str(i) for i in violating_indices) raise ValueError( "{} received an invalid number " "of bins at indices {}. Number of bins " "must be at least 2, and must be an int.".format( AbstractDiscretizer.__name__, indices ) ) self.n_bins = n_bins
(self)
16,672
imodels.discretization.discretizer
fit
Fit the estimator. Parameters ---------- X : data frame of shape (n_samples, n_features) (Training) data to be discretized. y : Ignored. This parameter exists only for compatibility with :class:`~sklearn.pipeline.Pipeline` and fit_transform method Returns ------- self
def fit(self, X, y=None): """ Fit the estimator. Parameters ---------- X : data frame of shape (n_samples, n_features) (Training) data to be discretized. y : Ignored. This parameter exists only for compatibility with :class:`~sklearn.pipeline.Pipeline` and fit_transform method Returns ------- self """ # initialization and error checking self._fit_preprocessing(X) # apply KBinsDiscretizer to the selected columns discretizer = KBinsDiscretizer(n_bins=self.n_bins, encode='ordinal', strategy=self.strategy) discretizer.fit(X[self.dcols_]) self.discretizer_ = discretizer if (self.encode == 'onehot') | (self.strategy == 'quantile'): discretized_df = discretizer.transform(X[self.dcols_]) discretized_df = pd.DataFrame(discretized_df, columns=self.dcols_, index=X.index).astype(int) # fix KBinsDiscretizer errors if any when strategy = "quantile" if self.strategy == "quantile": err_idx = np.where(discretized_df.nunique() != self.n_bins)[0] self.manual_discretizer_ = dict() for idx in err_idx: col = self.dcols_[idx] if X[col].nunique() > 1: q_values = np.linspace(0, 1, self.n_bins[idx] + 1) bin_edges = np.quantile(X[col], q_values) discretized_df[col] = self._discretize_to_bins(X[col], bin_edges, keep_pointwise_bins=True) self.manual_discretizer_[col] = bin_edges # fit onehot encoded X if specified if self.encode == "onehot": onehot = OneHotEncoder(drop=self.onehot_drop) # , sparse=False) onehot.fit(discretized_df.astype(str)) self.onehot_ = onehot return self
(self, X, y=None)
16,673
sklearn.base
fit_transform
Fit to data, then transform it. Fits transformer to `X` and `y` with optional parameters `fit_params` and returns a transformed version of `X`. Parameters ---------- X : array-like of shape (n_samples, n_features) Input samples. y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None Target values (None for unsupervised transformations). **fit_params : dict Additional fit parameters. Returns ------- X_new : ndarray array of shape (n_samples, n_features_new) Transformed array.
def set_params(self, **params): """Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as :class:`~sklearn.pipeline.Pipeline`). The latter have parameters of the form ``<component>__<parameter>`` so that it's possible to update each component of a nested object. Parameters ---------- **params : dict Estimator parameters. Returns ------- self : estimator instance Estimator instance. """ if not params: # Simple optimization to gain speed (inspect is slow) return self valid_params = self.get_params(deep=True) nested_params = defaultdict(dict) # grouped by prefix for key, value in params.items(): key, delim, sub_key = key.partition("__") if key not in valid_params: local_valid_params = self._get_param_names() raise ValueError( f"Invalid parameter {key!r} for estimator {self}. " f"Valid parameters are: {local_valid_params!r}." ) if delim: nested_params[key][sub_key] = value else: setattr(self, key, value) valid_params[key] = value for key, sub_params in nested_params.items(): valid_params[key].set_params(**sub_params) return self
(self, X, y=None, **fit_params)
16,676
sklearn.utils._set_output
set_output
Set output container. See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py` for an example on how to use the API. Parameters ---------- transform : {"default", "pandas"}, default=None Configure output of `transform` and `fit_transform`. - `"default"`: Default output format of a transformer - `"pandas"`: DataFrame output - `"polars"`: Polars output - `None`: Transform configuration is unchanged .. versionadded:: 1.4 `"polars"` option was added. Returns ------- self : estimator instance Estimator instance.
def create_container(self, X_output, X_original, columns, inplace=False): """Create container from `X_output` with additional metadata. Parameters ---------- X_output : {ndarray, dataframe} Data to wrap. X_original : {ndarray, dataframe} Original input dataframe. This is used to extract the metadata that should be passed to `X_output`, e.g. pandas row index. columns : callable, ndarray, or None The column names or a callable that returns the column names. The callable is useful if the column names require some computation. If `None`, then no columns are passed to the container's constructor. inplace : bool, default=False Whether or not we intend to modify `X_output` in-place. However, it does not guarantee that we return the same object if the in-place operation is not possible. Returns ------- wrapped_output : container_type `X_output` wrapped into the container type. """
(self, *, transform=None)
16,678
imodels.discretization.discretizer
transform
Discretize the data. Parameters ---------- X : data frame of shape (n_samples, n_features) Data to be discretized. Returns ------- X_discretized : data frame Data with features in dcols transformed to the binned space. All other features remain unchanged.
def transform(self, X): """ Discretize the data. Parameters ---------- X : data frame of shape (n_samples, n_features) Data to be discretized. Returns ------- X_discretized : data frame Data with features in dcols transformed to the binned space. All other features remain unchanged. """ check_is_fitted(self) # transform using KBinsDiscretizer discretized_df = self.discretizer_.transform( X[self.dcols_]).astype(int) discretized_df = pd.DataFrame(discretized_df, columns=self.dcols_, index=X.index) # fix KBinsDiscretizer errors (if any) when strategy = "quantile" if self.strategy == "quantile": for col in self.manual_discretizer_.keys(): bin_edges = self.manual_discretizer_[col] discretized_df[col] = self._discretize_to_bins(X[col], bin_edges, keep_pointwise_bins=True) # return onehot encoded data if specified and # join discretized columns with rest of X X_discretized = self._transform_postprocessing(discretized_df, X) return X_discretized
(self, X)
16,679
imodels.rule_list.bayesian_rule_list.bayesian_rule_list
BayesianRuleListClassifier
This is a scikit-learn compatible wrapper for the Bayesian Rule List classifier developed by Benjamin Letham. It produces a highly interpretable model (a list of decision rules) by sampling many different rule lists, trying to optimize for compactness and predictive performance. Parameters ---------- listlengthprior : int, optional (default=3) Prior hyperparameter for expected list length (excluding null rule) listwidthprior : int, optional (default=1) Prior hyperparameter for expected list width (excluding null rule) maxcardinality : int, optional (default=2) Maximum cardinality of an itemset minsupport : float, optional (default=0.1) Minimum support (fraction between 0 and 1) of an itemset alpha : array_like, shape = [n_classes] prior hyperparameter for multinomial pseudocounts n_chains : int, optional (default=3) Number of MCMC chains for inference max_iter : int, optional (default=50000) Maximum number of iterations class1label: str, optional (default="class 1") Label or description of what the positive class (with y=1) means verbose: bool, optional (default=True) Verbose output random_state: int Random seed
class BayesianRuleListClassifier(BaseEstimator, RuleList, ClassifierMixin): """ This is a scikit-learn compatible wrapper for the Bayesian Rule List classifier developed by Benjamin Letham. It produces a highly interpretable model (a list of decision rules) by sampling many different rule lists, trying to optimize for compactness and predictive performance. Parameters ---------- listlengthprior : int, optional (default=3) Prior hyperparameter for expected list length (excluding null rule) listwidthprior : int, optional (default=1) Prior hyperparameter for expected list width (excluding null rule) maxcardinality : int, optional (default=2) Maximum cardinality of an itemset minsupport : float, optional (default=0.1) Minimum support (fraction between 0 and 1) of an itemset alpha : array_like, shape = [n_classes] prior hyperparameter for multinomial pseudocounts n_chains : int, optional (default=3) Number of MCMC chains for inference max_iter : int, optional (default=50000) Maximum number of iterations class1label: str, optional (default="class 1") Label or description of what the positive class (with y=1) means verbose: bool, optional (default=True) Verbose output random_state: int Random seed """ def __init__(self, listlengthprior=3, listwidthprior=1, maxcardinality=2, minsupport=0.1, alpha=np.array([1., 1.]), n_chains=3, max_iter=50000, class1label="class 1", verbose=False, random_state=42): self.listlengthprior = listlengthprior self.listwidthprior = listwidthprior self.maxcardinality = maxcardinality self.minsupport = minsupport self.alpha = alpha self.n_chains = n_chains self.max_iter = max_iter self.class1label = class1label self.verbose = verbose self._zmin = 1 self.thinning = 1 # The thinning rate self.burnin = self.max_iter // 2 # the number of samples to drop as burn-in in-simulation self.d_star = None self.random_state = random_state self.seed() def seed(self): if self.random_state is not None: random.seed(self.random_state) np.random.seed(self.random_state) def _setlabels(self, X, feature_names=[]): if len(feature_names) == 0: if type(X) == pd.DataFrame and ('object' in str(X.columns.dtype) or 'str' in str(X.columns.dtype)): feature_names = X.columns else: feature_names = ["ft" + str(i + 1) for i in range(len(X[0]))] self.feature_names = feature_names def fit(self, X, y, feature_names: list = None, verbose=False): """Fit rule lists to data. Note: The BRL algorithm requires numeric features to be discretized into bins prior to fitting. See imodels.discretization or sklearn.preprocessing for helpful utilities. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data y : array_like, shape = [n_samples] Labels feature_names : array_like, shape = [n_features], optional (default: []) String labels for each feature. If empty and X is a DataFrame, column labels are used. If empty and X is not a DataFrame, then features are simply enumerated verbose : bool Currently doesn't do anything Returns ------- self : returns an instance of self. 
""" self.seed() if len(set(y)) != 2: raise ValueError("Only binary classification is supported at this time!") X, y = check_X_y(X, y) check_classification_targets(y) self.n_features_in_ = X.shape[1] self.classes_ = unique_labels(y) # Check that all features are either categorical or discretized if not np.all((X == 1) | (X == 0)): raise ValueError("All numeric features must be discretized prior to fitting!") self.feature_dict_ = get_feature_dict(X.shape[1], feature_names) self.feature_placeholders = np.array(list(self.feature_dict_.keys())) self.feature_names = np.array(list(self.feature_dict_.values())) X_df = pd.DataFrame(X, columns=self.feature_placeholders) itemsets = extract_fpgrowth(X_df, minsupport=self.minsupport, maxcardinality=self.maxcardinality, verbose=verbose) # Now form the data-vs.-lhs set # X[j] is the set of data points that contain itemset j (that is, satisfy rule j) for col in X_df.columns: # X_df[c] = [c if x == 1 else '' for x in list(X_df[c])] X_df[col] = X_df[col].replace({1: col, 0: ''}) itemset_support_inds = [{}] * (len(itemsets) + 1) itemset_support_inds[0] = set(range(X_df.shape[0])) # the default rule satisfies all data for (j, lhs) in enumerate(itemsets): itemset_support_inds[j + 1] = set( [i for (i, xi) in enumerate(X_df.values) if set(lhs).issubset(xi)]) # now form lhs_len lhs_len = [0] for lhs in itemsets: lhs_len.append(len(lhs)) nruleslen = Counter(lhs_len) lhs_len = np.array(lhs_len) itemsets_all = ['null'] itemsets_all.extend(itemsets) self.itemsets = itemsets_all Xtrain = itemset_support_inds Ytrain = np.vstack((1 - np.array(y), y)).T.astype(int) permsdic = defaultdict(default_permsdic) # We will store here the MCMC results # Do MCMC res, Rhat = run_bdl_multichain_serial( self.max_iter, self.thinning, self.alpha, self.listlengthprior, self.listwidthprior, Xtrain, Ytrain, nruleslen, lhs_len, self.maxcardinality, permsdic, self.burnin, self.n_chains, [None] * self.n_chains, verbose=self.verbose, seed=self.random_state) # Merge the chains permsdic = merge_chains(res) # The point estimate, BRL-point self.d_star = get_point_estimate(permsdic, lhs_len, Xtrain, Ytrain, self.alpha, nruleslen, self.maxcardinality, self.listlengthprior, self.listwidthprior, verbose=self.verbose) # get the point estimate if self.d_star: # Compute the rule consequent self.theta, self.ci_theta = get_rule_rhs(Xtrain, Ytrain, self.d_star, self.alpha, True) self.final_itemsets = np.array(self.itemsets, dtype=object)[self.d_star] rule_strs = itemsets_to_rules(self.final_itemsets) self.rules_without_feature_names_ = [Rule(r) for r in rule_strs] self.rules_ = [ replace_feature_name(rule, self.feature_dict_) for rule in self.rules_without_feature_names_ ] self.complexity_ = self._get_complexity() return self def _get_complexity(self): n_rule_terms = sum([len(iset) for iset in self.final_itemsets if type(iset) != str]) return n_rule_terms + 1 # def __repr__(self, decimals=1): # if self.d_star: # detect = "" # if self.class1label != "class 1": # detect = "for detecting " + self.class1label # header = "Trained RuleListClassifier " + detect + "\n" # separator = "".join(["="] * len(header)) + "\n" # s = "" # for i, j in enumerate(self.d_star): # if self.itemsets[j] != 'null': # condition = "ELSE IF " + ( # " AND ".join([str(self.itemsets[j][k]) for k in range(len(self.itemsets[j]))])) + " THEN" # else: # condition = "ELSE" # s += condition + " probability of " + self.class1label + ": " + str( # np.round(self.theta[i] * 100, decimals)) + "% (" + str( # np.round(self.ci_theta[i][0] * 100, 
decimals)) + "%-" + str( # np.round(self.ci_theta[i][1] * 100, decimals)) + "%)\n" # return header + separator + s[5:] + separator[1:] # else: # return "(Untrained RuleListClassifier)" def __repr__(self, decimals=1): if self.d_star: detect = "" if self.class1label != "class 1": detect = "for detecting " + self.class1label header = "Trained RuleListClassifier " + detect + "\n" separator = "".join(["="] * len(header)) + "\n" s = "" for i in range(len(self.rules_) + 1): if i != len(self.rules_): condition = "ELSE IF " + str(self.rules_[i]) + " THEN" else: condition = "ELSE" s += condition + " probability of " + self.class1label + ": " + str( np.round(self.theta[i] * 100, decimals)) + "% (" + str( np.round(self.ci_theta[i][0] * 100, decimals)) + "%-" + str( np.round(self.ci_theta[i][1] * 100, decimals)) + "%)\n" return header + separator + s[5:] + separator[1:] else: return "(Untrained RuleListClassifier)" def _to_itemset_indices(self, X_df_onehot): # X[j] is the set of data points that contain itemset j (that is, satisfy rule j) for c in X_df_onehot.columns: X_df_onehot[c] = [c if x == 1 else '' for x in list(X_df_onehot[c])] X = [set() for j in range(len(self.itemsets))] X[0] = set(range(X_df_onehot.shape[0])) # the default rule satisfies all data for (j, lhs) in enumerate(self.itemsets): if j > 0: X[j] = set([i for (i, xi) in enumerate(X_df_onehot.values) if set(lhs).issubset(xi)]) return X def predict_proba(self, X): """Compute probabilities of possible outcomes for samples in X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- T : array-like, shape = [n_samples, n_classes] Returns the probability of the sample for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`. """ check_is_fitted(self) X = check_array(X) D = pd.DataFrame(X, columns=self.feature_placeholders) N = len(D) X2 = self._to_itemset_indices(D) P = preds_d_t(X2, np.zeros((N, 1), dtype=int), self.d_star, self.theta) return np.vstack((1 - P, P)).T def predict(self, X, threshold=0.1): """Perform classification on samples in X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- y_pred : array, shape = [n_samples] Class labels for samples in X. """ check_is_fitted(self) X = check_array(X) # print('predicting!') # print('preds_proba', self.predict_proba(X)[:, 1]) return 1 * (self.predict_proba(X)[:, 1] >= threshold)
(listlengthprior=3, listwidthprior=1, maxcardinality=2, minsupport=0.1, alpha=array([1., 1.]), n_chains=3, max_iter=50000, class1label='class 1', verbose=False, random_state=42)
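A minimal usage sketch (not part of the source, assuming the top-level imodels export): fitting BayesianRuleListClassifier on a small synthetic dataset whose features are already binarized, as the class requires; the data, column names, and reduced max_iter are illustrative choices.

import numpy as np
from imodels import BayesianRuleListClassifier

rng = np.random.default_rng(0)
X = rng.integers(0, 2, size=(200, 4))            # binary features only
y = (X[:, 0] & X[:, 1]).astype(int)              # label depends on two features

model = BayesianRuleListClassifier(max_iter=2000, n_chains=2, random_state=42)
model.fit(X, y, feature_names=["f0", "f1", "f2", "f3"])
print(model)                                     # prints the learned rule list
print(model.predict(X[:5]))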
16,681
imodels.rule_list.bayesian_rule_list.bayesian_rule_list
__init__
null
def __init__(self, listlengthprior=3, listwidthprior=1, maxcardinality=2, minsupport=0.1,
             alpha=np.array([1., 1.]), n_chains=3, max_iter=50000, class1label="class 1",
             verbose=False, random_state=42):
    self.listlengthprior = listlengthprior
    self.listwidthprior = listwidthprior
    self.maxcardinality = maxcardinality
    self.minsupport = minsupport
    self.alpha = alpha
    self.n_chains = n_chains
    self.max_iter = max_iter
    self.class1label = class1label
    self.verbose = verbose
    self._zmin = 1

    self.thinning = 1  # The thinning rate
    self.burnin = self.max_iter // 2  # the number of samples to drop as burn-in in-simulation

    self.d_star = None
    self.random_state = random_state
    self.seed()
(self, listlengthprior=3, listwidthprior=1, maxcardinality=2, minsupport=0.1, alpha=array([1., 1.]), n_chains=3, max_iter=50000, class1label='class 1', verbose=False, random_state=42)
16,682
imodels.rule_list.bayesian_rule_list.bayesian_rule_list
__repr__
null
def __repr__(self, decimals=1):
    if self.d_star:
        detect = ""
        if self.class1label != "class 1":
            detect = "for detecting " + self.class1label
        header = "Trained RuleListClassifier " + detect + "\n"
        separator = "".join(["="] * len(header)) + "\n"
        s = ""
        for i in range(len(self.rules_) + 1):
            if i != len(self.rules_):
                condition = "ELSE IF " + str(self.rules_[i]) + " THEN"
            else:
                condition = "ELSE"
            s += condition + " probability of " + self.class1label + ": " + str(
                np.round(self.theta[i] * 100, decimals)) + "% (" + str(
                np.round(self.ci_theta[i][0] * 100, decimals)) + "%-" + str(
                np.round(self.ci_theta[i][1] * 100, decimals)) + "%)\n"
        return header + separator + s[5:] + separator[1:]
    else:
        return "(Untrained RuleListClassifier)"
(self, decimals=1)
16,687
imodels.rule_list.bayesian_rule_list.bayesian_rule_list
_get_complexity
null
def _get_complexity(self):
    n_rule_terms = sum([len(iset) for iset in self.final_itemsets if type(iset) != str])
    return n_rule_terms + 1
(self)
16,694
imodels.rule_list.bayesian_rule_list.bayesian_rule_list
_setlabels
null
def _setlabels(self, X, feature_names=[]):
    if len(feature_names) == 0:
        if type(X) == pd.DataFrame and ('object' in str(X.columns.dtype) or 'str' in str(X.columns.dtype)):
            feature_names = X.columns
        else:
            feature_names = ["ft" + str(i + 1) for i in range(len(X[0]))]
    self.feature_names = feature_names
(self, X, feature_names=[])
16,695
imodels.rule_list.bayesian_rule_list.bayesian_rule_list
_to_itemset_indices
null
def _to_itemset_indices(self, X_df_onehot):
    # X[j] is the set of data points that contain itemset j (that is, satisfy rule j)
    for c in X_df_onehot.columns:
        X_df_onehot[c] = [c if x == 1 else '' for x in list(X_df_onehot[c])]
    X = [set() for j in range(len(self.itemsets))]
    X[0] = set(range(X_df_onehot.shape[0]))  # the default rule satisfies all data
    for (j, lhs) in enumerate(self.itemsets):
        if j > 0:
            X[j] = set([i for (i, xi) in enumerate(X_df_onehot.values) if set(lhs).issubset(xi)])
    return X
(self, X_df_onehot)
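For intuition, a hedged, self-contained illustration (not from the source) of the index structure this helper builds: each itemset maps to the set of row indices whose one-hot columns cover it.

import pandas as pd

X_onehot = pd.DataFrame({"f1": [1, 0, 1], "f2": [1, 1, 0]})
row_items = [{c for c in X_onehot.columns if X_onehot.loc[i, c] == 1}
             for i in X_onehot.index]
itemsets = [("f1",), ("f1", "f2")]                       # toy itemsets
support = [{i for i, items in enumerate(row_items) if set(s).issubset(items)}
           for s in itemsets]
print(support)                                           # [{0, 2}, {0}]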
16,698
imodels.rule_list.bayesian_rule_list.bayesian_rule_list
fit
Fit rule lists to data. Note: The BRL algorithm requires numeric features to be discretized into bins prior to fitting. See imodels.discretization or sklearn.preprocessing for helpful utilities. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data y : array_like, shape = [n_samples] Labels feature_names : array_like, shape = [n_features], optional (default: []) String labels for each feature. If empty and X is a DataFrame, column labels are used. If empty and X is not a DataFrame, then features are simply enumerated verbose : bool Currently doesn't do anything Returns ------- self : returns an instance of self.
def fit(self, X, y, feature_names: list = None, verbose=False): """Fit rule lists to data. Note: The BRL algorithm requires numeric features to be discretized into bins prior to fitting. See imodels.discretization or sklearn.preprocessing for helpful utilities. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data y : array_like, shape = [n_samples] Labels feature_names : array_like, shape = [n_features], optional (default: []) String labels for each feature. If empty and X is a DataFrame, column labels are used. If empty and X is not a DataFrame, then features are simply enumerated verbose : bool Currently doesn't do anything Returns ------- self : returns an instance of self. """ self.seed() if len(set(y)) != 2: raise ValueError("Only binary classification is supported at this time!") X, y = check_X_y(X, y) check_classification_targets(y) self.n_features_in_ = X.shape[1] self.classes_ = unique_labels(y) # Check that all features are either categorical or discretized if not np.all((X == 1) | (X == 0)): raise ValueError("All numeric features must be discretized prior to fitting!") self.feature_dict_ = get_feature_dict(X.shape[1], feature_names) self.feature_placeholders = np.array(list(self.feature_dict_.keys())) self.feature_names = np.array(list(self.feature_dict_.values())) X_df = pd.DataFrame(X, columns=self.feature_placeholders) itemsets = extract_fpgrowth(X_df, minsupport=self.minsupport, maxcardinality=self.maxcardinality, verbose=verbose) # Now form the data-vs.-lhs set # X[j] is the set of data points that contain itemset j (that is, satisfy rule j) for col in X_df.columns: # X_df[c] = [c if x == 1 else '' for x in list(X_df[c])] X_df[col] = X_df[col].replace({1: col, 0: ''}) itemset_support_inds = [{}] * (len(itemsets) + 1) itemset_support_inds[0] = set(range(X_df.shape[0])) # the default rule satisfies all data for (j, lhs) in enumerate(itemsets): itemset_support_inds[j + 1] = set( [i for (i, xi) in enumerate(X_df.values) if set(lhs).issubset(xi)]) # now form lhs_len lhs_len = [0] for lhs in itemsets: lhs_len.append(len(lhs)) nruleslen = Counter(lhs_len) lhs_len = np.array(lhs_len) itemsets_all = ['null'] itemsets_all.extend(itemsets) self.itemsets = itemsets_all Xtrain = itemset_support_inds Ytrain = np.vstack((1 - np.array(y), y)).T.astype(int) permsdic = defaultdict(default_permsdic) # We will store here the MCMC results # Do MCMC res, Rhat = run_bdl_multichain_serial( self.max_iter, self.thinning, self.alpha, self.listlengthprior, self.listwidthprior, Xtrain, Ytrain, nruleslen, lhs_len, self.maxcardinality, permsdic, self.burnin, self.n_chains, [None] * self.n_chains, verbose=self.verbose, seed=self.random_state) # Merge the chains permsdic = merge_chains(res) # The point estimate, BRL-point self.d_star = get_point_estimate(permsdic, lhs_len, Xtrain, Ytrain, self.alpha, nruleslen, self.maxcardinality, self.listlengthprior, self.listwidthprior, verbose=self.verbose) # get the point estimate if self.d_star: # Compute the rule consequent self.theta, self.ci_theta = get_rule_rhs(Xtrain, Ytrain, self.d_star, self.alpha, True) self.final_itemsets = np.array(self.itemsets, dtype=object)[self.d_star] rule_strs = itemsets_to_rules(self.final_itemsets) self.rules_without_feature_names_ = [Rule(r) for r in rule_strs] self.rules_ = [ replace_feature_name(rule, self.feature_dict_) for rule in self.rules_without_feature_names_ ] self.complexity_ = self._get_complexity() return self
(self, X, y, feature_names: Optional[list] = None, verbose=False)
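Since fit raises a ValueError unless every feature is 0/1, a common preprocessing step is one-hot discretization. The sketch below uses sklearn's KBinsDiscretizer, one of the utilities the docstring points to; the bin count and data are arbitrary illustrations.

import numpy as np
from sklearn.preprocessing import KBinsDiscretizer

rng = np.random.default_rng(1)
X_cont = rng.normal(size=(100, 3))                       # continuous features
disc = KBinsDiscretizer(n_bins=3, encode="onehot-dense", strategy="quantile")
X_bin = disc.fit_transform(X_cont)                       # each column becomes a 0/1 indicator
assert np.all((X_bin == 0) | (X_bin == 1))               # now safe to pass to fit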
16,701
imodels.rule_list.bayesian_rule_list.bayesian_rule_list
predict
Perform classification on samples in X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- y_pred : array, shape = [n_samples] Class labels for samples in X.
def predict(self, X, threshold=0.1):
    """Perform classification on samples in X.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    y_pred : array, shape = [n_samples]
        Class labels for samples in X.
    """
    check_is_fitted(self)
    X = check_array(X)
    # print('predicting!')
    # print('preds_proba', self.predict_proba(X)[:, 1])
    return 1 * (self.predict_proba(X)[:, 1] >= threshold)
(self, X, threshold=0.1)
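A short hedged note on the threshold argument, reusing model and X from the usage sketch after the class signature above: predict labels a sample positive whenever the estimated P(y=1) is at least threshold, so the default of 0.1 is deliberately permissive.

proba_pos = model.predict_proba(X)[:, 1]
preds_default = (proba_pos >= 0.1).astype(int)   # equivalent to model.predict(X)
preds_strict = (proba_pos >= 0.5).astype(int)    # equivalent to model.predict(X, threshold=0.5)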
16,702
imodels.rule_list.bayesian_rule_list.bayesian_rule_list
predict_proba
Compute probabilities of possible outcomes for samples in X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- T : array-like, shape = [n_samples, n_classes] Returns the probability of the sample for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`.
def predict_proba(self, X):
    """Compute probabilities of possible outcomes for samples in X.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    T : array-like, shape = [n_samples, n_classes]
        Returns the probability of the sample for each class in the model.
        The columns correspond to the classes in sorted order, as they
        appear in the attribute `classes_`.
    """
    check_is_fitted(self)
    X = check_array(X)
    D = pd.DataFrame(X, columns=self.feature_placeholders)
    N = len(D)
    X2 = self._to_itemset_indices(D)
    P = preds_d_t(X2, np.zeros((N, 1), dtype=int), self.d_star, self.theta)
    return np.vstack((1 - P, P)).T
(self, X)
16,704
imodels.rule_list.bayesian_rule_list.bayesian_rule_list
seed
null
def seed(self):
    if self.random_state is not None:
        random.seed(self.random_state)
        np.random.seed(self.random_state)
(self)
16,705
sklearn.utils._metadata_requests
set_fit_request
Request metadata passed to the ``fit`` method. Note that this method is only relevant if ``enable_metadata_routing=True`` (see :func:`sklearn.set_config`). Please see :ref:`User Guide <metadata_routing>` on how the routing mechanism works. The options for each parameter are: - ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided. - ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``. - ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it. - ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name. The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the existing request. This allows you to change the request for some parameters and not others. .. versionadded:: 1.3 .. note:: This method is only relevant if this estimator is used as a sub-estimator of a meta-estimator, e.g. used inside a :class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect. Parameters ---------- feature_names : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED Metadata routing for ``feature_names`` parameter in ``fit``. verbose : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED Metadata routing for ``verbose`` parameter in ``fit``. Returns ------- self : object The updated object.
def __get__(self, instance, owner): # we would want to have a method which accepts only the expected args def func(**kw): """Updates the request for provided parameters This docstring is overwritten below. See REQUESTER_DOC for expected functionality """ if not _routing_enabled(): raise RuntimeError( "This method is only available when metadata routing is enabled." " You can enable it using" " sklearn.set_config(enable_metadata_routing=True)." ) if self.validate_keys and (set(kw) - set(self.keys)): raise TypeError( f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments" f" are: {set(self.keys)}" ) requests = instance._get_metadata_request() method_metadata_request = getattr(requests, self.name) for prop, alias in kw.items(): if alias is not UNCHANGED: method_metadata_request.add_request(param=prop, alias=alias) instance._metadata_request = requests return instance # Now we set the relevant attributes of the function so that it seems # like a normal method to the end user, with known expected arguments. func.__name__ = f"set_{self.name}_request" params = [ inspect.Parameter( name="self", kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=owner, ) ] params.extend( [ inspect.Parameter( k, inspect.Parameter.KEYWORD_ONLY, default=UNCHANGED, annotation=Optional[Union[bool, None, str]], ) for k in self.keys ] ) func.__signature__ = inspect.Signature( params, return_annotation=owner, ) doc = REQUESTER_DOC.format(method=self.name) for metadata in self.keys: doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name) doc += REQUESTER_DOC_RETURN func.__doc__ = doc return func
(self: imodels.rule_list.bayesian_rule_list.bayesian_rule_list.BayesianRuleListClassifier, *, feature_names: Union[bool, NoneType, str] = '$UNCHANGED$', verbose: Union[bool, NoneType, str] = '$UNCHANGED$') -> imodels.rule_list.bayesian_rule_list.bayesian_rule_list.BayesianRuleListClassifier
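A hedged sketch of the routing pattern this generated method supports (scikit-learn >= 1.3 is assumed): routing must be switched on globally before the request is recorded, and the request only matters when the estimator is nested inside a meta-estimator.

import sklearn
from imodels import BayesianRuleListClassifier

sklearn.set_config(enable_metadata_routing=True)
brl = BayesianRuleListClassifier().set_fit_request(feature_names=True)
print(brl.get_metadata_routing())        # shows that fit now requests `feature_names`
sklearn.set_config(enable_metadata_routing=False)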
16,707
sklearn.utils._metadata_requests
set_predict_request
Request metadata passed to the ``predict`` method. Note that this method is only relevant if ``enable_metadata_routing=True`` (see :func:`sklearn.set_config`). Please see :ref:`User Guide <metadata_routing>` on how the routing mechanism works. The options for each parameter are: - ``True``: metadata is requested, and passed to ``predict`` if provided. The request is ignored if metadata is not provided. - ``False``: metadata is not requested and the meta-estimator will not pass it to ``predict``. - ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it. - ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name. The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the existing request. This allows you to change the request for some parameters and not others. .. versionadded:: 1.3 .. note:: This method is only relevant if this estimator is used as a sub-estimator of a meta-estimator, e.g. used inside a :class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect. Parameters ---------- threshold : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED Metadata routing for ``threshold`` parameter in ``predict``. Returns ------- self : object The updated object.
def __get__(self, instance, owner): # we would want to have a method which accepts only the expected args def func(**kw): """Updates the request for provided parameters This docstring is overwritten below. See REQUESTER_DOC for expected functionality """ if not _routing_enabled(): raise RuntimeError( "This method is only available when metadata routing is enabled." " You can enable it using" " sklearn.set_config(enable_metadata_routing=True)." ) if self.validate_keys and (set(kw) - set(self.keys)): raise TypeError( f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments" f" are: {set(self.keys)}" ) requests = instance._get_metadata_request() method_metadata_request = getattr(requests, self.name) for prop, alias in kw.items(): if alias is not UNCHANGED: method_metadata_request.add_request(param=prop, alias=alias) instance._metadata_request = requests return instance # Now we set the relevant attributes of the function so that it seems # like a normal method to the end user, with known expected arguments. func.__name__ = f"set_{self.name}_request" params = [ inspect.Parameter( name="self", kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=owner, ) ] params.extend( [ inspect.Parameter( k, inspect.Parameter.KEYWORD_ONLY, default=UNCHANGED, annotation=Optional[Union[bool, None, str]], ) for k in self.keys ] ) func.__signature__ = inspect.Signature( params, return_annotation=owner, ) doc = REQUESTER_DOC.format(method=self.name) for metadata in self.keys: doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name) doc += REQUESTER_DOC_RETURN func.__doc__ = doc return func
(self: imodels.rule_list.bayesian_rule_list.bayesian_rule_list.BayesianRuleListClassifier, *, threshold: Union[bool, NoneType, str] = '$UNCHANGED$') -> imodels.rule_list.bayesian_rule_list.bayesian_rule_list.BayesianRuleListClassifier
16,709
imodels.rule_set.brs
BayesianRuleSetClassifier
Bayesian or-of-and algorithm. Generates patterns that satisfy the minimum support and maximum length and then select the Nrules rules that have the highest entropy. In function SA_patternbased, each local maximum is stored in maps and the best BOA is returned. Remember here the BOA contains only the index of selected rules from Nrules self.rules_
class BayesianRuleSetClassifier(RuleSet, BaseEstimator, ClassifierMixin): '''Bayesian or-of-and algorithm. Generates patterns that satisfy the minimum support and maximum length and then select the Nrules rules that have the highest entropy. In function SA_patternbased, each local maximum is stored in maps and the best BOA is returned. Remember here the BOA contains only the index of selected rules from Nrules self.rules_ ''' def __init__(self, n_rules: int = 2000, supp=5, maxlen: int = 10, num_iterations=5000, num_chains=3, q=0.1, alpha_pos=100, beta_pos=1, alpha_neg=100, beta_neg=1, alpha_l=None, beta_l=None, discretization_method='randomforest', random_state=0): ''' Params ------ n_rules number of rules to be used in SA_patternbased and also the output of generate_rules supp The higher this supp, the 'larger' a pattern is. 5% is a generally good number maxlen maximum length of a pattern num_iterations number of iterations in each chain num_chains number of chains in the simulated annealing search algorithm q alpha_pos $\rho = alpha/(alpha+beta)$. Make sure $\rho$ is close to one when choosing alpha and beta The alpha and beta parameters alter the prior distributions for different rules beta_pos alpha_neg beta_neg alpha_l beta_l discretization_method discretization method ''' self.n_rules = n_rules self.supp = supp self.maxlen = maxlen self.num_iterations = num_iterations self.num_chains = num_chains self.q = q self.alpha_pos = alpha_pos self.beta_pos = beta_pos self.alpha_neg = alpha_neg self.beta_neg = beta_neg self.discretization_method = discretization_method self.alpha_l = alpha_l self.beta_l = beta_l self.random_state = 0 def fit(self, X, y, feature_names: list = None, init=[], verbose=False): ''' Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data y : array_like, shape = [n_samples] Labels feature_names : array_like, shape = [n_features], optional (default: []) String labels for each feature. If empty and X is a DataFrame, column labels are used. 
If empty and X is not a DataFrame, then features are simply enumerated ''' # check inputs self.attr_level_num = defaultdict(int) # any missing value defaults to 0 self.attr_names = [] X, y, feature_names = check_fit_arguments(self, X, y, feature_names) np.random.seed(self.random_state) # convert to pandas DataFrame X = pd.DataFrame(X, columns=feature_names) for i, name in enumerate(X.columns): self.attr_level_num[name] += 1 self.attr_names.append(name) self.attr_names_orig = deepcopy(self.attr_names) self.attr_names = list(set(self.attr_names)) # set up patterns self._set_pattern_space() # parameter checking if self.alpha_l is None or self.beta_l is None or len(self.alpha_l) != self.maxlen or len( self.beta_l) != self.maxlen: if verbose: print('No or wrong input for alpha_l and beta_l - the model will use default parameters.') self.C = [1.0 / self.maxlen] * self.maxlen self.C.insert(0, -1) self.alpha_l = [10] * (self.maxlen + 1) self.beta_l = [10 * self.pattern_space[i] / self.C[i] for i in range(self.maxlen + 1)] else: self.alpha_l = [1] + list(self.alpha_l) self.beta_l = [1] + list(self.beta_l) # setup self._generate_rules(X, y, verbose) n_rules_current = len(self.rules_) self.rules_len_list = [len(rule) for rule in self.rules_] maps = defaultdict(list) T0 = 1000 # initial temperature for simulated annealing split = 0.7 * self.num_iterations # run simulated annealing for chain in range(self.num_chains): # initialize with a random pattern set if init != []: rules_curr = init.copy() else: assert n_rules_current > 1, f'Only {n_rules_current} potential rules found, change hyperparams to allow for more' N = sample(range(1, min(8, n_rules_current), 1), 1)[0] rules_curr = sample(range(n_rules_current), N) rules_curr_norm = self._normalize(rules_curr) pt_curr = -100000000000 maps[chain].append( [-1, [pt_curr / 3, pt_curr / 3, pt_curr / 3], rules_curr, [self.rules_[i] for i in rules_curr]]) for iter in range(self.num_iterations): if iter >= split: p = np.array(range(1 + len(maps[chain]))) p = np.array(list(_accumulate(p))) p = p / p[-1] index = _find_lt(p, random()) rules_curr = maps[chain][index][2].copy() rules_curr_norm = maps[chain][index][2].copy() # propose new rules rules_new, rules_norm = self._propose(rules_curr.copy(), rules_curr_norm.copy(), self.q, y) # compute probability of new rules cfmatrix, prob = self._compute_prob(rules_new, y) T = T0 ** (1 - iter / self.num_iterations) # temperature for simulated annealing pt_new = sum(prob) with warnings.catch_warnings(): if not verbose: warnings.simplefilter("ignore") alpha = np.exp(float(pt_new - pt_curr) / T) if pt_new > sum(maps[chain][-1][1]): maps[chain].append([iter, prob, rules_new, [self.rules_[i] for i in rules_new]]) if verbose: print(( '\n** chain = {}, max at iter = {} ** \n accuracy = {}, TP = {},FP = {}, TN = {}, FN = {}' '\n pt_new is {}, prior_ChsRules={}, likelihood_1 = {}, likelihood_2 = {}\n').format( chain, iter, (cfmatrix[0] + cfmatrix[2] + 0.0) / len(y), cfmatrix[0], cfmatrix[1], cfmatrix[2], cfmatrix[3], sum(prob), prob[0], prob[1], prob[2]) ) self._print_rules(rules_new) print(rules_new) if random() <= alpha: rules_curr_norm, rules_curr, pt_curr = rules_norm.copy(), rules_new.copy(), pt_new pt_max = [sum(maps[chain][-1][1]) for chain in range(self.num_chains)] index = pt_max.index(max(pt_max)) self.rules_ = maps[index][-1][3] return self def __str__(self): return ' '.join(str(r) for r in self.rules_) def predict(self, X): check_is_fitted(self) if isinstance(X, np.ndarray): df = pd.DataFrame(X, 
columns=self.attr_names_orig) else: df = X Z = [[]] * len(self.rules_) dfn = 1 - df # df has negative associations dfn.columns = [name.strip() + '_neg' for name in df.columns] df = pd.concat([df, dfn], axis=1) for i, rule in enumerate(self.rules_): Z[i] = (np.sum(df[list(rule)], axis=1) == len(rule)).astype(int) Yhat = (np.sum(Z, axis=0) > 0).astype(int) return Yhat def predict_proba(self, X): raise Exception('BOA does not support predicted probabilities.') def _set_pattern_space(self): """Compute the rule space from the levels in each attribute """ # add feat_neg to each existing feature feat for item in self.attr_names: self.attr_level_num[item + '_neg'] = self.attr_level_num[item] tmp = [item + '_neg' for item in self.attr_names] self.attr_names.extend(tmp) # set up pattern_space self.pattern_space = np.zeros(self.maxlen + 1) for k in range(1, self.maxlen + 1, 1): for subset in combinations(self.attr_names, k): tmp = 1 for i in subset: tmp = tmp * self.attr_level_num[i] # print('subset', subset, 'tmp', tmp, 'k', k) self.pattern_space[k] = self.pattern_space[k] + tmp def _generate_rules(self, X, y, verbose): '''This function generates rules that satisfy supp and maxlen using fpgrowth, then it selects the top n_rules rules that make data have the biggest decrease in entropy. There are two ways to generate rules. fpgrowth can handle cases where the maxlen is small. If maxlen<=3, fpgrowth can generates rules much faster than randomforest. If maxlen is big, fpgrowth tends to generate too many rules that overflow the memory. ''' df = 1 - X # df has negative associations df.columns = [name.strip() + '_neg' for name in X.columns] df = pd.concat([X, df], axis=1) if self.discretization_method == 'fpgrowth' and self.maxlen <= 3: itemMatrix = [[item for item in df.columns if row[item] == 1] for i, row in df.iterrows()] pindex = np.where(y == 1)[0] rules = fpgrowth([itemMatrix[i] for i in pindex], supp=self.supp, zmin=1, zmax=self.maxlen) rules = [tuple(np.sort(rule[0])) for rule in rules] rules = list(set(rules)) else: '''todo: replace this with imodels.RFDiscretizer ''' rules = [] for length in range(1, self.maxlen + 1, 1): n_estimators = min(pow(df.shape[1], length), 4000) clf = RandomForestClassifier(n_estimators=n_estimators, max_depth=length) clf.fit(X, y) for n in range(n_estimators): rules.extend(_extract_rules(clf.estimators_[n], df.columns)) rules = [list(x) for x in set(tuple(x) for x in rules)] self.rules_ = rules # select the top n_rules rules using secondary criteria, information gain self._screen_rules(df, y, verbose) # updates self.rules_ self._set_pattern_space() def _screen_rules(self, df, y, verbose): '''Screening rules using information gain ''' item_ind_dict = {} for i, name in enumerate(df.columns): item_ind_dict[name] = i indices = np.array( list(itertools.chain.from_iterable([[ item_ind_dict[x] for x in rule] for rule in self.rules_]))) len_rules = [len(rule) for rule in self.rules_] indptr = list(_accumulate(len_rules)) indptr.insert(0, 0) indptr = np.array(indptr) data = np.ones(len(indices)) rule_matrix = csc_matrix((data, indices, indptr), shape=(len(df.columns), len(self.rules_))) mat = df.values @ rule_matrix print('mat.shape', mat.shape) len_matrix = np.array([len_rules] * df.shape[0]) Z = (mat == len_matrix).astype(int) Zpos = [Z[i] for i in np.where(y > 0)][0] TP = np.sum(Zpos, axis=0) supp_select = np.where(TP >= self.supp * sum(y) / 100)[0] FP = np.sum(Z, axis=0) - TP TN = len(y) - np.sum(y) - FP FN = np.sum(y) - TP p1 = TP.astype(float) / (TP + FP) p2 = 
FN.astype(float) / (FN + TN) pp = (TP + FP).astype(float) / (TP + FP + TN + FN) # p1 = np.clip(p1, a_min=1e-10, a_max=1-1e-10) print('\n\n\n\np1.shape', p1.shape, 'pp.shape', pp.shape, 'cond_entropy.shape') # , cond_entropy.shape) with warnings.catch_warnings(): if not verbose: warnings.simplefilter("ignore") # ignore warnings about invalid values (e.g. log(0)) cond_entropy = -pp * (p1 * np.log(p1) + (1 - p1) * np.log(1 - p1)) - (1 - pp) * ( p2 * np.log(p2) + (1 - p2) * np.log(1 - p2)) cond_entropy[p1 * (1 - p1) == 0] = -((1 - pp) * (p2 * np.log(p2) + (1 - p2) * np.log(1 - p2)))[ p1 * (1 - p1) == 0] cond_entropy[p2 * (1 - p2) == 0] = -(pp * (p1 * np.log(p1) + (1 - p1) * np.log(1 - p1)))[p2 * (1 - p2) == 0] cond_entropy[p1 * (1 - p1) * p2 * (1 - p2) == 0] = 0 select = np.argsort(cond_entropy[supp_select])[::-1][-self.n_rules:] self.rules_ = [self.rules_[i] for i in supp_select[select]] self.RMatrix = np.array(Z[:, supp_select[select]]) def _propose(self, rules_curr, rules_norm, q, y): nRules = len(self.rules_) yhat = (np.sum(self.RMatrix[:, rules_curr], axis=1) > 0).astype(int) incorr = np.where(y != yhat)[0] N = len(rules_curr) if len(incorr) == 0: # BOA correctly classified all points but there could be redundant patterns, so cleaning is needed move = ['clean'] else: ex = sample(incorr.tolist(), 1)[0] t = random() if y[ex] == 1 or N == 1: if t < 1.0 / 2 or N == 1: move = ['add'] # action: add else: move = ['cut', 'add'] # action: replace else: if t < 1.0 / 2: move = ['cut'] # action: cut else: move = ['cut', 'add'] # action: replace if move[0] == 'cut': """ cut """ if random() < q: candidate = list(set(np.where(self.RMatrix[ex, :] == 1)[0]).intersection(rules_curr)) if len(candidate) == 0: candidate = rules_curr cut_rule = sample(candidate, 1)[0] else: p = [] all_sum = np.sum(self.RMatrix[:, rules_curr], axis=1) for index, rule in enumerate(rules_curr): yhat = ((all_sum - np.array(self.RMatrix[:, rule])) > 0).astype(int) TP, FP, TN, FN = _get_confusion_matrix(yhat, y) p.append(TP.astype(float) / (TP + FP + 1)) p = [x - min(p) for x in p] p = np.exp(p) p = np.insert(p, 0, 0) p = np.array(list(_accumulate(p))) if p[-1] == 0: index = sample(range(len(rules_curr)), 1)[0] else: p = p / p[-1] index = _find_lt(p, random()) cut_rule = rules_curr[index] rules_curr.remove(cut_rule) rules_norm = self._normalize(rules_curr) move.remove('cut') if len(move) > 0 and move[0] == 'add': """ add """ if random() < q: add_rule = sample(range(nRules), 1)[0] else: Yhat_neg_index = list(np.where(np.sum(self.RMatrix[:, rules_curr], axis=1) < 1)[0]) mat = np.multiply(self.RMatrix[Yhat_neg_index, :].transpose(), y[Yhat_neg_index]) TP = np.sum(mat, axis=1) FP = np.array((np.sum(self.RMatrix[Yhat_neg_index, :], axis=0) - TP)) p = (TP.astype(float) / (TP + FP + 1)) p[rules_curr] = 0 add_rule = sample(np.where(p == max(p))[0].tolist(), 1)[0] if add_rule not in rules_curr: rules_curr.append(add_rule) rules_norm = self._normalize(rules_curr) if len(move) > 0 and move[0] == 'clean': remove = [] for i, rule in enumerate(rules_norm): yhat = (np.sum( self.RMatrix[:, [rule for j, rule in enumerate(rules_norm) if (j != i and j not in remove)]], axis=1) > 0).astype(int) TP, FP, TN, FN = _get_confusion_matrix(yhat, y) if TP + FP == 0: remove.append(i) for x in remove: rules_norm.remove(x) return rules_curr, rules_norm return rules_curr, rules_norm def _compute_prob(self, rules, y): Yhat = (np.sum(self.RMatrix[:, rules], axis=1) > 0).astype(int) TP, FP, TN, FN = _get_confusion_matrix(Yhat, y) Kn_count = 
list(np.bincount([self.rules_len_list[x] for x in rules], minlength=self.maxlen + 1)) prior_ChsRules = sum([_log_betabin(Kn_count[i], self.pattern_space[i], self.alpha_l[i], self.beta_l[i]) for i in range(1, len(Kn_count), 1)]) likelihood_1 = _log_betabin(TP, TP + FP, self.alpha_pos, self.beta_pos) likelihood_2 = _log_betabin(TN, FN + TN, self.alpha_neg, self.beta_neg) return [TP, FP, TN, FN], [prior_ChsRules, likelihood_1, likelihood_2] def _normalize_add(self, rules_new, rule_index): rules = rules_new.copy() for rule in rules_new: if set(self.rules_[rule]).issubset(self.rules_[rule_index]): return rules_new.copy() if set(self.rules_[rule_index]).issubset(self.rules_[rule]): rules.remove(rule) rules.append(rule_index) return rules def _normalize(self, rules_new): try: rules_len = [len(self.rules_[index]) for index in rules_new] rules = [rules_new[i] for i in np.argsort(rules_len)[::-1][:len(rules_len)]] p1 = 0 while p1 < len(rules): for p2 in range(p1 + 1, len(rules), 1): if set(self.rules_[rules[p2]]).issubset(set(self.rules_[rules[p1]])): rules.remove(rules[p1]) p1 -= 1 break p1 += 1 return rules except: return rules_new.copy() def _print_rules(self, rules_max): for rule_index in rules_max: print(self.rules_[rule_index])
(n_rules: int = 2000, supp=5, maxlen: int = 10, num_iterations=5000, num_chains=3, q=0.1, alpha_pos=100, beta_pos=1, alpha_neg=100, beta_neg=1, alpha_l=None, beta_l=None, discretization_method='randomforest', random_state=0)
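A minimal usage sketch (illustrative, not from the source, assuming the top-level imodels export): binary features, a small rule budget, and shortened chains so the simulated-annealing search finishes quickly.

import numpy as np
import pandas as pd
from imodels import BayesianRuleSetClassifier

rng = np.random.default_rng(0)
X = pd.DataFrame(rng.integers(0, 2, size=(150, 4)), columns=["a", "b", "c", "d"])
y = ((X["a"] == 1) | (X["b"] == 1)).astype(int).values

brs = BayesianRuleSetClassifier(n_rules=100, supp=5, maxlen=2,
                                num_iterations=300, num_chains=2, random_state=0)
brs.fit(X.values, y, feature_names=list(X.columns))
print(brs)                                # space-separated selected rules
print(brs.predict(X.values)[:10])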
16,711
imodels.rule_set.brs
__init__
Params
------
n_rules
    number of rules to be used in SA_patternbased and also the output of generate_rules
supp
    The higher this supp, the 'larger' a pattern is. 5% is a generally good number
maxlen
    maximum length of a pattern
num_iterations
    number of iterations in each chain
num_chains
    number of chains in the simulated annealing search algorithm
q
alpha_pos
    $\rho = alpha/(alpha+beta)$. Make sure $\rho$ is close to one when choosing alpha and beta
    The alpha and beta parameters alter the prior distributions for different rules
beta_pos
alpha_neg
beta_neg
alpha_l
beta_l
discretization_method
    discretization method
def __init__(self, n_rules: int = 2000,
             supp=5, maxlen: int = 10,
             num_iterations=5000, num_chains=3, q=0.1,
             alpha_pos=100, beta_pos=1, alpha_neg=100, beta_neg=1,
             alpha_l=None, beta_l=None,
             discretization_method='randomforest', random_state=0):
    '''
    Params
    ------
    n_rules
        number of rules to be used in SA_patternbased and also the output of generate_rules
    supp
        The higher this supp, the 'larger' a pattern is. 5% is a generally good number
    maxlen
        maximum length of a pattern
    num_iterations
        number of iterations in each chain
    num_chains
        number of chains in the simulated annealing search algorithm
    q
    alpha_pos
        $\rho = alpha/(alpha+beta)$. Make sure $\rho$ is close to one when choosing alpha and beta
        The alpha and beta parameters alter the prior distributions for different rules
    beta_pos
    alpha_neg
    beta_neg
    alpha_l
    beta_l
    discretization_method
        discretization method
    '''
    self.n_rules = n_rules
    self.supp = supp
    self.maxlen = maxlen
    self.num_iterations = num_iterations
    self.num_chains = num_chains
    self.q = q
    self.alpha_pos = alpha_pos
    self.beta_pos = beta_pos
    self.alpha_neg = alpha_neg
    self.beta_neg = beta_neg
    self.discretization_method = discretization_method
    self.alpha_l = alpha_l
    self.beta_l = beta_l
    self.random_state = random_state
(self, n_rules: int = 2000, supp=5, maxlen: int = 10, num_iterations=5000, num_chains=3, q=0.1, alpha_pos=100, beta_pos=1, alpha_neg=100, beta_neg=1, alpha_l=None, beta_l=None, discretization_method='randomforest', random_state=0)
16,715
imodels.rule_set.brs
__str__
null
def __str__(self):
    return ' '.join(str(r) for r in self.rules_)
(self)
16,718
imodels.rule_set.brs
_compute_prob
null
def _compute_prob(self, rules, y):
    Yhat = (np.sum(self.RMatrix[:, rules], axis=1) > 0).astype(int)
    TP, FP, TN, FN = _get_confusion_matrix(Yhat, y)
    Kn_count = list(np.bincount([self.rules_len_list[x] for x in rules], minlength=self.maxlen + 1))
    prior_ChsRules = sum([_log_betabin(Kn_count[i], self.pattern_space[i], self.alpha_l[i], self.beta_l[i])
                          for i in range(1, len(Kn_count), 1)])
    likelihood_1 = _log_betabin(TP, TP + FP, self.alpha_pos, self.beta_pos)
    likelihood_2 = _log_betabin(TN, FN + TN, self.alpha_neg, self.beta_neg)
    return [TP, FP, TN, FN], [prior_ChsRules, likelihood_1, likelihood_2]
(self, rules, y)
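For reference, a hedged sketch of the beta-binomial log score that the _log_betabin helper is assumed to compute (k positives out of n trials under a Beta(alpha, beta) prior); the actual helper lives elsewhere in imodels.rule_set.brs, so this is illustrative only.

from scipy.special import betaln

def log_betabin_sketch(k, n, alpha, beta):
    # log of the beta-binomial marginal-likelihood kernel: B(k + alpha, n - k + beta) / B(alpha, beta)
    return betaln(k + alpha, n - k + beta) - betaln(alpha, beta)

print(log_betabin_sketch(8, 10, 100, 1))   # large (less negative) when most trials are positives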
16,719
imodels.rule_set.rule_set
_eval_weighted_rule_sum
null
def _eval_weighted_rule_sum(self, X) -> np.ndarray:
    check_is_fitted(self, ['rules_without_feature_names_', 'n_features_', 'feature_placeholders'])
    X = check_array(X)
    if X.shape[1] != self.n_features_:
        raise ValueError("X.shape[1] = %d should be equal to %d, the number of features at training time."
                         " Please reshape your data."
                         % (X.shape[1], self.n_features_))
    df = pd.DataFrame(X, columns=self.feature_placeholders)
    selected_rules = self.rules_without_feature_names_
    scores = np.zeros(X.shape[0])
    for r in selected_rules:
        features_r_uses = list(map(lambda x: x[0], r.agg_dict.keys()))
        scores[df[features_r_uses].query(str(r)).index.values] += r.args[0]
    return scores
(self, X) -> numpy.ndarray
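A self-contained illustration (not from the source) of the scoring idea above: each rule is a pandas query string and every sample matching it accumulates that rule's weight.

import numpy as np
import pandas as pd

df = pd.DataFrame({"age": [10, 40, 70], "bmi": [20, 31, 28]})
rules = [("age > 30 and bmi > 25", 0.5), ("age > 60", 1.0)]   # (rule, weight) pairs

scores = np.zeros(len(df))
for rule, weight in rules:
    scores[df.query(rule).index.values] += weight
print(scores)                                                  # [0.  0.5 1.5]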
16,720
imodels.rule_set.rule_set
_extract_rules
null
def _extract_rules(self, X, y):
    pass
(self, X, y)
16,721
imodels.rule_set.brs
_generate_rules
This function generates rules that satisfy supp and maxlen using fpgrowth, then it selects the top n_rules rules that make data have the biggest decrease in entropy. There are two ways to generate rules. fpgrowth can handle cases where the maxlen is small. If maxlen<=3, fpgrowth can generates rules much faster than randomforest. If maxlen is big, fpgrowth tends to generate too many rules that overflow the memory.
def _generate_rules(self, X, y, verbose): '''This function generates rules that satisfy supp and maxlen using fpgrowth, then it selects the top n_rules rules that make data have the biggest decrease in entropy. There are two ways to generate rules. fpgrowth can handle cases where the maxlen is small. If maxlen<=3, fpgrowth can generates rules much faster than randomforest. If maxlen is big, fpgrowth tends to generate too many rules that overflow the memory. ''' df = 1 - X # df has negative associations df.columns = [name.strip() + '_neg' for name in X.columns] df = pd.concat([X, df], axis=1) if self.discretization_method == 'fpgrowth' and self.maxlen <= 3: itemMatrix = [[item for item in df.columns if row[item] == 1] for i, row in df.iterrows()] pindex = np.where(y == 1)[0] rules = fpgrowth([itemMatrix[i] for i in pindex], supp=self.supp, zmin=1, zmax=self.maxlen) rules = [tuple(np.sort(rule[0])) for rule in rules] rules = list(set(rules)) else: '''todo: replace this with imodels.RFDiscretizer ''' rules = [] for length in range(1, self.maxlen + 1, 1): n_estimators = min(pow(df.shape[1], length), 4000) clf = RandomForestClassifier(n_estimators=n_estimators, max_depth=length) clf.fit(X, y) for n in range(n_estimators): rules.extend(_extract_rules(clf.estimators_[n], df.columns)) rules = [list(x) for x in set(tuple(x) for x in rules)] self.rules_ = rules # select the top n_rules rules using secondary criteria, information gain self._screen_rules(df, y, verbose) # updates self.rules_ self._set_pattern_space()
(self, X, y, verbose)
16,722
imodels.rule_set.rule_set
_get_complexity
null
def _get_complexity(self):
    check_is_fitted(self, ['rules_without_feature_names_'])
    return sum([len(rule.agg_dict) for rule in self.rules_without_feature_names_])
(self)
16,727
imodels.rule_set.brs
_normalize
null
def _normalize(self, rules_new):
    try:
        rules_len = [len(self.rules_[index]) for index in rules_new]
        rules = [rules_new[i] for i in np.argsort(rules_len)[::-1][:len(rules_len)]]
        p1 = 0
        while p1 < len(rules):
            for p2 in range(p1 + 1, len(rules), 1):
                if set(self.rules_[rules[p2]]).issubset(set(self.rules_[rules[p1]])):
                    rules.remove(rules[p1])
                    p1 -= 1
                    break
            p1 += 1
        return rules
    except:
        return rules_new.copy()
(self, rules_new)
16,728
imodels.rule_set.brs
_normalize_add
null
def _normalize_add(self, rules_new, rule_index):
    rules = rules_new.copy()
    for rule in rules_new:
        if set(self.rules_[rule]).issubset(self.rules_[rule_index]):
            return rules_new.copy()
        if set(self.rules_[rule_index]).issubset(self.rules_[rule]):
            rules.remove(rule)
    rules.append(rule_index)
    return rules
(self, rules_new, rule_index)
16,729
imodels.rule_set.brs
_print_rules
null
def _print_rules(self, rules_max):
    for rule_index in rules_max:
        print(self.rules_[rule_index])
(self, rules_max)
16,730
imodels.rule_set.brs
_propose
null
def _propose(self, rules_curr, rules_norm, q, y): nRules = len(self.rules_) yhat = (np.sum(self.RMatrix[:, rules_curr], axis=1) > 0).astype(int) incorr = np.where(y != yhat)[0] N = len(rules_curr) if len(incorr) == 0: # BOA correctly classified all points but there could be redundant patterns, so cleaning is needed move = ['clean'] else: ex = sample(incorr.tolist(), 1)[0] t = random() if y[ex] == 1 or N == 1: if t < 1.0 / 2 or N == 1: move = ['add'] # action: add else: move = ['cut', 'add'] # action: replace else: if t < 1.0 / 2: move = ['cut'] # action: cut else: move = ['cut', 'add'] # action: replace if move[0] == 'cut': """ cut """ if random() < q: candidate = list(set(np.where(self.RMatrix[ex, :] == 1)[0]).intersection(rules_curr)) if len(candidate) == 0: candidate = rules_curr cut_rule = sample(candidate, 1)[0] else: p = [] all_sum = np.sum(self.RMatrix[:, rules_curr], axis=1) for index, rule in enumerate(rules_curr): yhat = ((all_sum - np.array(self.RMatrix[:, rule])) > 0).astype(int) TP, FP, TN, FN = _get_confusion_matrix(yhat, y) p.append(TP.astype(float) / (TP + FP + 1)) p = [x - min(p) for x in p] p = np.exp(p) p = np.insert(p, 0, 0) p = np.array(list(_accumulate(p))) if p[-1] == 0: index = sample(range(len(rules_curr)), 1)[0] else: p = p / p[-1] index = _find_lt(p, random()) cut_rule = rules_curr[index] rules_curr.remove(cut_rule) rules_norm = self._normalize(rules_curr) move.remove('cut') if len(move) > 0 and move[0] == 'add': """ add """ if random() < q: add_rule = sample(range(nRules), 1)[0] else: Yhat_neg_index = list(np.where(np.sum(self.RMatrix[:, rules_curr], axis=1) < 1)[0]) mat = np.multiply(self.RMatrix[Yhat_neg_index, :].transpose(), y[Yhat_neg_index]) TP = np.sum(mat, axis=1) FP = np.array((np.sum(self.RMatrix[Yhat_neg_index, :], axis=0) - TP)) p = (TP.astype(float) / (TP + FP + 1)) p[rules_curr] = 0 add_rule = sample(np.where(p == max(p))[0].tolist(), 1)[0] if add_rule not in rules_curr: rules_curr.append(add_rule) rules_norm = self._normalize(rules_curr) if len(move) > 0 and move[0] == 'clean': remove = [] for i, rule in enumerate(rules_norm): yhat = (np.sum( self.RMatrix[:, [rule for j, rule in enumerate(rules_norm) if (j != i and j not in remove)]], axis=1) > 0).astype(int) TP, FP, TN, FN = _get_confusion_matrix(yhat, y) if TP + FP == 0: remove.append(i) for x in remove: rules_norm.remove(x) return rules_curr, rules_norm return rules_curr, rules_norm
(self, rules_curr, rules_norm, q, y)
16,731
imodels.rule_set.rule_set
_prune_rules
null
def _prune_rules(self, rules):
    pass
(self, rules)
16,734
imodels.rule_set.rule_set
_score_rules
null
def _score_rules(self, X, y, rules):
    pass
(self, X, y, rules)
16,735
imodels.rule_set.brs
_screen_rules
Screening rules using information gain
def _screen_rules(self, df, y, verbose): '''Screening rules using information gain ''' item_ind_dict = {} for i, name in enumerate(df.columns): item_ind_dict[name] = i indices = np.array( list(itertools.chain.from_iterable([[ item_ind_dict[x] for x in rule] for rule in self.rules_]))) len_rules = [len(rule) for rule in self.rules_] indptr = list(_accumulate(len_rules)) indptr.insert(0, 0) indptr = np.array(indptr) data = np.ones(len(indices)) rule_matrix = csc_matrix((data, indices, indptr), shape=(len(df.columns), len(self.rules_))) mat = df.values @ rule_matrix print('mat.shape', mat.shape) len_matrix = np.array([len_rules] * df.shape[0]) Z = (mat == len_matrix).astype(int) Zpos = [Z[i] for i in np.where(y > 0)][0] TP = np.sum(Zpos, axis=0) supp_select = np.where(TP >= self.supp * sum(y) / 100)[0] FP = np.sum(Z, axis=0) - TP TN = len(y) - np.sum(y) - FP FN = np.sum(y) - TP p1 = TP.astype(float) / (TP + FP) p2 = FN.astype(float) / (FN + TN) pp = (TP + FP).astype(float) / (TP + FP + TN + FN) # p1 = np.clip(p1, a_min=1e-10, a_max=1-1e-10) print('\n\n\n\np1.shape', p1.shape, 'pp.shape', pp.shape, 'cond_entropy.shape') # , cond_entropy.shape) with warnings.catch_warnings(): if not verbose: warnings.simplefilter("ignore") # ignore warnings about invalid values (e.g. log(0)) cond_entropy = -pp * (p1 * np.log(p1) + (1 - p1) * np.log(1 - p1)) - (1 - pp) * ( p2 * np.log(p2) + (1 - p2) * np.log(1 - p2)) cond_entropy[p1 * (1 - p1) == 0] = -((1 - pp) * (p2 * np.log(p2) + (1 - p2) * np.log(1 - p2)))[ p1 * (1 - p1) == 0] cond_entropy[p2 * (1 - p2) == 0] = -(pp * (p1 * np.log(p1) + (1 - p1) * np.log(1 - p1)))[p2 * (1 - p2) == 0] cond_entropy[p1 * (1 - p1) * p2 * (1 - p2) == 0] = 0 select = np.argsort(cond_entropy[supp_select])[::-1][-self.n_rules:] self.rules_ = [self.rules_[i] for i in supp_select[select]] self.RMatrix = np.array(Z[:, supp_select[select]])
(self, df, y, verbose)
16,736
imodels.rule_set.brs
_set_pattern_space
Compute the rule space from the levels in each attribute
def _set_pattern_space(self):
    """Compute the rule space from the levels in each attribute
    """
    # add feat_neg to each existing feature feat
    for item in self.attr_names:
        self.attr_level_num[item + '_neg'] = self.attr_level_num[item]
    tmp = [item + '_neg' for item in self.attr_names]
    self.attr_names.extend(tmp)

    # set up pattern_space
    self.pattern_space = np.zeros(self.maxlen + 1)
    for k in range(1, self.maxlen + 1, 1):
        for subset in combinations(self.attr_names, k):
            tmp = 1
            for i in subset:
                tmp = tmp * self.attr_level_num[i]
            # print('subset', subset, 'tmp', tmp, 'k', k)
            self.pattern_space[k] = self.pattern_space[k] + tmp
(self)
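A hedged illustration of the count computed above: when every attribute and its '_neg' copy contributes a single level (the binary-feature case), the number of candidate patterns of length k reduces to C(2 * n_features, k).

from itertools import combinations
from math import comb, prod

attr_names = ["a", "b", "a_neg", "b_neg"]          # 2 binary features plus negations
levels = {name: 1 for name in attr_names}
pattern_space_2 = sum(prod(levels[i] for i in subset)
                      for subset in combinations(attr_names, 2))
assert pattern_space_2 == comb(4, 2)               # 6 candidate length-2 patterns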
16,739
imodels.rule_set.brs
fit
Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data y : array_like, shape = [n_samples] Labels feature_names : array_like, shape = [n_features], optional (default: []) String labels for each feature. If empty and X is a DataFrame, column labels are used. If empty and X is not a DataFrame, then features are simply enumerated
def fit(self, X, y, feature_names: list = None, init=[], verbose=False): ''' Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data y : array_like, shape = [n_samples] Labels feature_names : array_like, shape = [n_features], optional (default: []) String labels for each feature. If empty and X is a DataFrame, column labels are used. If empty and X is not a DataFrame, then features are simply enumerated ''' # check inputs self.attr_level_num = defaultdict(int) # any missing value defaults to 0 self.attr_names = [] X, y, feature_names = check_fit_arguments(self, X, y, feature_names) np.random.seed(self.random_state) # convert to pandas DataFrame X = pd.DataFrame(X, columns=feature_names) for i, name in enumerate(X.columns): self.attr_level_num[name] += 1 self.attr_names.append(name) self.attr_names_orig = deepcopy(self.attr_names) self.attr_names = list(set(self.attr_names)) # set up patterns self._set_pattern_space() # parameter checking if self.alpha_l is None or self.beta_l is None or len(self.alpha_l) != self.maxlen or len( self.beta_l) != self.maxlen: if verbose: print('No or wrong input for alpha_l and beta_l - the model will use default parameters.') self.C = [1.0 / self.maxlen] * self.maxlen self.C.insert(0, -1) self.alpha_l = [10] * (self.maxlen + 1) self.beta_l = [10 * self.pattern_space[i] / self.C[i] for i in range(self.maxlen + 1)] else: self.alpha_l = [1] + list(self.alpha_l) self.beta_l = [1] + list(self.beta_l) # setup self._generate_rules(X, y, verbose) n_rules_current = len(self.rules_) self.rules_len_list = [len(rule) for rule in self.rules_] maps = defaultdict(list) T0 = 1000 # initial temperature for simulated annealing split = 0.7 * self.num_iterations # run simulated annealing for chain in range(self.num_chains): # initialize with a random pattern set if init != []: rules_curr = init.copy() else: assert n_rules_current > 1, f'Only {n_rules_current} potential rules found, change hyperparams to allow for more' N = sample(range(1, min(8, n_rules_current), 1), 1)[0] rules_curr = sample(range(n_rules_current), N) rules_curr_norm = self._normalize(rules_curr) pt_curr = -100000000000 maps[chain].append( [-1, [pt_curr / 3, pt_curr / 3, pt_curr / 3], rules_curr, [self.rules_[i] for i in rules_curr]]) for iter in range(self.num_iterations): if iter >= split: p = np.array(range(1 + len(maps[chain]))) p = np.array(list(_accumulate(p))) p = p / p[-1] index = _find_lt(p, random()) rules_curr = maps[chain][index][2].copy() rules_curr_norm = maps[chain][index][2].copy() # propose new rules rules_new, rules_norm = self._propose(rules_curr.copy(), rules_curr_norm.copy(), self.q, y) # compute probability of new rules cfmatrix, prob = self._compute_prob(rules_new, y) T = T0 ** (1 - iter / self.num_iterations) # temperature for simulated annealing pt_new = sum(prob) with warnings.catch_warnings(): if not verbose: warnings.simplefilter("ignore") alpha = np.exp(float(pt_new - pt_curr) / T) if pt_new > sum(maps[chain][-1][1]): maps[chain].append([iter, prob, rules_new, [self.rules_[i] for i in rules_new]]) if verbose: print(( '\n** chain = {}, max at iter = {} ** \n accuracy = {}, TP = {},FP = {}, TN = {}, FN = {}' '\n pt_new is {}, prior_ChsRules={}, likelihood_1 = {}, likelihood_2 = {}\n').format( chain, iter, (cfmatrix[0] + cfmatrix[2] + 0.0) / len(y), cfmatrix[0], cfmatrix[1], cfmatrix[2], cfmatrix[3], sum(prob), prob[0], prob[1], prob[2]) ) self._print_rules(rules_new) print(rules_new) if random() <= alpha: rules_curr_norm, rules_curr, pt_curr = 
rules_norm.copy(), rules_new.copy(), pt_new pt_max = [sum(maps[chain][-1][1]) for chain in range(self.num_chains)] index = pt_max.index(max(pt_max)) self.rules_ = maps[index][-1][3] return self
(self, X, y, feature_names: Optional[list] = None, init=[], verbose=False)
16,742
imodels.rule_set.brs
predict
null
def predict(self, X):
    check_is_fitted(self)
    if isinstance(X, np.ndarray):
        df = pd.DataFrame(X, columns=self.attr_names_orig)
    else:
        df = X
    Z = [[]] * len(self.rules_)
    dfn = 1 - df  # df has negative associations
    dfn.columns = [name.strip() + '_neg' for name in df.columns]
    df = pd.concat([df, dfn], axis=1)
    for i, rule in enumerate(self.rules_):
        Z[i] = (np.sum(df[list(rule)], axis=1) == len(rule)).astype(int)
    Yhat = (np.sum(Z, axis=0) > 0).astype(int)
    return Yhat
(self, X)
16,743
imodels.rule_set.brs
predict_proba
null
def predict_proba(self, X):
    raise Exception('BOA does not support predicted probabilities.')
(self, X)
16,745
sklearn.utils._metadata_requests
set_fit_request
Request metadata passed to the ``fit`` method. Note that this method is only relevant if ``enable_metadata_routing=True`` (see :func:`sklearn.set_config`). Please see :ref:`User Guide <metadata_routing>` on how the routing mechanism works. The options for each parameter are: - ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided. - ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``. - ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it. - ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name. The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the existing request. This allows you to change the request for some parameters and not others. .. versionadded:: 1.3 .. note:: This method is only relevant if this estimator is used as a sub-estimator of a meta-estimator, e.g. used inside a :class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect. Parameters ---------- feature_names : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED Metadata routing for ``feature_names`` parameter in ``fit``. init : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED Metadata routing for ``init`` parameter in ``fit``. verbose : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED Metadata routing for ``verbose`` parameter in ``fit``. Returns ------- self : object The updated object.
def __get__(self, instance, owner): # we would want to have a method which accepts only the expected args def func(**kw): """Updates the request for provided parameters This docstring is overwritten below. See REQUESTER_DOC for expected functionality """ if not _routing_enabled(): raise RuntimeError( "This method is only available when metadata routing is enabled." " You can enable it using" " sklearn.set_config(enable_metadata_routing=True)." ) if self.validate_keys and (set(kw) - set(self.keys)): raise TypeError( f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments" f" are: {set(self.keys)}" ) requests = instance._get_metadata_request() method_metadata_request = getattr(requests, self.name) for prop, alias in kw.items(): if alias is not UNCHANGED: method_metadata_request.add_request(param=prop, alias=alias) instance._metadata_request = requests return instance # Now we set the relevant attributes of the function so that it seems # like a normal method to the end user, with known expected arguments. func.__name__ = f"set_{self.name}_request" params = [ inspect.Parameter( name="self", kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=owner, ) ] params.extend( [ inspect.Parameter( k, inspect.Parameter.KEYWORD_ONLY, default=UNCHANGED, annotation=Optional[Union[bool, None, str]], ) for k in self.keys ] ) func.__signature__ = inspect.Signature( params, return_annotation=owner, ) doc = REQUESTER_DOC.format(method=self.name) for metadata in self.keys: doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name) doc += REQUESTER_DOC_RETURN func.__doc__ = doc return func
(self: imodels.rule_set.brs.BayesianRuleSetClassifier, *, feature_names: Union[bool, NoneType, str] = '$UNCHANGED$', init: Union[bool, NoneType, str] = '$UNCHANGED$', verbose: Union[bool, NoneType, str] = '$UNCHANGED$') -> imodels.rule_set.brs.BayesianRuleSetClassifier
16,748
imodels.rule_set.boosted_rules
BoostedRulesClassifier
An easy-interpretable classifier optimizing simple logical rules. Params ------ estimator: object with fit and predict methods Defaults to DecisionTreeClassifier with AdaBoost. For SLIPPER, should pass estimator=imodels.SlipperBaseEstimator
class BoostedRulesClassifier(AdaBoostClassifier):
    '''An easy-interpretable classifier optimizing simple logical rules.

    Params
    ------
    estimator: object with fit and predict methods
        Defaults to DecisionTreeClassifier with AdaBoost.
        For SLIPPER, should pass estimator=imodels.SlipperBaseEstimator
    '''

    def __init__(
        self,
        estimator=DecisionTreeClassifier(max_depth=1),
        *,
        n_estimators=15,
        learning_rate=1.0,
        random_state=None,
    ):
        try:  # sklearn version >= 1.2
            super().__init__(
                estimator=estimator,
                n_estimators=n_estimators,
                learning_rate=learning_rate,
                random_state=random_state,
            )
        except:  # sklearn version < 1.2
            super().__init__(
                base_estimator=estimator,
                n_estimators=n_estimators,
                learning_rate=learning_rate,
                random_state=random_state,
            )
        self.estimator = estimator

    def fit(self, X, y, feature_names=None, **kwargs):
        X, y, feature_names = check_fit_arguments(self, X, y, feature_names)
        super().fit(X, y, **kwargs)
        self.complexity_ = len(self.estimators_)
(estimator=DecisionTreeClassifier(max_depth=1), *, n_estimators=15, learning_rate=1.0, random_state=None)
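A minimal usage sketch (illustrative data and settings, assuming the top-level imodels export): each boosted estimator is a depth-1 decision tree, i.e. a single-threshold stump.

import numpy as np
from imodels import BoostedRulesClassifier

rng = np.random.default_rng(0)
X = rng.normal(size=(300, 5))
y = (X[:, 0] + X[:, 1] > 0).astype(int)

clf = BoostedRulesClassifier(n_estimators=10)
clf.fit(X, y, feature_names=[f"x{i}" for i in range(5)])
print(clf.complexity_)                    # number of boosted stumps
print(clf.predict(X[:5]))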
16,751
imodels.rule_set.boosted_rules
__init__
null
def __init__(
    self,
    estimator=DecisionTreeClassifier(max_depth=1),
    *,
    n_estimators=15,
    learning_rate=1.0,
    random_state=None,
):
    try:  # sklearn version >= 1.2
        super().__init__(
            estimator=estimator,
            n_estimators=n_estimators,
            learning_rate=learning_rate,
            random_state=random_state,
        )
    except:  # sklearn version < 1.2
        super().__init__(
            base_estimator=estimator,
            n_estimators=n_estimators,
            learning_rate=learning_rate,
            random_state=random_state,
        )
    self.estimator = estimator
(self, estimator=DecisionTreeClassifier(max_depth=1), *, n_estimators=15, learning_rate=1.0, random_state=None)
16,775
imodels.rule_set.boosted_rules
fit
null
def fit(self, X, y, feature_names=None, **kwargs):
    X, y, feature_names = check_fit_arguments(self, X, y, feature_names)
    super().fit(X, y, **kwargs)
    self.complexity_ = len(self.estimators_)
(self, X, y, feature_names=None, **kwargs)
16,782
sklearn.utils._metadata_requests
set_fit_request
Request metadata passed to the ``fit`` method. Note that this method is only relevant if ``enable_metadata_routing=True`` (see :func:`sklearn.set_config`). Please see :ref:`User Guide <metadata_routing>` on how the routing mechanism works. The options for each parameter are: - ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided. - ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``. - ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it. - ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name. The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the existing request. This allows you to change the request for some parameters and not others. .. versionadded:: 1.3 .. note:: This method is only relevant if this estimator is used as a sub-estimator of a meta-estimator, e.g. used inside a :class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect. Parameters ---------- feature_names : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED Metadata routing for ``feature_names`` parameter in ``fit``. Returns ------- self : object The updated object.
def __get__(self, instance, owner): # we would want to have a method which accepts only the expected args def func(**kw): """Updates the request for provided parameters This docstring is overwritten below. See REQUESTER_DOC for expected functionality """ if not _routing_enabled(): raise RuntimeError( "This method is only available when metadata routing is enabled." " You can enable it using" " sklearn.set_config(enable_metadata_routing=True)." ) if self.validate_keys and (set(kw) - set(self.keys)): raise TypeError( f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments" f" are: {set(self.keys)}" ) requests = instance._get_metadata_request() method_metadata_request = getattr(requests, self.name) for prop, alias in kw.items(): if alias is not UNCHANGED: method_metadata_request.add_request(param=prop, alias=alias) instance._metadata_request = requests return instance # Now we set the relevant attributes of the function so that it seems # like a normal method to the end user, with known expected arguments. func.__name__ = f"set_{self.name}_request" params = [ inspect.Parameter( name="self", kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=owner, ) ] params.extend( [ inspect.Parameter( k, inspect.Parameter.KEYWORD_ONLY, default=UNCHANGED, annotation=Optional[Union[bool, None, str]], ) for k in self.keys ] ) func.__signature__ = inspect.Signature( params, return_annotation=owner, ) doc = REQUESTER_DOC.format(method=self.name) for metadata in self.keys: doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name) doc += REQUESTER_DOC_RETURN func.__doc__ = doc return func
(self: imodels.rule_set.boosted_rules.BoostedRulesClassifier, *, feature_names: Union[bool, NoneType, str] = '$UNCHANGED$') -> imodels.rule_set.boosted_rules.BoostedRulesClassifier
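The set_fit_request accessor above only does something when scikit-learn's metadata routing is switched on. A hedged sketch (assuming scikit-learn >= 1.3; the pipeline is illustrative and the final fit call is left commented because routing support varies by meta-estimator and version):

# Sketch: request that `feature_names` be routed to BoostedRulesClassifier.fit.
import sklearn
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from imodels import BoostedRulesClassifier  # assumed top-level export

sklearn.set_config(enable_metadata_routing=True)
clf = BoostedRulesClassifier().set_fit_request(feature_names=True)
pipe = make_pipeline(StandardScaler(), clf)
# pipe.fit(X, y, feature_names=[...])  # feature_names would be routed through to clf.fit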
16,789
imodels.rule_set.boosted_rules
BoostedRulesRegressor
An easy-interpretable regressor optimizing simple logical rules. Params ------ estimator: object with fit and predict methods Defaults to DecisionTreeRegressor with AdaBoost.
class BoostedRulesRegressor(AdaBoostRegressor): '''An easy-interpretable regressor optimizing simple logical rules. Params ------ estimator: object with fit and predict methods Defaults to DecisionTreeRegressor with AdaBoost. ''' def __init__( self, estimator=DecisionTreeRegressor(max_depth=1), *, n_estimators=15, learning_rate=1.0, random_state=13, ): try: # sklearn version >= 1.2 super().__init__( estimator=estimator, n_estimators=n_estimators, learning_rate=learning_rate, random_state=random_state, ) except: # sklearn version < 1.2 super().__init__( base_estimator=estimator, n_estimators=n_estimators, learning_rate=learning_rate, random_state=random_state, ) self.estimator = estimator def fit(self, X, y, feature_names=None, **kwargs): X, y, feature_names = check_fit_arguments(self, X, y, feature_names) super().fit(X, y, **kwargs) self.complexity_ = len(self.estimators_)
(estimator=DecisionTreeRegressor(max_depth=1), *, n_estimators=15, learning_rate=1.0, random_state=13)
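A matching sketch for the regressor, again assuming the top-level imodels export and a synthetic dataset:

# Sketch only: Friedman #1 synthetic regression data.
from sklearn.datasets import make_friedman1
from imodels import BoostedRulesRegressor  # assumed top-level export

X, y = make_friedman1(n_samples=200, random_state=0)
reg = BoostedRulesRegressor(n_estimators=15)  # AdaBoost over depth-1 regression trees
reg.fit(X, y)
print(reg.complexity_)     # number of boosted rules
print(reg.predict(X[:3]))  # in-sample predictions for the first three rows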
16,792
imodels.rule_set.boosted_rules
__init__
null
def __init__( self, estimator=DecisionTreeRegressor(max_depth=1), *, n_estimators=15, learning_rate=1.0, random_state=13, ): try: # sklearn version >= 1.2 super().__init__( estimator=estimator, n_estimators=n_estimators, learning_rate=learning_rate, random_state=random_state, ) except: # sklearn version < 1.2 super().__init__( base_estimator=estimator, n_estimators=n_estimators, learning_rate=learning_rate, random_state=random_state, ) self.estimator = estimator
(self, estimator=DecisionTreeRegressor(max_depth=1), *, n_estimators=15, learning_rate=1.0, random_state=13)
16,823
imodels.tree.c45_tree.c45_tree
C45TreeClassifier
A C4.5 tree classifier. Parameters ---------- max_rules : int, optional (default=None) Maximum number of split nodes allowed in the tree
class C45TreeClassifier(BaseEstimator, ClassifierMixin): """A C4.5 tree classifier. Parameters ---------- max_rules : int, optional (default=None) Maximum number of split nodes allowed in the tree """ def __init__(self, max_rules: int = None): super().__init__() self.max_rules = max_rules def fit(self, X, y, feature_names: str = None): self.complexity_ = 0 # X, y = check_X_y(X, y) X, y, feature_names = check_fit_arguments(self, X, y, feature_names) self.resultType = type(y[0]) if feature_names is None: self.feature_names = [f'X_{x}' for x in range(X.shape[1])] else: # only include alphanumeric chars / replace spaces with underscores self.feature_names = [''.join([i for i in x if i.isalnum()]).replace(' ', '_') for x in feature_names] self.feature_names = [ 'X_' + x if x[0].isdigit() else x for x in self.feature_names ] assert len(self.feature_names) == X.shape[1] data = [[] for i in range(len(self.feature_names))] categories = [] for i in range(len(X)): categories.append(str(y[i])) for j in range(len(self.feature_names)): data[j].append(X[i][j]) root = ET.Element('GreedyTree') self.grow_tree(data, categories, root, self.feature_names) # adds to root self.tree_ = ET.tostring(root, encoding="unicode") # print('self.tree_', self.tree_) self.dom_ = minidom.parseString(self.tree_) return self def impute_nodes(self, X, y): """ Returns --- the leaf by which this sample would be classified """ source_node = self.root for i in range(len(y)): sample, label = X[i, ...], y[i] _add_label(source_node, label) nodes = [source_node] while len(nodes) > 0: node = nodes.pop() if not node.hasChildNodes(): continue else: att_name = node.firstChild.nodeName if att_name != "#text": att = sample[self.feature_names.index(att_name)] next_node = _get_next_node(node.childNodes, att) else: next_node = node.firstChild _add_label(next_node, label) nodes.append(next_node) self._calc_probs(source_node) # self.dom_.childNodes[0] = source_node # self.tree_.source = source_node def _calc_probs(self, node): node.nodeValue = np.mean(node.labels) if not node.hasChildNodes(): return for c in node.childNodes: self._calc_probs(c) def raw_preds(self, X): check_is_fitted(self, ['tree_', 'resultType', 'feature_names']) X = check_array(X) if isinstance(X, pd.DataFrame): X = deepcopy(X) X.columns = self.feature_names root = self.root prediction = [] for i in range(X.shape[0]): answerlist = decision(root, X[i], self.feature_names, 1) answerlist = sorted(answerlist.items(), key=lambda x: x[1], reverse=True) answer = answerlist[0][0] # prediction.append(self.resultType(answer)) prediction.append(float(answer)) return np.array(prediction) def predict(self, X): raw_preds = self.raw_preds(X) return (raw_preds > np.ones_like(raw_preds) * 0.5).astype(int) def predict_proba(self, X): raw_preds = self.raw_preds(X) return np.vstack((1 - raw_preds, raw_preds)).transpose() def __str__(self): check_is_fitted(self, ['tree_']) return self.dom_.toprettyxml(newl="\r\n") def grow_tree(self, X_t: List[list], y_str: List[str], parent, attrs_names): """ Parameters ---------- X_t: List[list] input data transposed (num_features x num_observations) y_str: List[str] outcome represented as strings parent attrs_names """ # check that y contains more than 1 distinct value if len(set(y_str)) > 1: split = [] # loop over features and build up potential splits for i in range(len(X_t)): if set(X_t[i]) == set("?"): split.append(0) else: if is_numeric_feature(X_t[i]): split.append(gain(y_str, X_t[i])) else: split.append(gain_ratio(y_str, X_t[i])) # no good split, return child node if max(split) == 0: set_as_leaf_node(parent, y_str) # there is a good split else: index_selected = split.index(max(split)) name_selected = str(attrs_names[index_selected]) self.complexity_ += 1 if is_numeric_feature(X_t[index_selected]): # split on this point split_point = get_best_split(y_str, X_t[index_selected]) # build up children nodes r_child_X = [[] for i in range(len(X_t))] r_child_y = [] l_child_X = [[] for i in range(len(X_t))] l_child_y = [] for i in range(len(y_str)): if not X_t[index_selected][i] == "?": if float(X_t[index_selected][i]) < float(split_point): l_child_y.append(y_str[i]) for j in range(len(X_t)): l_child_X[j].append(X_t[j][i]) else: r_child_y.append(y_str[i]) for j in range(len(X_t)): r_child_X[j].append(X_t[j][i]) # grow child nodes as well if len(l_child_y) > 0 and len(r_child_y) > 0 and ( self.max_rules is None or self.complexity_ <= self.max_rules ): p_l = float(len(l_child_y)) / (len(X_t[index_selected]) - X_t[index_selected].count("?")) son = ET.SubElement(parent, name_selected, {'feature': str(split_point), "flag": "l", "p": str(round(p_l, 3))}) self.grow_tree(l_child_X, l_child_y, son, attrs_names) son = ET.SubElement(parent, name_selected, {'feature': str(split_point), "flag": "r", "p": str(round(1 - p_l, 3))}) self.grow_tree(r_child_X, r_child_y, son, attrs_names) else: num_max = 0 for cat in set(y_str): num_cat = y_str.count(cat) if num_cat > num_max: num_max = num_cat most_cat = cat parent.text = most_cat else: # split on non-numeric variable (e.g. categorical) # create a leaf for each unique value for k in set(X_t[index_selected]): if not k == "?" and ( self.max_rules is None or self.complexity_ <= self.max_rules ): child_X = [[] for i in range(len(X_t))] child_y = [] for i in range(len(y_str)): if X_t[index_selected][i] == k: child_y.append(y_str[i]) for j in range(len(X_t)): child_X[j].append(X_t[j][i]) son = ET.SubElement(parent, name_selected, { 'feature': k, "flag": "m", 'p': str(round( float(len(child_y)) / ( len(X_t[index_selected]) - X_t[index_selected].count("?")), 3))}) self.grow_tree(child_X, child_y, son, attrs_names) else: parent.text = y_str[0] @property def root(self): return self.dom_.childNodes[0]
(max_rules: int = None)
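A short usage sketch for C45TreeClassifier, assuming the top-level imodels export; the data and feature names below are made up for illustration.

# Sketch only: synthetic binary labels, illustrative feature names.
import numpy as np
from imodels import C45TreeClassifier  # assumed top-level export

rng = np.random.RandomState(0)
X = rng.rand(100, 4)
y = (X[:, 0] + X[:, 1] > 1).astype(int)

m = C45TreeClassifier(max_rules=5)  # cap on the number of split nodes
m.fit(X, y, feature_names=["age", "bmi", "bp", "chol"])
print(m.complexity_)           # split nodes actually grown
print(m.predict_proba(X[:2]))  # [P(class 0), P(class 1)] per row
print(m)                       # pretty-printed XML form of the fitted tree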
16,825
imodels.tree.c45_tree.c45_tree
__init__
null
def __init__(self, max_rules: int = None): super().__init__() self.max_rules = max_rules
(self, max_rules: Optional[int] = None)
16,829
imodels.tree.c45_tree.c45_tree
__str__
null
def __str__(self): check_is_fitted(self, ['tree_']) return self.dom_.toprettyxml(newl="\r\n")
(self)
16,830
imodels.tree.c45_tree.c45_tree
_calc_probs
null
def _calc_probs(self, node): node.nodeValue = np.mean(node.labels) if not node.hasChildNodes(): return for c in node.childNodes: self._calc_probs(c)
(self, node)
16,841
imodels.tree.c45_tree.c45_tree
fit
null
def fit(self, X, y, feature_names: str = None): self.complexity_ = 0 # X, y = check_X_y(X, y) X, y, feature_names = check_fit_arguments(self, X, y, feature_names) self.resultType = type(y[0]) if feature_names is None: self.feature_names = [f'X_{x}' for x in range(X.shape[1])] else: # only include alphanumeric chars / replace spaces with underscores self.feature_names = [''.join([i for i in x if i.isalnum()]).replace(' ', '_') for x in feature_names] self.feature_names = [ 'X_' + x if x[0].isdigit() else x for x in self.feature_names ] assert len(self.feature_names) == X.shape[1] data = [[] for i in range(len(self.feature_names))] categories = [] for i in range(len(X)): categories.append(str(y[i])) for j in range(len(self.feature_names)): data[j].append(X[i][j]) root = ET.Element('GreedyTree') self.grow_tree(data, categories, root, self.feature_names) # adds to root self.tree_ = ET.tostring(root, encoding="unicode") # print('self.tree_', self.tree_) self.dom_ = minidom.parseString(self.tree_) return self
(self, X, y, feature_names: Optional[str] = None)
16,844
imodels.tree.c45_tree.c45_tree
grow_tree
Parameters ---------- X_t: List[list] input data transposed (num_features x num_observations) y_str: List[str] outcome represented as strings parent attrs_names
def grow_tree(self, X_t: List[list], y_str: List[str], parent, attrs_names): """ Parameters ---------- X_t: List[list] input data transposed (num_features x num_observations) y_str: List[str] outcome represented as strings parent attrs_names """ # check that y contains more than 1 distinct value if len(set(y_str)) > 1: split = [] # loop over features and build up potential splits for i in range(len(X_t)): if set(X_t[i]) == set("?"): split.append(0) else: if is_numeric_feature(X_t[i]): split.append(gain(y_str, X_t[i])) else: split.append(gain_ratio(y_str, X_t[i])) # no good split, return child node if max(split) == 0: set_as_leaf_node(parent, y_str) # there is a good split else: index_selected = split.index(max(split)) name_selected = str(attrs_names[index_selected]) self.complexity_ += 1 if is_numeric_feature(X_t[index_selected]): # split on this point split_point = get_best_split(y_str, X_t[index_selected]) # build up children nodes r_child_X = [[] for i in range(len(X_t))] r_child_y = [] l_child_X = [[] for i in range(len(X_t))] l_child_y = [] for i in range(len(y_str)): if not X_t[index_selected][i] == "?": if float(X_t[index_selected][i]) < float(split_point): l_child_y.append(y_str[i]) for j in range(len(X_t)): l_child_X[j].append(X_t[j][i]) else: r_child_y.append(y_str[i]) for j in range(len(X_t)): r_child_X[j].append(X_t[j][i]) # grow child nodes as well if len(l_child_y) > 0 and len(r_child_y) > 0 and ( self.max_rules is None or self.complexity_ <= self.max_rules ): p_l = float(len(l_child_y)) / (len(X_t[index_selected]) - X_t[index_selected].count("?")) son = ET.SubElement(parent, name_selected, {'feature': str(split_point), "flag": "l", "p": str(round(p_l, 3))}) self.grow_tree(l_child_X, l_child_y, son, attrs_names) son = ET.SubElement(parent, name_selected, {'feature': str(split_point), "flag": "r", "p": str(round(1 - p_l, 3))}) self.grow_tree(r_child_X, r_child_y, son, attrs_names) else: num_max = 0 for cat in set(y_str): num_cat = y_str.count(cat) if num_cat > num_max: num_max = num_cat most_cat = cat parent.text = most_cat else: # split on non-numeric variable (e.g. categorical) # create a leaf for each unique value for k in set(X_t[index_selected]): if not k == "?" and ( self.max_rules is None or self.complexity_ <= self.max_rules ): child_X = [[] for i in range(len(X_t))] child_y = [] for i in range(len(y_str)): if X_t[index_selected][i] == k: child_y.append(y_str[i]) for j in range(len(X_t)): child_X[j].append(X_t[j][i]) son = ET.SubElement(parent, name_selected, { 'feature': k, "flag": "m", 'p': str(round( float(len(child_y)) / ( len(X_t[index_selected]) - X_t[index_selected].count("?")), 3))}) self.grow_tree(child_X, child_y, son, attrs_names) else: parent.text = y_str[0]
(self, X_t: List[list], y_str: List[str], parent, attrs_names)
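grow_tree scores candidate splits with gain for numeric features and gain_ratio for categorical ones. The snippet below is a standalone, illustrative re-derivation of entropy-based information gain, not the library's gain/gain_ratio helpers.

# Illustrative only: entropy and information gain for a categorical split.
import math
from collections import Counter

def entropy(labels):
    n = len(labels)
    return -sum((c / n) * math.log2(c / n) for c in Counter(labels).values())

def info_gain(labels, feature_values):
    n = len(labels)
    remainder = 0.0
    for v in set(feature_values):
        subset = [l for l, f in zip(labels, feature_values) if f == v]
        remainder += len(subset) / n * entropy(subset)
    return entropy(labels) - remainder

y = ["yes", "yes", "no", "no"]
x = ["sunny", "sunny", "rain", "rain"]
print(info_gain(y, x))  # 1.0 -- this feature separates the labels perfectly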
16,845
imodels.tree.c45_tree.c45_tree
impute_nodes
Returns --- the leaf by which this sample would be classified
def impute_nodes(self, X, y): """ Returns --- the leaf by which this sample would be classified """ source_node = self.root for i in range(len(y)): sample, label = X[i, ...], y[i] _add_label(source_node, label) nodes = [source_node] while len(nodes) > 0: node = nodes.pop() if not node.hasChildNodes(): continue else: att_name = node.firstChild.nodeName if att_name != "#text": att = sample[self.feature_names.index(att_name)] next_node = _get_next_node(node.childNodes, att) else: next_node = node.firstChild _add_label(next_node, label) nodes.append(next_node) self._calc_probs(source_node) # self.dom_.childNodes[0] = source_node # self.tree_.source = source_node
(self, X, y)
16,846
imodels.tree.c45_tree.c45_tree
predict
null
def predict(self, X): raw_preds = self.raw_preds(X) return (raw_preds > np.ones_like(raw_preds) * 0.5).astype(int)
(self, X)
16,847
imodels.tree.c45_tree.c45_tree
predict_proba
null
def predict_proba(self, X): raw_preds = self.raw_preds(X) return np.vstack((1 - raw_preds, raw_preds)).transpose()
(self, X)
16,848
imodels.tree.c45_tree.c45_tree
raw_preds
null
def raw_preds(self, X): check_is_fitted(self, ['tree_', 'resultType', 'feature_names']) X = check_array(X) if isinstance(X, pd.DataFrame): X = deepcopy(X) X.columns = self.feature_names root = self.root prediction = [] for i in range(X.shape[0]): answerlist = decision(root, X[i], self.feature_names, 1) answerlist = sorted(answerlist.items(), key=lambda x: x[1], reverse=True) answer = answerlist[0][0] # prediction.append(self.resultType(answer)) prediction.append(float(answer)) return np.array(prediction)
(self, X)
16,853
sklearn.base
ClassifierMixin
Mixin class for all classifiers in scikit-learn. This mixin defines the following functionality: - `_estimator_type` class attribute defaulting to `"classifier"`; - `score` method that default to :func:`~sklearn.metrics.accuracy_score`. - enforce that `fit` requires `y` to be passed through the `requires_y` tag. Read more in the :ref:`User Guide <rolling_your_own_estimator>`. Examples -------- >>> import numpy as np >>> from sklearn.base import BaseEstimator, ClassifierMixin >>> # Mixin classes should always be on the left-hand side for a correct MRO >>> class MyEstimator(ClassifierMixin, BaseEstimator): ... def __init__(self, *, param=1): ... self.param = param ... def fit(self, X, y=None): ... self.is_fitted_ = True ... return self ... def predict(self, X): ... return np.full(shape=X.shape[0], fill_value=self.param) >>> estimator = MyEstimator(param=1) >>> X = np.array([[1, 2], [2, 3], [3, 4]]) >>> y = np.array([1, 0, 1]) >>> estimator.fit(X, y).predict(X) array([1, 1, 1]) >>> estimator.score(X, y) 0.66...
class ClassifierMixin: """Mixin class for all classifiers in scikit-learn. This mixin defines the following functionality: - `_estimator_type` class attribute defaulting to `"classifier"`; - `score` method that default to :func:`~sklearn.metrics.accuracy_score`. - enforce that `fit` requires `y` to be passed through the `requires_y` tag. Read more in the :ref:`User Guide <rolling_your_own_estimator>`. Examples -------- >>> import numpy as np >>> from sklearn.base import BaseEstimator, ClassifierMixin >>> # Mixin classes should always be on the left-hand side for a correct MRO >>> class MyEstimator(ClassifierMixin, BaseEstimator): ... def __init__(self, *, param=1): ... self.param = param ... def fit(self, X, y=None): ... self.is_fitted_ = True ... return self ... def predict(self, X): ... return np.full(shape=X.shape[0], fill_value=self.param) >>> estimator = MyEstimator(param=1) >>> X = np.array([[1, 2], [2, 3], [3, 4]]) >>> y = np.array([1, 0, 1]) >>> estimator.fit(X, y).predict(X) array([1, 1, 1]) >>> estimator.score(X, y) 0.66... """ _estimator_type = "classifier" def score(self, X, y, sample_weight=None): """ Return the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. y : array-like of shape (n_samples,) or (n_samples, n_outputs) True labels for `X`. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float Mean accuracy of ``self.predict(X)`` w.r.t. `y`. """ from .metrics import accuracy_score return accuracy_score(y, self.predict(X), sample_weight=sample_weight) def _more_tags(self): return {"requires_y": True}
()
16,856
imodels.tree.cart_ccp
DecisionTreeCCPClassifier
null
class DecisionTreeCCPClassifier(DecisionTreeClassifier): def __init__(self, estimator_: BaseEstimator, desired_complexity: int = 1, complexity_measure='max_rules', *args, **kwargs): self.desired_complexity = desired_complexity # print('est', estimator_) self.estimator_ = estimator_ self.complexity_measure = complexity_measure def _get_alpha(self, X, y, sample_weight=None, *args, **kwargs): path = self.estimator_.cost_complexity_pruning_path(X, y) ccp_alphas, impurities = path.ccp_alphas, path.impurities complexities = {} low = 0 high = len(ccp_alphas) - 1 cur = 0 while low <= high: cur = (high + low) // 2 est_params = self.estimator_.get_params() est_params['ccp_alpha'] = ccp_alphas[cur] copied_estimator = deepcopy(self.estimator_).set_params(**est_params) copied_estimator.fit(X, y) if self._get_complexity(copied_estimator, self.complexity_measure) < self.desired_complexity: high = cur - 1 elif self._get_complexity(copied_estimator, self.complexity_measure) > self.desired_complexity: low = cur + 1 else: break self.alpha = ccp_alphas[cur] # for alpha in ccp_alphas: # est_params = self.estimator_.get_params() # est_params['ccp_alpha'] = alpha # copied_estimator = deepcopy(self.estimator_).set_params(**est_params) # copied_estimator.fit(X, y) # complexities[alpha] = self._get_complexity(copied_estimator,self.complexity_measure) # closest_alpha, closest_leaves = min(complexities.items(), key=lambda x: abs(self.desired_complexity - x[1])) # self.alpha = closest_alpha def fit(self, X, y, sample_weight=None, *args, **kwargs): params_for_fitting = self.estimator_.get_params() self._get_alpha(X, y, sample_weight, *args, **kwargs) params_for_fitting['ccp_alpha'] = self.alpha self.estimator_.set_params(**params_for_fitting) self.estimator_.fit(X, y, *args, **kwargs) def _get_complexity(self, BaseEstimator, complexity_measure): return compute_tree_complexity(BaseEstimator.tree_, complexity_measure) def predict_proba(self, *args, **kwargs): if hasattr(self.estimator_, 'predict_proba'): return self.estimator_.predict_proba(*args, **kwargs) else: return NotImplemented def predict(self, X, *args, **kwargs): return self.estimator_.predict(X, *args, **kwargs) def score(self, *args, **kwargs): if hasattr(self.estimator_, 'score'): return self.estimator_.score(*args, **kwargs) else: return NotImplemented
(estimator_: sklearn.base.BaseEstimator, desired_complexity: int = 1, complexity_measure='max_rules', *args, **kwargs)
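A hedged usage sketch for the wrapper above; it assumes DecisionTreeCCPClassifier is importable from imodels.tree.cart_ccp (its package as listed) and uses a scikit-learn toy dataset. The target of 8 rules is arbitrary.

# Sketch only: prune a CART tree toward roughly 8 rules via cost-complexity pruning.
from sklearn.datasets import load_breast_cancer
from sklearn.tree import DecisionTreeClassifier
from imodels.tree.cart_ccp import DecisionTreeCCPClassifier  # module path as listed above

X, y = load_breast_cancer(return_X_y=True)
ccp = DecisionTreeCCPClassifier(DecisionTreeClassifier(random_state=0),
                                desired_complexity=8,
                                complexity_measure='max_rules')
ccp.fit(X, y)
print(ccp.alpha)        # ccp_alpha found by the binary search in _get_alpha
print(ccp.score(X, y))  # delegated to the wrapped, pruned DecisionTreeClassifier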
16,858
imodels.tree.cart_ccp
__init__
null
def __init__(self, estimator_: BaseEstimator, desired_complexity: int = 1, complexity_measure='max_rules', *args, **kwargs): self.desired_complexity = desired_complexity # print('est', estimator_) self.estimator_ = estimator_ self.complexity_measure = complexity_measure
(self, estimator_: sklearn.base.BaseEstimator, desired_complexity: int = 1, complexity_measure='max_rules', *args, **kwargs)
16,864
sklearn.tree._classes
_compute_missing_values_in_feature_mask
Return boolean mask denoting if there are missing values for each feature. This method also ensures that X is finite. Parameter --------- X : array-like of shape (n_samples, n_features), dtype=DOUBLE Input data. estimator_name : str or None, default=None Name to use when raising an error. Defaults to the class name. Returns ------- missing_values_in_feature_mask : ndarray of shape (n_features,), or None Missing value mask. If missing values are not supported or there are no missing values, return None.
def _compute_missing_values_in_feature_mask(self, X, estimator_name=None): """Return boolean mask denoting if there are missing values for each feature. This method also ensures that X is finite. Parameter --------- X : array-like of shape (n_samples, n_features), dtype=DOUBLE Input data. estimator_name : str or None, default=None Name to use when raising an error. Defaults to the class name. Returns ------- missing_values_in_feature_mask : ndarray of shape (n_features,), or None Missing value mask. If missing values are not supported or there are no missing values, return None. """ estimator_name = estimator_name or self.__class__.__name__ common_kwargs = dict(estimator_name=estimator_name, input_name="X") if not self._support_missing_values(X): assert_all_finite(X, **common_kwargs) return None with np.errstate(over="ignore"): overall_sum = np.sum(X) if not np.isfinite(overall_sum): # Raise a ValueError in case of the presence of an infinite element. _assert_all_finite_element_wise(X, xp=np, allow_nan=True, **common_kwargs) # If the sum is not nan, then there are no missing values if not np.isnan(overall_sum): return None missing_values_in_feature_mask = _any_isnan_axis0(X) return missing_values_in_feature_mask
(self, X, estimator_name=None)
16,865
sklearn.tree._classes
_fit
null
def _fit( self, X, y, sample_weight=None, check_input=True, missing_values_in_feature_mask=None, ): random_state = check_random_state(self.random_state) if check_input: # Need to validate separately here. # We can't pass multi_output=True because that would allow y to be # csr. # _compute_missing_values_in_feature_mask will check for finite values and # compute the missing mask if the tree supports missing values check_X_params = dict( dtype=DTYPE, accept_sparse="csc", force_all_finite=False ) check_y_params = dict(ensure_2d=False, dtype=None) X, y = self._validate_data( X, y, validate_separately=(check_X_params, check_y_params) ) missing_values_in_feature_mask = ( self._compute_missing_values_in_feature_mask(X) ) if issparse(X): X.sort_indices() if X.indices.dtype != np.intc or X.indptr.dtype != np.intc: raise ValueError( "No support for np.int64 index based sparse matrices" ) if self.criterion == "poisson": if np.any(y < 0): raise ValueError( "Some value(s) of y are negative which is" " not allowed for Poisson regression." ) if np.sum(y) <= 0: raise ValueError( "Sum of y is not positive which is " "necessary for Poisson regression." ) # Determine output settings n_samples, self.n_features_in_ = X.shape is_classification = is_classifier(self) y = np.atleast_1d(y) expanded_class_weight = None if y.ndim == 1: # reshape is necessary to preserve the data contiguity against vs # [:, np.newaxis] that does not. y = np.reshape(y, (-1, 1)) self.n_outputs_ = y.shape[1] if is_classification: check_classification_targets(y) y = np.copy(y) self.classes_ = [] self.n_classes_ = [] if self.class_weight is not None: y_original = np.copy(y) y_encoded = np.zeros(y.shape, dtype=int) for k in range(self.n_outputs_): classes_k, y_encoded[:, k] = np.unique(y[:, k], return_inverse=True) self.classes_.append(classes_k) self.n_classes_.append(classes_k.shape[0]) y = y_encoded if self.class_weight is not None: expanded_class_weight = compute_sample_weight( self.class_weight, y_original ) self.n_classes_ = np.array(self.n_classes_, dtype=np.intp) if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous: y = np.ascontiguousarray(y, dtype=DOUBLE) max_depth = np.iinfo(np.int32).max if self.max_depth is None else self.max_depth if isinstance(self.min_samples_leaf, numbers.Integral): min_samples_leaf = self.min_samples_leaf else: # float min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples)) if isinstance(self.min_samples_split, numbers.Integral): min_samples_split = self.min_samples_split else: # float min_samples_split = int(ceil(self.min_samples_split * n_samples)) min_samples_split = max(2, min_samples_split) min_samples_split = max(min_samples_split, 2 * min_samples_leaf) if isinstance(self.max_features, str): if self.max_features == "sqrt": max_features = max(1, int(np.sqrt(self.n_features_in_))) elif self.max_features == "log2": max_features = max(1, int(np.log2(self.n_features_in_))) elif self.max_features is None: max_features = self.n_features_in_ elif isinstance(self.max_features, numbers.Integral): max_features = self.max_features else: # float if self.max_features > 0.0: max_features = max(1, int(self.max_features * self.n_features_in_)) else: max_features = 0 self.max_features_ = max_features max_leaf_nodes = -1 if self.max_leaf_nodes is None else self.max_leaf_nodes if len(y) != n_samples: raise ValueError( "Number of labels=%d does not match number of samples=%d" % (len(y), n_samples) ) if sample_weight is not None: sample_weight = _check_sample_weight(sample_weight, X, DOUBLE) if expanded_class_weight is not None: if sample_weight is not None: sample_weight = sample_weight * expanded_class_weight else: sample_weight = expanded_class_weight # Set min_weight_leaf from min_weight_fraction_leaf if sample_weight is None: min_weight_leaf = self.min_weight_fraction_leaf * n_samples else: min_weight_leaf = self.min_weight_fraction_leaf * np.sum(sample_weight) # Build tree criterion = self.criterion if not isinstance(criterion, Criterion): if is_classification: criterion = CRITERIA_CLF[self.criterion]( self.n_outputs_, self.n_classes_ ) else: criterion = CRITERIA_REG[self.criterion](self.n_outputs_, n_samples) else: # Make a deepcopy in case the criterion has mutable attributes that # might be shared and modified concurrently during parallel fitting criterion = copy.deepcopy(criterion) SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS splitter = self.splitter if self.monotonic_cst is None: monotonic_cst = None else: if self.n_outputs_ > 1: raise ValueError( "Monotonicity constraints are not supported with multiple outputs." ) # Check to correct monotonicity constraint' specification, # by applying element-wise logical conjunction # Note: we do not cast `np.asarray(self.monotonic_cst, dtype=np.int8)` # straight away here so as to generate error messages for invalid # values using the original values prior to any dtype related conversion. monotonic_cst = np.asarray(self.monotonic_cst) if monotonic_cst.shape[0] != X.shape[1]: raise ValueError( "monotonic_cst has shape {} but the input data " "X has {} features.".format(monotonic_cst.shape[0], X.shape[1]) ) valid_constraints = np.isin(monotonic_cst, (-1, 0, 1)) if not np.all(valid_constraints): unique_constaints_value = np.unique(monotonic_cst) raise ValueError( "monotonic_cst must be None or an array-like of -1, 0 or 1, but" f" got {unique_constaints_value}" ) monotonic_cst = np.asarray(monotonic_cst, dtype=np.int8) if is_classifier(self): if self.n_classes_[0] > 2: raise ValueError( "Monotonicity constraints are not supported with multiclass " "classification" ) # Binary classification trees are built by constraining probabilities # of the *negative class* in order to make the implementation similar # to regression trees. # Since self.monotonic_cst encodes constraints on probabilities of the # *positive class*, all signs must be flipped. monotonic_cst *= -1 if not isinstance(self.splitter, Splitter): splitter = SPLITTERS[self.splitter]( criterion, self.max_features_, min_samples_leaf, min_weight_leaf, random_state, monotonic_cst, ) if is_classifier(self): self.tree_ = Tree(self.n_features_in_, self.n_classes_, self.n_outputs_) else: self.tree_ = Tree( self.n_features_in_, # TODO: tree shouldn't need this in this case np.array([1] * self.n_outputs_, dtype=np.intp), self.n_outputs_, ) # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise if max_leaf_nodes < 0: builder = DepthFirstTreeBuilder( splitter, min_samples_split, min_samples_leaf, min_weight_leaf, max_depth, self.min_impurity_decrease, ) else: builder = BestFirstTreeBuilder( splitter, min_samples_split, min_samples_leaf, min_weight_leaf, max_depth, max_leaf_nodes, self.min_impurity_decrease, ) builder.build(self.tree_, X, y, sample_weight, missing_values_in_feature_mask) if self.n_outputs_ == 1 and is_classifier(self): self.n_classes_ = self.n_classes_[0] self.classes_ = self.classes_[0] self._prune_tree() return self
(self, X, y, sample_weight=None, check_input=True, missing_values_in_feature_mask=None)
16,866
imodels.tree.cart_ccp
_get_alpha
null
def _get_alpha(self, X, y, sample_weight=None, *args, **kwargs): path = self.estimator_.cost_complexity_pruning_path(X, y) ccp_alphas, impurities = path.ccp_alphas, path.impurities complexities = {} low = 0 high = len(ccp_alphas) - 1 cur = 0 while low <= high: cur = (high + low) // 2 est_params = self.estimator_.get_params() est_params['ccp_alpha'] = ccp_alphas[cur] copied_estimator = deepcopy(self.estimator_).set_params(**est_params) copied_estimator.fit(X, y) if self._get_complexity(copied_estimator, self.complexity_measure) < self.desired_complexity: high = cur - 1 elif self._get_complexity(copied_estimator, self.complexity_measure) > self.desired_complexity: low = cur + 1 else: break self.alpha = ccp_alphas[cur] # for alpha in ccp_alphas: # est_params = self.estimator_.get_params() # est_params['ccp_alpha'] = alpha # copied_estimator = deepcopy(self.estimator_).set_params(**est_params) # copied_estimator.fit(X, y) # complexities[alpha] = self._get_complexity(copied_estimator,self.complexity_measure) # closest_alpha, closest_leaves = min(complexities.items(), key=lambda x: abs(self.desired_complexity - x[1])) # self.alpha = closest_alpha
(self, X, y, sample_weight=None, *args, **kwargs)
16,867
imodels.tree.cart_ccp
_get_complexity
null
def _get_complexity(self, BaseEstimator, complexity_measure): return compute_tree_complexity(BaseEstimator.tree_, complexity_measure)
(self, BaseEstimator, complexity_measure)
16,871
sklearn.tree._classes
_more_tags
null
def _more_tags(self): # XXX: nan is only support for dense arrays, but we set this for common test to # pass, specifically: check_estimators_nan_inf allow_nan = self.splitter == "best" and self.criterion in { "gini", "log_loss", "entropy", } return {"multilabel": True, "allow_nan": allow_nan}
(self)
16,872
sklearn.tree._classes
_prune_tree
Prune tree using Minimal Cost-Complexity Pruning.
def _prune_tree(self): """Prune tree using Minimal Cost-Complexity Pruning.""" check_is_fitted(self) if self.ccp_alpha == 0.0: return # build pruned tree if is_classifier(self): n_classes = np.atleast_1d(self.n_classes_) pruned_tree = Tree(self.n_features_in_, n_classes, self.n_outputs_) else: pruned_tree = Tree( self.n_features_in_, # TODO: the tree shouldn't need this param np.array([1] * self.n_outputs_, dtype=np.intp), self.n_outputs_, ) _build_pruned_tree_ccp(pruned_tree, self.tree_, self.ccp_alpha) self.tree_ = pruned_tree
(self)
16,875
sklearn.tree._classes
_support_missing_values
null
def _support_missing_values(self, X): return ( not issparse(X) and self._get_tags()["allow_nan"] and self.monotonic_cst is None )
(self, X)
16,876
sklearn.tree._classes
_validate_X_predict
Validate the training data on predict (probabilities).
def _validate_X_predict(self, X, check_input): """Validate the training data on predict (probabilities).""" if check_input: if self._support_missing_values(X): force_all_finite = "allow-nan" else: force_all_finite = True X = self._validate_data( X, dtype=DTYPE, accept_sparse="csr", reset=False, force_all_finite=force_all_finite, ) if issparse(X) and ( X.indices.dtype != np.intc or X.indptr.dtype != np.intc ): raise ValueError("No support for np.int64 index based sparse matrices") else: # The number of features is checked regardless of `check_input` self._check_n_features(X, reset=False) return X
(self, X, check_input)
16,879
sklearn.tree._classes
apply
Return the index of the leaf that each sample is predicted as. .. versionadded:: 0.17 Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. check_input : bool, default=True Allow to bypass several input checking. Don't use this parameter unless you know what you're doing. Returns ------- X_leaves : array-like of shape (n_samples,) For each datapoint x in X, return the index of the leaf x ends up in. Leaves are numbered within ``[0; self.tree_.node_count)``, possibly with gaps in the numbering.
def apply(self, X, check_input=True): """Return the index of the leaf that each sample is predicted as. .. versionadded:: 0.17 Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. check_input : bool, default=True Allow to bypass several input checking. Don't use this parameter unless you know what you're doing. Returns ------- X_leaves : array-like of shape (n_samples,) For each datapoint x in X, return the index of the leaf x ends up in. Leaves are numbered within ``[0; self.tree_.node_count)``, possibly with gaps in the numbering. """ check_is_fitted(self) X = self._validate_X_predict(X, check_input) return self.tree_.apply(X)
(self, X, check_input=True)
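A quick example of apply(): map each training sample to the leaf it falls into and count samples per leaf.

# Count samples per leaf using apply().
import numpy as np
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
tree = DecisionTreeClassifier(max_depth=3, random_state=0).fit(X, y)
leaf_ids = tree.apply(X)                            # one leaf index per sample
leaves, counts = np.unique(leaf_ids, return_counts=True)
print(dict(zip(leaves.tolist(), counts.tolist())))  # {leaf_index: n_samples}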
16,880
sklearn.tree._classes
cost_complexity_pruning_path
Compute the pruning path during Minimal Cost-Complexity Pruning. See :ref:`minimal_cost_complexity_pruning` for details on the pruning process. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csc_matrix``. y : array-like of shape (n_samples,) or (n_samples, n_outputs) The target values (class labels) as integers or strings. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. Splits are also ignored if they would result in any single class carrying a negative weight in either child node. Returns ------- ccp_path : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. ccp_alphas : ndarray Effective alphas of subtree during pruning. impurities : ndarray Sum of the impurities of the subtree leaves for the corresponding alpha value in ``ccp_alphas``.
def cost_complexity_pruning_path(self, X, y, sample_weight=None): """Compute the pruning path during Minimal Cost-Complexity Pruning. See :ref:`minimal_cost_complexity_pruning` for details on the pruning process. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csc_matrix``. y : array-like of shape (n_samples,) or (n_samples, n_outputs) The target values (class labels) as integers or strings. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. Splits are also ignored if they would result in any single class carrying a negative weight in either child node. Returns ------- ccp_path : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. ccp_alphas : ndarray Effective alphas of subtree during pruning. impurities : ndarray Sum of the impurities of the subtree leaves for the corresponding alpha value in ``ccp_alphas``. """ est = clone(self).set_params(ccp_alpha=0.0) est.fit(X, y, sample_weight=sample_weight) return Bunch(**ccp_pruning_path(est.tree_))
(self, X, y, sample_weight=None)
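Example of the pruning-path API: inspect the effective alphas, then refit at one of them.

# Inspect the pruning path and refit at a strongly pruning alpha.
from sklearn.datasets import load_breast_cancer
from sklearn.tree import DecisionTreeClassifier

X, y = load_breast_cancer(return_X_y=True)
path = DecisionTreeClassifier(random_state=0).cost_complexity_pruning_path(X, y)
print(path.ccp_alphas[:5], path.impurities[:5])  # effective alphas and leaf impurities

pruned = DecisionTreeClassifier(random_state=0,
                                ccp_alpha=path.ccp_alphas[-2]).fit(X, y)
print(pruned.get_n_leaves())                     # small tree after heavy pruning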
16,881
sklearn.tree._classes
decision_path
Return the decision path in the tree. .. versionadded:: 0.18 Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. check_input : bool, default=True Allow to bypass several input checking. Don't use this parameter unless you know what you're doing. Returns ------- indicator : sparse matrix of shape (n_samples, n_nodes) Return a node indicator CSR matrix where non zero elements indicates that the samples goes through the nodes.
def decision_path(self, X, check_input=True): """Return the decision path in the tree. .. versionadded:: 0.18 Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. check_input : bool, default=True Allow to bypass several input checking. Don't use this parameter unless you know what you're doing. Returns ------- indicator : sparse matrix of shape (n_samples, n_nodes) Return a node indicator CSR matrix where non zero elements indicates that the samples goes through the nodes. """ X = self._validate_X_predict(X, check_input) return self.tree_.decision_path(X)
(self, X, check_input=True)
16,882
imodels.tree.cart_ccp
fit
null
def fit(self, X, y, sample_weight=None, *args, **kwargs): params_for_fitting = self.estimator_.get_params() self._get_alpha(X, y, sample_weight, *args, **kwargs) params_for_fitting['ccp_alpha'] = self.alpha self.estimator_.set_params(**params_for_fitting) self.estimator_.fit(X, y, *args, **kwargs)
(self, X, y, sample_weight=None, *args, **kwargs)
16,883
sklearn.tree._classes
get_depth
Return the depth of the decision tree. The depth of a tree is the maximum distance between the root and any leaf. Returns ------- self.tree_.max_depth : int The maximum depth of the tree.
def get_depth(self): """Return the depth of the decision tree. The depth of a tree is the maximum distance between the root and any leaf. Returns ------- self.tree_.max_depth : int The maximum depth of the tree. """ check_is_fitted(self) return self.tree_.max_depth
(self)
16,885
sklearn.tree._classes
get_n_leaves
Return the number of leaves of the decision tree. Returns ------- self.tree_.n_leaves : int Number of leaves.
def get_n_leaves(self): """Return the number of leaves of the decision tree. Returns ------- self.tree_.n_leaves : int Number of leaves. """ check_is_fitted(self) return self.tree_.n_leaves
(self)
16,887
imodels.tree.cart_ccp
predict
null
def predict(self, X, *args, **kwargs): return self.estimator_.predict(X, *args, **kwargs)
(self, X, *args, **kwargs)
16,888
sklearn.tree._classes
predict_log_proba
Predict class log-probabilities of the input samples X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- proba : ndarray of shape (n_samples, n_classes) or list of n_outputs such arrays if n_outputs > 1 The class log-probabilities of the input samples. The order of the classes corresponds to that in the attribute :term:`classes_`.
def predict_log_proba(self, X): """Predict class log-probabilities of the input samples X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. Returns ------- proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \ such arrays if n_outputs > 1 The class log-probabilities of the input samples. The order of the classes corresponds to that in the attribute :term:`classes_`. """ proba = self.predict_proba(X) if self.n_outputs_ == 1: return np.log(proba) else: for k in range(self.n_outputs_): proba[k] = np.log(proba[k]) return proba
(self, X)
16,889
imodels.tree.cart_ccp
predict_proba
null
def predict_proba(self, *args, **kwargs): if hasattr(self.estimator_, 'predict_proba'): return self.estimator_.predict_proba(*args, **kwargs) else: return NotImplemented
(self, *args, **kwargs)
16,890
imodels.tree.cart_ccp
score
null
def score(self, *args, **kwargs): if hasattr(self.estimator_, 'score'): return self.estimator_.score(*args, **kwargs) else: return NotImplemented
(self, *args, **kwargs)
16,893
sklearn.utils._metadata_requests
set_predict_proba_request
Request metadata passed to the ``predict_proba`` method. Note that this method is only relevant if ``enable_metadata_routing=True`` (see :func:`sklearn.set_config`). Please see :ref:`User Guide <metadata_routing>` on how the routing mechanism works. The options for each parameter are: - ``True``: metadata is requested, and passed to ``predict_proba`` if provided. The request is ignored if metadata is not provided. - ``False``: metadata is not requested and the meta-estimator will not pass it to ``predict_proba``. - ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it. - ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name. The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the existing request. This allows you to change the request for some parameters and not others. .. versionadded:: 1.3 .. note:: This method is only relevant if this estimator is used as a sub-estimator of a meta-estimator, e.g. used inside a :class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect. Parameters ---------- check_input : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED Metadata routing for ``check_input`` parameter in ``predict_proba``. Returns ------- self : object The updated object.
def __get__(self, instance, owner): # we would want to have a method which accepts only the expected args def func(**kw): """Updates the request for provided parameters This docstring is overwritten below. See REQUESTER_DOC for expected functionality """ if not _routing_enabled(): raise RuntimeError( "This method is only available when metadata routing is enabled." " You can enable it using" " sklearn.set_config(enable_metadata_routing=True)." ) if self.validate_keys and (set(kw) - set(self.keys)): raise TypeError( f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments" f" are: {set(self.keys)}" ) requests = instance._get_metadata_request() method_metadata_request = getattr(requests, self.name) for prop, alias in kw.items(): if alias is not UNCHANGED: method_metadata_request.add_request(param=prop, alias=alias) instance._metadata_request = requests return instance # Now we set the relevant attributes of the function so that it seems # like a normal method to the end user, with known expected arguments. func.__name__ = f"set_{self.name}_request" params = [ inspect.Parameter( name="self", kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=owner, ) ] params.extend( [ inspect.Parameter( k, inspect.Parameter.KEYWORD_ONLY, default=UNCHANGED, annotation=Optional[Union[bool, None, str]], ) for k in self.keys ] ) func.__signature__ = inspect.Signature( params, return_annotation=owner, ) doc = REQUESTER_DOC.format(method=self.name) for metadata in self.keys: doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name) doc += REQUESTER_DOC_RETURN func.__doc__ = doc return func
(self: imodels.tree.cart_ccp.DecisionTreeCCPClassifier, *, check_input: Union[bool, NoneType, str] = '$UNCHANGED$') -> imodels.tree.cart_ccp.DecisionTreeCCPClassifier
16,894
sklearn.utils._metadata_requests
set_predict_request
Request metadata passed to the ``predict`` method. Note that this method is only relevant if ``enable_metadata_routing=True`` (see :func:`sklearn.set_config`). Please see :ref:`User Guide <metadata_routing>` on how the routing mechanism works. The options for each parameter are: - ``True``: metadata is requested, and passed to ``predict`` if provided. The request is ignored if metadata is not provided. - ``False``: metadata is not requested and the meta-estimator will not pass it to ``predict``. - ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it. - ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name. The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the existing request. This allows you to change the request for some parameters and not others. .. versionadded:: 1.3 .. note:: This method is only relevant if this estimator is used as a sub-estimator of a meta-estimator, e.g. used inside a :class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect. Parameters ---------- check_input : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED Metadata routing for ``check_input`` parameter in ``predict``. Returns ------- self : object The updated object.
def __get__(self, instance, owner): # we would want to have a method which accepts only the expected args def func(**kw): """Updates the request for provided parameters This docstring is overwritten below. See REQUESTER_DOC for expected functionality """ if not _routing_enabled(): raise RuntimeError( "This method is only available when metadata routing is enabled." " You can enable it using" " sklearn.set_config(enable_metadata_routing=True)." ) if self.validate_keys and (set(kw) - set(self.keys)): raise TypeError( f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments" f" are: {set(self.keys)}" ) requests = instance._get_metadata_request() method_metadata_request = getattr(requests, self.name) for prop, alias in kw.items(): if alias is not UNCHANGED: method_metadata_request.add_request(param=prop, alias=alias) instance._metadata_request = requests return instance # Now we set the relevant attributes of the function so that it seems # like a normal method to the end user, with known expected arguments. func.__name__ = f"set_{self.name}_request" params = [ inspect.Parameter( name="self", kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=owner, ) ] params.extend( [ inspect.Parameter( k, inspect.Parameter.KEYWORD_ONLY, default=UNCHANGED, annotation=Optional[Union[bool, None, str]], ) for k in self.keys ] ) func.__signature__ = inspect.Signature( params, return_annotation=owner, ) doc = REQUESTER_DOC.format(method=self.name) for metadata in self.keys: doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name) doc += REQUESTER_DOC_RETURN func.__doc__ = doc return func
(self: imodels.tree.cart_ccp.DecisionTreeCCPClassifier, *, check_input: Union[bool, NoneType, str] = '$UNCHANGED$') -> imodels.tree.cart_ccp.DecisionTreeCCPClassifier
16,896
imodels.tree.cart_ccp
DecisionTreeCCPRegressor
null
class DecisionTreeCCPRegressor(BaseEstimator): def __init__(self, estimator_: BaseEstimator, desired_complexity: int = 1, complexity_measure='max_rules', *args, **kwargs): self.desired_complexity = desired_complexity # print('est', estimator_) self.estimator_ = estimator_ self.alpha = 0.0 self.complexity_measure = complexity_measure def _get_alpha(self, X, y, sample_weight=None): path = self.estimator_.cost_complexity_pruning_path(X, y) ccp_alphas, impurities = path.ccp_alphas, path.impurities complexities = {} low = 0 high = len(ccp_alphas) - 1 cur = 0 while low <= high: cur = (high + low) // 2 est_params = self.estimator_.get_params() est_params['ccp_alpha'] = ccp_alphas[cur] copied_estimator = deepcopy(self.estimator_).set_params(**est_params) copied_estimator.fit(X, y) if self._get_complexity(copied_estimator, self.complexity_measure) < self.desired_complexity: high = cur - 1 elif self._get_complexity(copied_estimator, self.complexity_measure) > self.desired_complexity: low = cur + 1 else: break self.alpha = ccp_alphas[cur] # path = self.estimator_.cost_complexity_pruning_path(X,y) # ccp_alphas, impurities = path.ccp_alphas, path.impurities # complexities = {} # for alpha in ccp_alphas: # est_params = self.estimator_.get_params() # est_params['ccp_alpha'] = alpha # copied_estimator = deepcopy(self.estimator_).set_params(**est_params) # copied_estimator.fit(X, y) # complexities[alpha] = self._get_complexity(copied_estimator,self.complexity_measure) # closest_alpha, closest_leaves = min(complexities.items(), key=lambda x: abs(self.desired_complexity - x[1])) # self.alpha = closest_alpha def fit(self, X, y, sample_weight=None): params_for_fitting = self.estimator_.get_params() self._get_alpha(X, y, sample_weight) params_for_fitting['ccp_alpha'] = self.alpha self.estimator_.set_params(**params_for_fitting) self.estimator_.fit(X, y) def _get_complexity(self, BaseEstimator, complexity_measure): return compute_tree_complexity(BaseEstimator.tree_, self.complexity_measure) def predict(self, X, *args, **kwargs): return self.estimator_.predict(X, *args, **kwargs) def score(self, *args, **kwargs): if hasattr(self.estimator_, 'score'): return self.estimator_.score(*args, **kwargs) else: return NotImplemented
(estimator_: sklearn.base.BaseEstimator, desired_complexity: int = 1, complexity_measure='max_rules', *args, **kwargs)
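A matching hedged sketch for the regressor wrapper, again assuming the imodels.tree.cart_ccp module path and a toy dataset:

# Sketch only: prune a regression tree toward a desired complexity.
from sklearn.datasets import load_diabetes
from sklearn.tree import DecisionTreeRegressor
from imodels.tree.cart_ccp import DecisionTreeCCPRegressor  # module path as listed above

X, y = load_diabetes(return_X_y=True)
reg = DecisionTreeCCPRegressor(DecisionTreeRegressor(random_state=0),
                               desired_complexity=8)
reg.fit(X, y)
print(reg.alpha)        # chosen ccp_alpha
print(reg.score(X, y))  # R^2 of the pruned tree, delegated to the wrapped estimator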