index | package | name | docstring | code | signature
---|---|---|---|---|---|
16,898 | imodels.tree.cart_ccp | __init__ | null | def __init__(self, estimator_: BaseEstimator, desired_complexity: int = 1, complexity_measure='max_rules', *args,
**kwargs):
self.desired_complexity = desired_complexity
# print('est', estimator_)
self.estimator_ = estimator_
self.alpha = 0.0
self.complexity_measure = complexity_measure
| (self, estimator_: sklearn.base.BaseEstimator, desired_complexity: int = 1, complexity_measure='max_rules', *args, **kwargs) |
16,904 | imodels.tree.cart_ccp | _get_alpha | null | def _get_alpha(self, X, y, sample_weight=None):
    # Binary-search the cost-complexity pruning path for the alpha whose
    # refitted tree matches the desired complexity (larger alphas prune harder,
    # so complexity decreases monotonically along the path).
    path = self.estimator_.cost_complexity_pruning_path(X, y)
    ccp_alphas, impurities = path.ccp_alphas, path.impurities
    complexities = {}
    low = 0
    high = len(ccp_alphas) - 1
    cur = 0
    while low <= high:
        cur = (high + low) // 2
        est_params = self.estimator_.get_params()
        est_params['ccp_alpha'] = ccp_alphas[cur]
        copied_estimator = deepcopy(self.estimator_).set_params(**est_params)
        copied_estimator.fit(X, y)
        if self._get_complexity(copied_estimator, self.complexity_measure) < self.desired_complexity:
            # Tree is too small (pruned too hard): move toward smaller alphas.
            high = cur - 1
        elif self._get_complexity(copied_estimator, self.complexity_measure) > self.desired_complexity:
            # Tree is still too large: move toward larger alphas.
            low = cur + 1
        else:
            break
    self.alpha = ccp_alphas[cur]
| (self, X, y, sample_weight=None) |
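The search above can be reproduced with scikit-learn's public API alone: `cost_complexity_pruning_path` returns candidate `ccp_alphas` in increasing order, and larger alphas prune more aggressively, so complexity falls monotonically along the path. A minimal standalone sketch (the dataset, leaf-count target, and use of `get_n_leaves` as the complexity measure are illustrative assumptions, not part of the code above):

```python
from copy import deepcopy

from sklearn.datasets import load_breast_cancer
from sklearn.tree import DecisionTreeClassifier

X, y = load_breast_cancer(return_X_y=True)
base = DecisionTreeClassifier(random_state=0)

# Candidate alphas from minimal cost-complexity pruning, sorted in increasing order.
ccp_alphas = base.cost_complexity_pruning_path(X, y).ccp_alphas

desired_leaves = 8  # illustrative complexity target
low, high, cur = 0, len(ccp_alphas) - 1, 0
while low <= high:
    cur = (low + high) // 2
    est = deepcopy(base).set_params(ccp_alpha=ccp_alphas[cur])
    est.fit(X, y)
    n_leaves = est.get_n_leaves()
    if n_leaves < desired_leaves:    # pruned too hard -> try a smaller alpha
        high = cur - 1
    elif n_leaves > desired_leaves:  # tree still too large -> try a larger alpha
        low = cur + 1
    else:
        break

print(ccp_alphas[cur], est.get_n_leaves())
```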
16,905 | imodels.tree.cart_ccp | _get_complexity | null | def _get_complexity(self, BaseEstimator, complexity_measure):
return compute_tree_complexity(BaseEstimator.tree_, self.complexity_measure)
| (self, BaseEstimator, complexity_measure) |
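`_get_complexity` delegates to `compute_tree_complexity` from imodels' utilities; the sketch below shows the kind of quantity such a measure captures, using only the fitted `tree_` attributes (the exact definitions used by imodels may differ):

```python
import numpy as np
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

clf = DecisionTreeClassifier(max_depth=3, random_state=0).fit(*load_iris(return_X_y=True))
tree = clf.tree_

is_leaf = tree.children_left == -1      # leaf nodes have no children
n_leaves = int(np.sum(is_leaf))         # a "max_leaves"-style complexity
n_splits = tree.node_count - n_leaves   # a "max_rules"-style complexity (internal split nodes)
print(n_leaves, n_splits)
```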
16,914 | imodels.tree.cart_ccp | fit | null | # closest_alpha, closest_leaves = min(complexities.items(), key=lambda x: abs(self.desired_complexity - x[1]))
# self.alpha = closest_alpha
def fit(self, X, y, sample_weight=None):
params_for_fitting = self.estimator_.get_params()
self._get_alpha(X, y, sample_weight)
params_for_fitting['ccp_alpha'] = self.alpha
self.estimator_.set_params(**params_for_fitting)
self.estimator_.fit(X, y)
| (self, X, y, sample_weight=None) |
16,921 | sklearn.tree._classes | DecisionTreeClassifier | A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : {"gini", "entropy", "log_loss"}, default="gini"
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "log_loss" and "entropy" both for the
Shannon information gain, see :ref:`tree_mathematical_formulation`.
splitter : {"best", "random"}, default="best"
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float or {"sqrt", "log2"}, default=None
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`max(1, int(max_features * n_features_in_))` features are considered at
each split.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the estimator. The features are always
randomly permuted at each split, even if ``splitter`` is set to
``"best"``. When ``max_features < n_features``, the algorithm will
select ``max_features`` at random at each split before finding the best
split among them. But the best found split may vary across different
runs, even if ``max_features=n_features``. That is the case, if the
improvement of the criterion is identical for several splits and one
split has to be selected at random. To obtain a deterministic behaviour
during fitting, ``random_state`` has to be fixed to an integer.
See :term:`Glossary <random_state>` for details.
max_leaf_nodes : int, default=None
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
class_weight : dict, list of dict or "balanced", default=None
Weights associated with classes in the form ``{class_label: weight}``.
If None, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details.
.. versionadded:: 0.22
monotonic_cst : array-like of int of shape (n_features), default=None
Indicates the monotonicity constraint to enforce on each feature.
- 1: monotonic increase
- 0: no constraint
- -1: monotonic decrease
If monotonic_cst is None, no constraints are applied.
Monotonicity constraints are not supported for:
- multiclass classifications (i.e. when `n_classes > 2`),
- multioutput classifications (i.e. when `n_outputs_ > 1`),
- classifications trained on data with missing values.
The constraints hold over the probability of the positive class.
Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.
.. versionadded:: 1.4
Attributes
----------
classes_ : ndarray of shape (n_classes,) or list of ndarray
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
max_features_ : int
The inferred value of max_features.
n_classes_ : int or list of int
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree instance
The underlying Tree object. Please refer to
``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and
:ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`
for basic usage of these attributes.
See Also
--------
DecisionTreeRegressor : A decision tree regressor.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The :meth:`predict` method operates using the :func:`numpy.argmax`
function on the outputs of :meth:`predict_proba`. This means that in
case the highest predicted probabilities are tied, the classifier will
predict the tied class with the lowest index in :term:`classes_`.
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
| class DecisionTreeClassifier(ClassifierMixin, BaseDecisionTree):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : {"gini", "entropy", "log_loss"}, default="gini"
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "log_loss" and "entropy" both for the
Shannon information gain, see :ref:`tree_mathematical_formulation`.
splitter : {"best", "random"}, default="best"
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float or {"sqrt", "log2"}, default=None
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`max(1, int(max_features * n_features_in_))` features are considered at
each split.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the estimator. The features are always
randomly permuted at each split, even if ``splitter`` is set to
``"best"``. When ``max_features < n_features``, the algorithm will
select ``max_features`` at random at each split before finding the best
split among them. But the best found split may vary across different
runs, even if ``max_features=n_features``. That is the case, if the
improvement of the criterion is identical for several splits and one
split has to be selected at random. To obtain a deterministic behaviour
during fitting, ``random_state`` has to be fixed to an integer.
See :term:`Glossary <random_state>` for details.
max_leaf_nodes : int, default=None
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
class_weight : dict, list of dict or "balanced", default=None
Weights associated with classes in the form ``{class_label: weight}``.
If None, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details.
.. versionadded:: 0.22
monotonic_cst : array-like of int of shape (n_features), default=None
Indicates the monotonicity constraint to enforce on each feature.
- 1: monotonic increase
- 0: no constraint
- -1: monotonic decrease
If monotonic_cst is None, no constraints are applied.
Monotonicity constraints are not supported for:
- multiclass classifications (i.e. when `n_classes > 2`),
- multioutput classifications (i.e. when `n_outputs_ > 1`),
- classifications trained on data with missing values.
The constraints hold over the probability of the positive class.
Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.
.. versionadded:: 1.4
Attributes
----------
classes_ : ndarray of shape (n_classes,) or list of ndarray
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
max_features_ : int
The inferred value of max_features.
n_classes_ : int or list of int
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree instance
The underlying Tree object. Please refer to
``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and
:ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`
for basic usage of these attributes.
See Also
--------
DecisionTreeRegressor : A decision tree regressor.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The :meth:`predict` method operates using the :func:`numpy.argmax`
function on the outputs of :meth:`predict_proba`. This means that in
case the highest predicted probabilities are tied, the classifier will
predict the tied class with the lowest index in :term:`classes_`.
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
_parameter_constraints: dict = {
**BaseDecisionTree._parameter_constraints,
"criterion": [StrOptions({"gini", "entropy", "log_loss"}), Hidden(Criterion)],
"class_weight": [dict, list, StrOptions({"balanced"}), None],
}
def __init__(
self,
*,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_decrease=0.0,
class_weight=None,
ccp_alpha=0.0,
monotonic_cst=None,
):
super().__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
min_impurity_decrease=min_impurity_decrease,
monotonic_cst=monotonic_cst,
ccp_alpha=ccp_alpha,
)
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels) as integers or strings.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. Splits are also
ignored if they would result in any single class carrying a
negative weight in either child node.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you're doing.
Returns
-------
self : DecisionTreeClassifier
Fitted estimator.
"""
super()._fit(
X,
y,
sample_weight=sample_weight,
check_input=check_input,
)
return self
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you're doing.
Returns
-------
proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \
such arrays if n_outputs > 1
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
check_is_fitted(self)
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
return proba[:, : self.n_classes_]
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, : self.n_classes_[k]]
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \
such arrays if n_outputs > 1
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
def _more_tags(self):
# XXX: nan is only supported for dense arrays, but we set this for the common test to
# pass, specifically: check_estimators_nan_inf
allow_nan = self.splitter == "best" and self.criterion in {
"gini",
"log_loss",
"entropy",
}
return {"multilabel": True, "allow_nan": allow_nan}
| (*, criterion='gini', splitter='best', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0, class_weight=None, ccp_alpha=0.0, monotonic_cst=None) |
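The `class_weight="balanced"` heuristic documented above can be checked by hand; the sketch below computes `n_samples / (n_classes * np.bincount(y))` on an illustrative imbalanced label vector and compares it with scikit-learn's `compute_class_weight` helper:

```python
import numpy as np
from sklearn.utils.class_weight import compute_class_weight

y = np.array([0] * 8 + [1] * 2)  # illustrative imbalanced labels: 8 vs 2
classes = np.unique(y)

# The formula from the docstring above.
manual = len(y) / (len(classes) * np.bincount(y))

# scikit-learn's helper, ordered by `classes`.
auto = compute_class_weight(class_weight="balanced", classes=classes, y=y)

print(manual)  # [0.625 2.5  ]
print(auto)    # [0.625 2.5  ]
```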
16,923 | sklearn.tree._classes | __init__ | null | def __init__(
self,
*,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_decrease=0.0,
class_weight=None,
ccp_alpha=0.0,
monotonic_cst=None,
):
super().__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
min_impurity_decrease=min_impurity_decrease,
monotonic_cst=monotonic_cst,
ccp_alpha=ccp_alpha,
)
| (self, *, criterion='gini', splitter='best', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0, class_weight=None, ccp_alpha=0.0, monotonic_cst=None) |
16,945 | sklearn.tree._classes | fit | Build a decision tree classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels) as integers or strings.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. Splits are also
ignored if they would result in any single class carrying a
negative weight in either child node.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you're doing.
Returns
-------
self : DecisionTreeClassifier
Fitted estimator.
| def fit(self, X, y, sample_weight=None, check_input=True):
    """Build a decision tree classifier from the training set (X, y)."""
    super()._fit(
        X,
        y,
        sample_weight=sample_weight,
        check_input=check_input,
    )
    return self
| (self, X, y, sample_weight=None, check_input=True) |
16,950 | sklearn.tree._classes | predict | Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you're doing.
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The predicted classes, or the predict values.
| def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you're doing.
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The predicted classes, or the predict values.
"""
check_is_fitted(self)
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if is_classifier(self):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
class_type = self.classes_[0].dtype
predictions = np.zeros((n_samples, self.n_outputs_), dtype=class_type)
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1), axis=0
)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
| (self, X, check_input=True) |
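As the Notes section above states, `predict` is the argmax of `predict_proba` with ties broken toward the lowest index in `classes_`; the sketch below verifies that for the single-output case (data and depth are illustrative):

```python
import numpy as np
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)

proba = clf.predict_proba(X)
# np.argmax returns the first (lowest-index) maximum, matching the tie-breaking rule.
manual_pred = clf.classes_.take(np.argmax(proba, axis=1), axis=0)

print(np.array_equal(manual_pred, clf.predict(X)))  # expected: True
```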
16,952 | sklearn.tree._classes | predict_proba | Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you're doing.
Returns
-------
proba : ndarray of shape (n_samples, n_classes) or list of n_outputs such arrays if n_outputs > 1
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
| def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you're doing.
Returns
-------
proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \
such arrays if n_outputs > 1
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
check_is_fitted(self)
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
return proba[:, : self.n_classes_]
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, : self.n_classes_[k]]
all_proba.append(proba_k)
return all_proba
| (self, X, check_input=True) |
16,959 | sklearn.tree._classes | DecisionTreeRegressor | A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : {"squared_error", "friedman_mse", "absolute_error", "poisson"}, default="squared_error"
The function to measure the quality of a split. Supported criteria
are "squared_error" for the mean squared error, which is equal to
variance reduction as feature selection criterion and minimizes the L2
loss using the mean of each terminal node, "friedman_mse", which uses
mean squared error with Friedman's improvement score for potential
splits, "absolute_error" for the mean absolute error, which minimizes
the L1 loss using the median of each terminal node, and "poisson" which
uses reduction in Poisson deviance to find splits.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
.. versionadded:: 0.24
Poisson deviance criterion.
splitter : {"best", "random"}, default="best"
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float or {"sqrt", "log2"}, default=None
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`max(1, int(max_features * n_features_in_))` features are considered at each
split.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the estimator. The features are always
randomly permuted at each split, even if ``splitter`` is set to
``"best"``. When ``max_features < n_features``, the algorithm will
select ``max_features`` at random at each split before finding the best
split among them. But the best found split may vary across different
runs, even if ``max_features=n_features``. That is the case, if the
improvement of the criterion is identical for several splits and one
split has to be selected at random. To obtain a deterministic behaviour
during fitting, ``random_state`` has to be fixed to an integer.
See :term:`Glossary <random_state>` for details.
max_leaf_nodes : int, default=None
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details.
.. versionadded:: 0.22
monotonic_cst : array-like of int of shape (n_features), default=None
Indicates the monotonicity constraint to enforce on each feature.
- 1: monotonic increase
- 0: no constraint
- -1: monotonic decrease
If monotonic_cst is None, no constraints are applied.
Monotonicity constraints are not supported for:
- multioutput regressions (i.e. when `n_outputs_ > 1`),
- regressions trained on data with missing values.
Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.
.. versionadded:: 1.4
Attributes
----------
feature_importances_ : ndarray of shape (n_features,)
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
max_features_ : int
The inferred value of max_features.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree instance
The underlying Tree object. Please refer to
``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and
:ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`
for basic usage of these attributes.
See Also
--------
DecisionTreeClassifier : A decision tree classifier.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_diabetes
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> X, y = load_diabetes(return_X_y=True)
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, X, y, cv=10)
... # doctest: +SKIP
...
array([-0.39..., -0.46..., 0.02..., 0.06..., -0.50...,
0.16..., 0.11..., -0.73..., -0.30..., -0.00...])
| class DecisionTreeRegressor(RegressorMixin, BaseDecisionTree):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : {"squared_error", "friedman_mse", "absolute_error", \
"poisson"}, default="squared_error"
The function to measure the quality of a split. Supported criteria
are "squared_error" for the mean squared error, which is equal to
variance reduction as feature selection criterion and minimizes the L2
loss using the mean of each terminal node, "friedman_mse", which uses
mean squared error with Friedman's improvement score for potential
splits, "absolute_error" for the mean absolute error, which minimizes
the L1 loss using the median of each terminal node, and "poisson" which
uses reduction in Poisson deviance to find splits.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
.. versionadded:: 0.24
Poisson deviance criterion.
splitter : {"best", "random"}, default="best"
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float or {"sqrt", "log2"}, default=None
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`max(1, int(max_features * n_features_in_))` features are considered at each
split.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the estimator. The features are always
randomly permuted at each split, even if ``splitter`` is set to
``"best"``. When ``max_features < n_features``, the algorithm will
select ``max_features`` at random at each split before finding the best
split among them. But the best found split may vary across different
runs, even if ``max_features=n_features``. That is the case, if the
improvement of the criterion is identical for several splits and one
split has to be selected at random. To obtain a deterministic behaviour
during fitting, ``random_state`` has to be fixed to an integer.
See :term:`Glossary <random_state>` for details.
max_leaf_nodes : int, default=None
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details.
.. versionadded:: 0.22
monotonic_cst : array-like of int of shape (n_features), default=None
Indicates the monotonicity constraint to enforce on each feature.
- 1: monotonic increase
- 0: no constraint
- -1: monotonic decrease
If monotonic_cst is None, no constraints are applied.
Monotonicity constraints are not supported for:
- multioutput regressions (i.e. when `n_outputs_ > 1`),
- regressions trained on data with missing values.
Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.
.. versionadded:: 1.4
Attributes
----------
feature_importances_ : ndarray of shape (n_features,)
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
max_features_ : int
The inferred value of max_features.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree instance
The underlying Tree object. Please refer to
``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and
:ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`
for basic usage of these attributes.
See Also
--------
DecisionTreeClassifier : A decision tree classifier.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_diabetes
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> X, y = load_diabetes(return_X_y=True)
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, X, y, cv=10)
... # doctest: +SKIP
...
array([-0.39..., -0.46..., 0.02..., 0.06..., -0.50...,
0.16..., 0.11..., -0.73..., -0.30..., -0.00...])
"""
_parameter_constraints: dict = {
**BaseDecisionTree._parameter_constraints,
"criterion": [
StrOptions({"squared_error", "friedman_mse", "absolute_error", "poisson"}),
Hidden(Criterion),
],
}
def __init__(
self,
*,
criterion="squared_error",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_decrease=0.0,
ccp_alpha=0.0,
monotonic_cst=None,
):
super().__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
min_impurity_decrease=min_impurity_decrease,
ccp_alpha=ccp_alpha,
monotonic_cst=monotonic_cst,
)
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (real numbers). Use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you're doing.
Returns
-------
self : DecisionTreeRegressor
Fitted estimator.
"""
super()._fit(
X,
y,
sample_weight=sample_weight,
check_input=check_input,
)
return self
def _compute_partial_dependence_recursion(self, grid, target_features):
"""Fast partial dependence computation.
Parameters
----------
grid : ndarray of shape (n_samples, n_target_features)
The grid points on which the partial dependence should be
evaluated.
target_features : ndarray of shape (n_target_features)
The set of target features for which the partial dependence
should be evaluated.
Returns
-------
averaged_predictions : ndarray of shape (n_samples,)
The value of the partial dependence function on each grid point.
"""
grid = np.asarray(grid, dtype=DTYPE, order="C")
averaged_predictions = np.zeros(
shape=grid.shape[0], dtype=np.float64, order="C"
)
self.tree_.compute_partial_dependence(
grid, target_features, averaged_predictions
)
return averaged_predictions
def _more_tags(self):
# XXX: nan is only supported for dense arrays, but we set this for the common test to
# pass, specifically: check_estimators_nan_inf
allow_nan = self.splitter == "best" and self.criterion in {
"squared_error",
"friedman_mse",
"poisson",
}
return {"allow_nan": allow_nan}
| (*, criterion='squared_error', splitter='best', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0, ccp_alpha=0.0, monotonic_cst=None) |
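A short sketch of the `monotonic_cst` parameter documented above, for a single-feature regression with an increasing constraint (requires scikit-learn >= 1.4 per the versionadded note; the synthetic data is illustrative):

```python
import numpy as np
from sklearn.tree import DecisionTreeRegressor

rng = np.random.RandomState(0)
X = rng.uniform(0, 10, size=(200, 1))
y = 2.0 * X[:, 0] + rng.normal(scale=2.0, size=200)  # noisy increasing trend

# Enforce a monotonically increasing response in the single feature.
reg = DecisionTreeRegressor(max_depth=4, monotonic_cst=[1], random_state=0).fit(X, y)

grid = np.linspace(0, 10, 50).reshape(-1, 1)
pred = reg.predict(grid)
print(bool(np.all(np.diff(pred) >= 0)))  # expected: True under the constraint
```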
16,961 | sklearn.tree._classes | __init__ | null | def __init__(
self,
*,
criterion="squared_error",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_decrease=0.0,
ccp_alpha=0.0,
monotonic_cst=None,
):
super().__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
min_impurity_decrease=min_impurity_decrease,
ccp_alpha=ccp_alpha,
monotonic_cst=monotonic_cst,
)
| (self, *, criterion='squared_error', splitter='best', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0, ccp_alpha=0.0, monotonic_cst=None) |
16,968 | sklearn.tree._classes | _compute_partial_dependence_recursion | Fast partial dependence computation.
Parameters
----------
grid : ndarray of shape (n_samples, n_target_features)
The grid points on which the partial dependence should be
evaluated.
target_features : ndarray of shape (n_target_features)
The set of target features for which the partial dependence
should be evaluated.
Returns
-------
averaged_predictions : ndarray of shape (n_samples,)
The value of the partial dependence function on each grid point.
| def _compute_partial_dependence_recursion(self, grid, target_features):
"""Fast partial dependence computation.
Parameters
----------
grid : ndarray of shape (n_samples, n_target_features)
The grid points on which the partial dependence should be
evaluated.
target_features : ndarray of shape (n_target_features)
The set of target features for which the partial dependence
should be evaluated.
Returns
-------
averaged_predictions : ndarray of shape (n_samples,)
The value of the partial dependence function on each grid point.
"""
grid = np.asarray(grid, dtype=DTYPE, order="C")
averaged_predictions = np.zeros(
shape=grid.shape[0], dtype=np.float64, order="C"
)
self.tree_.compute_partial_dependence(
grid, target_features, averaged_predictions
)
return averaged_predictions
| (self, grid, target_features) |
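`_compute_partial_dependence_recursion` is the private backend behind `method="recursion"` in `sklearn.inspection.partial_dependence`; a small sketch of calling it through that public entry point (dataset and feature index are illustrative):

```python
from sklearn.datasets import load_diabetes
from sklearn.inspection import partial_dependence
from sklearn.tree import DecisionTreeRegressor

X, y = load_diabetes(return_X_y=True)
reg = DecisionTreeRegressor(max_depth=3, random_state=0).fit(X, y)

# "recursion" dispatches to the tree's fast routine above instead of the
# brute-force average over the training data.
pd_result = partial_dependence(reg, X, features=[2], method="recursion")
print(pd_result["average"].shape)  # (1, n_grid_points)
```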
16,973 | sklearn.tree._classes | _more_tags | null | def _more_tags(self):
# XXX: nan is only supported for dense arrays, but we set this for the common test to
# pass, specifically: check_estimators_nan_inf
allow_nan = self.splitter == "best" and self.criterion in {
"squared_error",
"friedman_mse",
"poisson",
}
return {"allow_nan": allow_nan}
| (self) |
16,984 | sklearn.tree._classes | fit | Build a decision tree regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (real numbers). Use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you're doing.
Returns
-------
self : DecisionTreeRegressor
Fitted estimator.
| def fit(self, X, y, sample_weight=None, check_input=True):
    """Build a decision tree regressor from the training set (X, y)."""
    super()._fit(
        X,
        y,
        sample_weight=sample_weight,
        check_input=check_input,
    )
    return self
| (self, X, y, sample_weight=None, check_input=True) |
16,995 | imodels.util.distillation | DistilledRegressor |
Class to implement distillation. Currently only supports regression.
Params
------
teacher: initial model to be trained
must be a regressor or a binary classifier
student: model to be distilled from teacher's predictions
must be a regressor
| class DistilledRegressor(BaseEstimator, RegressorMixin):
"""
Class to implement distillation. Currently only supports regression.
Params
------
teacher: initial model to be trained
must be a regressor or a binary classifier
student: model to be distilled from teacher's predictions
must be a regressor
"""
def __init__(self, teacher: BaseEstimator, student: BaseEstimator,
n_iters_teacher: int=1):
self.teacher = teacher
self.student = student
self.n_iters_teacher = n_iters_teacher
self._validate_student()
self._check_teacher_type()
def _validate_student(self):
if is_regressor(self.student):
pass
else:
if not hasattr(self.student, "prediction_task"):
raise ValueError("Student must be either a scikit-learn or imodels regressor")
elif self.student.prediction_task == "classification":
raise ValueError("Student must be a regressor")
def _check_teacher_type(self):
if hasattr(self.teacher, "prediction_task"):
self.teacher_type = self.teacher.prediction_task
elif hasattr(self.teacher, "_estimator_type"):
if is_regressor(self.teacher):
self.teacher_type = "regression"
else:
self.teacher_type = "classification"
def set_teacher_params(self, **params):
self.teacher.set_params(**params)
def set_student_params(self, **params):
self.student.set_params(**params)
def fit(self, X, y, **kwargs):
# fit teacher
for iter_teacher in range(self.n_iters_teacher):
self.teacher.fit(X, y, **kwargs)
if self.teacher_type == "regression":
y = self.teacher.predict(X)
else:
y = self.teacher.predict_proba(X)[:, 1] # assumes binary classifier
# fit student
self.student.fit(X, y)
def predict(self, X):
return self.student.predict(X)
| (teacher: sklearn.base.BaseEstimator, student: sklearn.base.BaseEstimator, n_iters_teacher: int = 1) |
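A usage sketch for `DistilledRegressor`, assuming the import path implied by the package column (`imodels.util.distillation`); the teacher/student choices and dataset are illustrative:

```python
from sklearn.datasets import load_diabetes
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor

from imodels.util.distillation import DistilledRegressor  # path taken from the package column

X, y = load_diabetes(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Flexible teacher, small interpretable student fit on the teacher's predictions.
model = DistilledRegressor(
    teacher=RandomForestRegressor(n_estimators=100, random_state=0),
    student=DecisionTreeRegressor(max_depth=3, random_state=0),
)
model.fit(X_train, y_train)
print(model.predict(X_test)[:5])
```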
16,997 | imodels.util.distillation | __init__ | null | def __init__(self, teacher: BaseEstimator, student: BaseEstimator,
n_iters_teacher: int=1):
self.teacher = teacher
self.student = student
self.n_iters_teacher = n_iters_teacher
self._validate_student()
self._check_teacher_type()
| (self, teacher: sklearn.base.BaseEstimator, student: sklearn.base.BaseEstimator, n_iters_teacher: int = 1) |
17,003 | imodels.util.distillation | _check_teacher_type | null | def _check_teacher_type(self):
if hasattr(self.teacher, "prediction_task"):
self.teacher_type = self.teacher.prediction_task
elif hasattr(self.teacher, "_estimator_type"):
if is_regressor(self.teacher):
self.teacher_type = "regression"
else:
self.teacher_type = "classification"
| (self) |
17,012 | imodels.util.distillation | _validate_student | null | def _validate_student(self):
if is_regressor(self.student):
pass
else:
if not hasattr(self.student, "prediction_task"):
raise ValueError("Student must be either a scikit-learn or imodels regressor")
elif self.student.prediction_task == "classification":
raise ValueError("Student must be a regressor")
| (self) |
17,013 | imodels.util.distillation | fit | null | def fit(self, X, y, **kwargs):
# fit teacher
for iter_teacher in range(self.n_iters_teacher):
self.teacher.fit(X, y, **kwargs)
if self.teacher_type == "regression":
y = self.teacher.predict(X)
else:
y = self.teacher.predict_proba(X)[:, 1] # assumes binary classifier
# fit student
self.student.fit(X, y)
| (self, X, y, **kwargs) |
17,016 | imodels.util.distillation | predict | null | def predict(self, X):
return self.student.predict(X)
| (self, X) |
17,020 | imodels.util.distillation | set_student_params | null | def set_student_params(self, **params):
self.student.set_params(**params)
| (self, **params) |
17,021 | imodels.util.distillation | set_teacher_params | null | def set_teacher_params(self, **params):
self.teacher.set_params(**params)
| (self, **params) |
17,022 | imodels.tree.figs | FIGSClassifier | null | class FIGSClassifier(FIGS, ClassifierMixin):
...
| (max_rules: int = 12, max_trees: int = None, min_impurity_decrease: float = 0.0, random_state=None, max_features: str = None) |
17,024 | imodels.tree.figs | __init__ |
Params
------
max_rules: int
Max total number of rules across all trees
max_trees: int
Max total number of trees
min_impurity_decrease: float
A node will be split if this split induces a decrease of the impurity greater than or equal to this value.
max_features
The number of features to consider when looking for the best split (see https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)
| def __init__(
self,
max_rules: int = 12,
max_trees: int = None,
min_impurity_decrease: float = 0.0,
random_state=None,
max_features: str = None,
):
"""
Params
------
max_rules: int
Max total number of rules across all trees
max_trees: int
Max total number of trees
min_impurity_decrease: float
A node will be split if this split induces a decrease of the impurity greater than or equal to this value.
max_features
The number of features to consider when looking for the best split (see https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)
"""
super().__init__()
self.max_rules = max_rules
self.max_trees = max_trees
self.min_impurity_decrease = min_impurity_decrease
self.random_state = random_state
self.max_features = max_features
self._init_decision_function()
| (self, max_rules: int = 12, max_trees: Optional[int] = None, min_impurity_decrease: float = 0.0, random_state=None, max_features: Optional[str] = None) |
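A minimal fit/predict sketch for the classifier configured above; the synthetic data and feature names are assumptions for illustration:

```python
# Hedged sketch: fit FIGS on a toy binary-classification problem and inspect it.
import numpy as np
from imodels import FIGSClassifier

rng = np.random.RandomState(0)
X = rng.rand(300, 5)
y = (X[:, 0] + X[:, 1] > 1).astype(int)   # illustrative binary target

clf = FIGSClassifier(max_rules=6, max_trees=2)
clf.fit(X, y, feature_names=[f"feat_{i}" for i in range(X.shape[1])])
print(clf)                    # readable summary of the fitted tree sum
proba = clf.predict_proba(X)  # sigmoid of the summed tree values (see predict_proba below)
```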
17,028 | imodels.tree.figs | __str__ | null | def __str__(self):
if not hasattr(self, "trees_"):
s = self.__class__.__name__
s += "("
s += "max_rules="
s += repr(self.max_rules)
s += ")"
return s
else:
s = "> ------------------------------\n"
s += "> FIGS-Fast Interpretable Greedy-Tree Sums:\n"
s += '> \tPredictions are made by summing the "Val" reached by traversing each tree.\n'
s += "> \tFor classifiers, a sigmoid function is then applied to the sum.\n"
s += "> ------------------------------\n"
s += "\n\t+\n".join([self._tree_to_str(t) for t in self.trees_])
if hasattr(self, "feature_names_") and self.feature_names_ is not None:
for i in range(len(self.feature_names_))[::-1]:
s = s.replace(f"X_{i}", self.feature_names_[i])
return s
| (self) |
17,031 | imodels.tree.figs | _construct_node_with_stump |
Params
------
compare_nodes_with_sample_weight: Deprecated
If True and sample_weight is passed, sample_weight is used to compare nodes;
otherwise, sample_weight is used only to pick a split within a given node
| def _construct_node_with_stump(
self,
X,
y,
idxs,
tree_num,
sample_weight=None,
compare_nodes_with_sample_weight=True,
max_features=None,
):
"""
Params
------
compare_nodes_with_sample_weight: Deprecated
If True and sample_weight is passed, sample_weight is used to compare nodes;
otherwise, sample_weight is used only to pick a split within a given node
"""
# array indices
SPLIT = 0
LEFT = 1
RIGHT = 2
# fit stump
stump = tree.DecisionTreeRegressor(max_depth=1, max_features=max_features)
sweight = None
if sample_weight is not None:
sweight = sample_weight[idxs]
stump.fit(X[idxs], y[idxs], sample_weight=sweight)
# these are all arrays, arr[0] is split node
# note: -2 is dummy
feature = stump.tree_.feature
threshold = stump.tree_.threshold
impurity = stump.tree_.impurity
n_node_samples = stump.tree_.n_node_samples
value = stump.tree_.value
# no split
if len(feature) == 1:
# print('no split found!', idxs.sum(), impurity, feature)
return Node(
idxs=idxs,
value=value[SPLIT],
tree_num=tree_num,
feature=feature[SPLIT],
threshold=threshold[SPLIT],
impurity=impurity[SPLIT],
impurity_reduction=None,
)
# manage sample weights
idxs_split = X[:, feature[SPLIT]] <= threshold[SPLIT]
idxs_left = idxs_split & idxs
idxs_right = ~idxs_split & idxs
if sample_weight is None:
n_node_samples_left = n_node_samples[LEFT]
n_node_samples_right = n_node_samples[RIGHT]
else:
n_node_samples_left = sample_weight[idxs_left].sum()
n_node_samples_right = sample_weight[idxs_right].sum()
n_node_samples_split = n_node_samples_left + n_node_samples_right
# calculate impurity
impurity_reduction = (
impurity[SPLIT]
- impurity[LEFT] * n_node_samples_left / n_node_samples_split
- impurity[RIGHT] * n_node_samples_right / n_node_samples_split
) * n_node_samples_split
node_split = Node(
idxs=idxs,
value=value[SPLIT],
tree_num=tree_num,
feature=feature[SPLIT],
threshold=threshold[SPLIT],
impurity=impurity[SPLIT],
impurity_reduction=impurity_reduction,
)
# print('\t>>>', node_split, 'impurity', impurity, 'num_pts', idxs.sum(), 'imp_reduc', impurity_reduction)
# manage children
node_left = Node(
idxs=idxs_left,
value=value[LEFT],
impurity=impurity[LEFT],
tree_num=tree_num,
)
node_right = Node(
idxs=idxs_right,
value=value[RIGHT],
impurity=impurity[RIGHT],
tree_num=tree_num,
)
node_split.setattrs(
left_temp=node_left,
right_temp=node_right,
)
return node_split
| (self, X, y, idxs, tree_num, sample_weight=None, compare_nodes_with_sample_weight=True, max_features=None) |
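The weighted impurity-reduction bookkeeping above can be reproduced directly from a fitted depth-1 stump using plain scikit-learn; all names and data in the sketch below are illustrative:

```python
# Hedged sketch: recompute a stump's impurity reduction the same way as above.
import numpy as np
from sklearn.tree import DecisionTreeRegressor

rng = np.random.RandomState(0)
X = rng.rand(100, 3)
y = (X[:, 0] > 0.5).astype(float)

stump = DecisionTreeRegressor(max_depth=1).fit(X, y)
t = stump.tree_
SPLIT, LEFT, RIGHT = 0, 1, 2                       # node layout of a depth-1 tree
n_left, n_right = t.n_node_samples[LEFT], t.n_node_samples[RIGHT]
n_split = n_left + n_right
impurity_reduction = (
    t.impurity[SPLIT]
    - t.impurity[LEFT] * n_left / n_split
    - t.impurity[RIGHT] * n_right / n_split
) * n_split                                        # same weighting FIGS uses to rank candidate splits
print(impurity_reduction)
```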
17,032 | imodels.tree.figs | _encode_categories | null | def _encode_categories(self, X, categorical_features):
encoder = None
if hasattr(self, "_encoder"):
encoder = self._encoder
return encode_categories(X, categorical_features, encoder)
| (self, X, categorical_features) |
17,036 | imodels.tree.figs | _init_decision_function | Sets decision function based on _estimator_type | def _init_decision_function(self):
"""Sets decision function based on _estimator_type"""
# used by sklearn GridSearchCV, BaggingClassifier
if isinstance(self, ClassifierMixin):
def decision_function(x):
return self.predict_proba(x)[:, 1]
elif isinstance(self, RegressorMixin):
decision_function = self.predict
| (self) |
17,038 | imodels.tree.figs | _predict_tree | Predict for a single tree | def _predict_tree(self, root: Node, X):
"""Predict for a single tree"""
def _predict_tree_single_point(root: Node, x):
if root.left is None and root.right is None:
return root.value[0, 0]
left = x[root.feature] <= root.threshold
if left:
if root.left is None: # we don't actually have to worry about this case
return root.value
else:
return _predict_tree_single_point(root.left, x)
else:
if (
root.right is None
): # we don't actually have to worry about this case
return root.value
else:
return _predict_tree_single_point(root.right, x)
preds = np.zeros(X.shape[0])
for i in range(X.shape[0]):
preds[i] = _predict_tree_single_point(root, X[i])
return preds
| (self, root: imodels.tree.figs.Node, X) |
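Since each tree is traversed independently, the model's prediction is just the sum of these per-tree traversals. A small check of that decomposition, continuing the hypothetical `clf` and `X` from the earlier sketch:

```python
# Hedged sketch: FIGS predictions decompose into a sum over trees.
import numpy as np

per_tree = [clf._predict_tree(root, X) for root in clf.trees_]    # one contribution per tree
summed = np.sum(per_tree, axis=0)
assert np.array_equal(summed > 0.5, clf.predict(X).astype(bool))  # the classifier thresholds the sum at 0.5
```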
17,041 | imodels.tree.figs | _tree_to_str | null | def _tree_to_str(self, root: Node, prefix=""):
if root is None:
return ""
elif root.threshold is None:
return ""
pprefix = prefix + "\t"
return (
prefix
+ str(root)
+ "\n"
+ self._tree_to_str(root.left, pprefix)
+ self._tree_to_str(root.right, pprefix)
)
| (self, root: imodels.tree.figs.Node, prefix='') |
17,042 | imodels.tree.figs | _tree_to_str_with_data | null | def _tree_to_str_with_data(self, X, y, root: Node, prefix=""):
if root is None:
return ""
elif root.threshold is None:
return ""
pprefix = prefix + "\t"
left = X[:, root.feature] <= root.threshold
return (
prefix
+ root.print_root(y)
+ "\n"
+ self._tree_to_str_with_data(X[left], y[left], root.left, pprefix)
+ self._tree_to_str_with_data(X[~left], y[~left], root.right, pprefix)
)
| (self, X, y, root: imodels.tree.figs.Node, prefix='') |
17,045 | imodels.tree.figs | fit |
Params
------
_sample_weight: array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Splits that would create child nodes with net zero or negative weight
are ignored while searching for a split in each node.
| def fit(
self,
X,
y=None,
feature_names=None,
verbose=False,
sample_weight=None,
categorical_features=None,
):
"""
Params
------
_sample_weight: array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Splits that would create child nodes with net zero or negative weight
are ignored while searching for a split in each node.
"""
if categorical_features is not None:
X, self._encoder = self._encode_categories(X, categorical_features)
X, y, feature_names = check_fit_arguments(self, X, y, feature_names)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
self.trees_ = [] # list of the root nodes of added trees
self.complexity_ = 0 # tracks the number of rules in the model
y_predictions_per_tree = {} # predictions for each tree
y_residuals_per_tree = {} # based on predictions above
# set up initial potential_splits
# everything in potential_splits either is_root (so it can be added directly to self.trees_)
# or it is a child of a root node that has already been added
idxs = np.ones(X.shape[0], dtype=bool)
node_init = self._construct_node_with_stump(
X=X,
y=y,
idxs=idxs,
tree_num=-1,
sample_weight=sample_weight,
max_features=self.max_features,
)
potential_splits = [node_init]
for node in potential_splits:
node.setattrs(is_root=True)
potential_splits = sorted(potential_splits, key=lambda x: x.impurity_reduction)
# start the greedy fitting algorithm
finished = False
while len(potential_splits) > 0 and not finished:
# print('potential_splits', [str(s) for s in potential_splits])
# get node with max impurity_reduction (since it's sorted)
split_node = potential_splits.pop()
# don't split on node
if split_node.impurity_reduction < self.min_impurity_decrease:
finished = True
break
elif (
split_node.is_root
and self.max_trees is not None
and len(self.trees_) >= self.max_trees
):
# If the node is the root of a new tree and we have reached self.max_trees,
# don't split on it, but allow later splits to continue growing existing trees
continue
# split on node
if verbose:
print("\nadding " + str(split_node))
self.complexity_ += 1
# if added a tree root
if split_node.is_root:
# start a new tree
self.trees_.append(split_node)
# update tree_num
for node_ in [split_node, split_node.left_temp, split_node.right_temp]:
if node_ is not None:
node_.tree_num = len(self.trees_) - 1
# add new root potential node
node_new_root = Node(
is_root=True, idxs=np.ones(X.shape[0], dtype=bool), tree_num=-1
)
potential_splits.append(node_new_root)
# add children to potential splits
# assign left_temp, right_temp to be proper children
# (basically adds them to tree in predict method)
split_node.setattrs(left=split_node.left_temp, right=split_node.right_temp)
# add children to potential_splits
potential_splits.append(split_node.left)
potential_splits.append(split_node.right)
# update predictions for altered tree
for tree_num_ in range(len(self.trees_)):
y_predictions_per_tree[tree_num_] = self._predict_tree(
self.trees_[tree_num_], X
)
# dummy 0 preds for possible new trees
y_predictions_per_tree[-1] = np.zeros(X.shape[0])
# update residuals for each tree
# -1 is key for potential new tree
for tree_num_ in list(range(len(self.trees_))) + [-1]:
y_residuals_per_tree[tree_num_] = deepcopy(y)
# subtract predictions of all other trees
# Since the current tree makes a constant prediction over the node being split,
# one may ignore its contributions to the residuals without affecting the impurity decrease.
for tree_num_other_ in range(len(self.trees_)):
if not tree_num_other_ == tree_num_:
y_residuals_per_tree[tree_num_] -= y_predictions_per_tree[
tree_num_other_
]
# recompute all impurities + update potential_split children
potential_splits_new = []
for potential_split in potential_splits:
y_target = y_residuals_per_tree[potential_split.tree_num]
# re-calculate the best split
potential_split_updated = self._construct_node_with_stump(
X=X,
y=y_target,
idxs=potential_split.idxs,
tree_num=potential_split.tree_num,
sample_weight=sample_weight,
max_features=self.max_features,
)
# need to preserve certain attributes from before (value at this split + is_root)
# value may change because residuals may have changed, but we want it to store the value from before
potential_split.setattrs(
feature=potential_split_updated.feature,
threshold=potential_split_updated.threshold,
impurity_reduction=potential_split_updated.impurity_reduction,
impurity=potential_split_updated.impurity,
left_temp=potential_split_updated.left_temp,
right_temp=potential_split_updated.right_temp,
)
# this is a valid split
if potential_split.impurity_reduction is not None:
potential_splits_new.append(potential_split)
# sort so largest impurity reduction comes last (should probs make this a heap later)
potential_splits = sorted(
potential_splits_new, key=lambda x: x.impurity_reduction
)
if verbose:
print(self)
if self.max_rules is not None and self.complexity_ >= self.max_rules:
finished = True
break
# annotate final tree with node_id and value_sklearn, and prepare importance_data_
importance_data = []
for tree_ in self.trees_:
node_counter = iter(range(0, int(1e06)))
def _annotate_node(node: Node, X, y):
if node is None:
return
# TODO does not incorporate sample weights
value_counts = pd.Series(y).value_counts()
try:
neg_count = value_counts[0.0]
except KeyError:
neg_count = 0
try:
pos_count = value_counts[1.0]
except KeyError:
pos_count = 0
value_sklearn = np.array([neg_count, pos_count], dtype=float)
node.setattrs(node_id=next(node_counter), value_sklearn=value_sklearn)
idxs_left = X[:, node.feature] <= node.threshold
_annotate_node(node.left, X[idxs_left], y[idxs_left])
_annotate_node(node.right, X[~idxs_left], y[~idxs_left])
_annotate_node(tree_, X, y)
# now that the samples per node are known, we can start to compute the importances
importance_data_tree = np.zeros(len(self.feature_names_))
def _importances(node: Node):
if node is None or node.left is None:
return 0.0
# TODO does not incorporate sample weights, but will if added to value_sklearn
importance_data_tree[node.feature] += (
np.sum(node.value_sklearn) * node.impurity
- np.sum(node.left.value_sklearn) * node.left.impurity
- np.sum(node.right.value_sklearn) * node.right.impurity
)
return (
np.sum(node.value_sklearn)
+ _importances(node.left)
+ _importances(node.right)
)
# require the tree to have more than 1 node, otherwise just leave importance_data_tree as zeros
if 1 < next(node_counter):
tree_samples = _importances(tree_)
if tree_samples != 0:
importance_data_tree /= tree_samples
else:
importance_data_tree = 0
importance_data.append(importance_data_tree)
self.importance_data_ = importance_data
return self
| (self, X, y=None, feature_names=None, verbose=False, sample_weight=None, categorical_features=None) |
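After fitting, `importance_data_` holds one per-feature importance vector per tree (roughly normalized within each tree). Averaging across trees gives a crude overall importance; this aggregation is an illustrative convention, not a library API, and continues the hypothetical `clf` from earlier:

```python
# Hedged sketch: aggregate per-tree importances into a single ranking.
import numpy as np

arrs = [a for a in clf.importance_data_ if np.ndim(a) == 1]   # skip single-node trees, which are stored as 0
overall = np.mean(np.vstack(arrs), axis=0) if arrs else np.zeros(len(clf.feature_names_))
for name, imp in sorted(zip(clf.feature_names_, overall), key=lambda t: -t[1]):
    print(f"{name}: {imp:.3f}")
```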
17,048 | imodels.tree.figs | plot | null | def plot(
self,
cols=2,
feature_names=None,
filename=None,
label="all",
impurity=False,
tree_number=None,
dpi=150,
fig_size=None,
):
is_single_tree = len(self.trees_) < 2 or tree_number is not None
n_cols = int(cols)
n_rows = int(np.ceil(len(self.trees_) / n_cols))
if feature_names is None:
if hasattr(self, "feature_names_") and self.feature_names_ is not None:
feature_names = self.feature_names_
n_plots = int(len(self.trees_)) if tree_number is None else 1
fig, axs = plt.subplots(n_plots, dpi=dpi)
if fig_size is not None:
fig.set_size_inches(fig_size, fig_size)
n_classes = 1 if isinstance(self, RegressorMixin) else 2
ax_size = int(len(self.trees_))
for i in range(n_plots):
r = i // n_cols
c = i % n_cols
if not is_single_tree:
ax = axs[i]
else:
ax = axs
try:
dt = extract_sklearn_tree_from_figs(
self, i if tree_number is None else tree_number, n_classes
)
plot_tree(
dt,
ax=ax,
feature_names=feature_names,
label=label,
impurity=impurity,
)
except IndexError:
ax.axis("off")
continue
ttl = f"Tree {i}" if n_plots > 1 else f"Tree {tree_number}"
ax.set_title(ttl)
if filename is not None:
plt.savefig(filename)
return
plt.show()
| (self, cols=2, feature_names=None, filename=None, label='all', impurity=False, tree_number=None, dpi=150, fig_size=None) |
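A short usage sketch for the plotting helper above, assuming matplotlib is available and `clf` was fit as in the earlier example (the output filename is illustrative):

```python
# Hedged sketch: visualize the trees in the sum.
clf.plot(cols=2, fig_size=6)                    # draw every tree in the fitted model
clf.plot(tree_number=0, filename="tree0.png")   # or render a single tree and save it to disk
```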
17,049 | imodels.tree.figs | predict | null | def predict(self, X, categorical_features=None):
if hasattr(self, "_encoder"):
X = self._encode_categories(X, categorical_features=categorical_features)
X = check_array(X)
preds = np.zeros(X.shape[0])
for tree in self.trees_:
preds += self._predict_tree(tree, X)
if isinstance(self, RegressorMixin):
return preds
elif isinstance(self, ClassifierMixin):
return (preds > 0.5).astype(int)
| (self, X, categorical_features=None) |
17,050 | imodels.tree.figs | predict_proba | Predict probability for classifiers:
Default behavior is to constrain the outputs to the range of probabilities, i.e. 0 to 1, with a sigmoid function.
Set use_clipped_prediction=True to use prior behavior of clipping between 0 and 1 instead.
| def predict_proba(self, X, categorical_features=None, use_clipped_prediction=False):
"""Predict probability for classifiers:
Default behavior is to constrain the outputs to the range of probabilities, i.e. 0 to 1, with a sigmoid function.
Set use_clipped_prediction=True to use prior behavior of clipping between 0 and 1 instead.
"""
if hasattr(self, "_encoder"):
X = self._encode_categories(X, categorical_features=categorical_features)
X = check_array(X)
if isinstance(self, RegressorMixin):
return NotImplemented
preds = np.zeros(X.shape[0])
for tree in self.trees_:
preds += self._predict_tree(tree, X)
if use_clipped_prediction:
# old behavior, pre v1.3.9
# constrain to range of probabilities by clipping
preds = np.clip(preds, a_min=0.0, a_max=1.0)
else:
# constrain to range of probabilities with a sigmoid function
preds = expit(preds)
return np.vstack((1 - preds, preds)).transpose()
| (self, X, categorical_features=None, use_clipped_prediction=False) |
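The probability construction above can be checked by hand: sum the per-tree outputs, pass the sum through a sigmoid, and stack the two class columns. Continuing the hypothetical `clf` and `X`:

```python
# Hedged sketch: reproduce predict_proba from the summed tree outputs.
import numpy as np
from scipy.special import expit

raw_sum = sum(clf._predict_tree(t, X) for t in clf.trees_)  # summed tree values
p_pos = expit(raw_sum)                                      # sigmoid squashes the sum into [0, 1]
proba = np.vstack((1 - p_pos, p_pos)).T
assert np.allclose(proba, clf.predict_proba(X))
```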
17,051 | imodels.tree.figs | print_tree | null | def print_tree(self, X, y, feature_names=None):
s = "------------\n" + "\n\t+\n".join(
[self._tree_to_str_with_data(X, y, t) for t in self.trees_]
)
if feature_names is None:
if hasattr(self, "feature_names_") and self.feature_names_ is not None:
feature_names = self.feature_names_
if feature_names is not None:
for i in range(len(feature_names))[::-1]:
s = s.replace(f"X_{i}", feature_names[i])
return s
| (self, X, y, feature_names=None) |
17,053 | sklearn.utils._metadata_requests | set_fit_request | Request metadata passed to the ``fit`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
categorical_features : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``categorical_features`` parameter in ``fit``.
feature_names : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``feature_names`` parameter in ``fit``.
sample_weight : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``sample_weight`` parameter in ``fit``.
verbose : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``verbose`` parameter in ``fit``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: imodels.tree.figs.FIGSClassifier, *, categorical_features: Union[bool, NoneType, str] = '$UNCHANGED$', feature_names: Union[bool, NoneType, str] = '$UNCHANGED$', sample_weight: Union[bool, NoneType, str] = '$UNCHANGED$', verbose: Union[bool, NoneType, str] = '$UNCHANGED$') -> imodels.tree.figs.FIGSClassifier |
17,055 | sklearn.utils._metadata_requests | set_predict_proba_request | Request metadata passed to the ``predict_proba`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``predict_proba`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``predict_proba``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
categorical_features : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``categorical_features`` parameter in ``predict_proba``.
use_clipped_prediction : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``use_clipped_prediction`` parameter in ``predict_proba``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: imodels.tree.figs.FIGSClassifier, *, categorical_features: Union[bool, NoneType, str] = '$UNCHANGED$', use_clipped_prediction: Union[bool, NoneType, str] = '$UNCHANGED$') -> imodels.tree.figs.FIGSClassifier |
17,056 | sklearn.utils._metadata_requests | set_predict_request | Request metadata passed to the ``predict`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``predict`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``predict``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
categorical_features : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``categorical_features`` parameter in ``predict``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: imodels.tree.figs.FIGSClassifier, *, categorical_features: Union[bool, NoneType, str] = '$UNCHANGED$') -> imodels.tree.figs.FIGSClassifier |
17,058 | imodels.tree.figs | FIGSClassifierCV | null | class FIGSClassifierCV(FIGSCV):
def __init__(
self,
n_rules_list: List[int] = [6, 12, 24, 30, 50],
n_trees_list: List[int] = [5, 5, 5, 5, 5],
cv: int = 3,
scoring="accuracy",
*args,
**kwargs,
):
super(FIGSClassifierCV, self).__init__(
figs=FIGSClassifier,
n_rules_list=n_rules_list,
n_trees_list=n_trees_list,
cv=cv,
scoring=scoring,
*args,
**kwargs,
)
| (n_rules_list: List[int] = [6, 12, 24, 30, 50], n_trees_list: List[int] = [5, 5, 5, 5, 5], cv: int = 3, scoring='accuracy', *args, **kwargs) |
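A hedged usage sketch of the cross-validated wrapper above: each (max_rules, max_trees) pair in the lists is scored with cross-validation and the best configuration is refit on the full data (synthetic `X`, `y` as in the earlier FIGS sketch):

```python
# Hedged sketch: pick max_rules / max_trees by cross-validated accuracy.
from imodels.tree.figs import FIGSClassifierCV

cv_model = FIGSClassifierCV(n_rules_list=[4, 8, 16], n_trees_list=[2, 2, 2], cv=3)
cv_model.fit(X, y)                      # keeps the configuration with the best mean CV score
best_proba = cv_model.predict_proba(X)  # delegates to the refit FIGS model
```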
17,059 | imodels.tree.figs | __init__ | null | def __init__(
self,
n_rules_list: List[int] = [6, 12, 24, 30, 50],
n_trees_list: List[int] = [5, 5, 5, 5, 5],
cv: int = 3,
scoring="accuracy",
*args,
**kwargs,
):
super(FIGSClassifierCV, self).__init__(
figs=FIGSClassifier,
n_rules_list=n_rules_list,
n_trees_list=n_trees_list,
cv=cv,
scoring=scoring,
*args,
**kwargs,
)
| (self, n_rules_list: List[int] = [6, 12, 24, 30, 50], n_trees_list: List[int] = [5, 5, 5, 5, 5], cv: int = 3, scoring='accuracy', *args, **kwargs) |
17,060 | imodels.tree.figs | fit | null | def fit(self, X, y):
self.scores_ = []
for _i, n_rules in enumerate(self.n_rules_list):
est = self._figs_class(max_rules=n_rules, max_trees=self.n_trees_list[_i])
cv_scores = cross_val_score(est, X, y, cv=self.cv, scoring=self.scoring)
mean_score = np.mean(cv_scores)
if len(self.scores_) == 0:
self.figs = est
elif mean_score > np.max(self.scores_):
self.figs = est
self.scores_.append(mean_score)
self.figs.fit(X=X, y=y)
| (self, X, y) |
17,061 | imodels.tree.figs | predict | null | def predict(self, X):
return self.figs.predict(X)
| (self, X) |
17,062 | imodels.tree.figs | predict_proba | null | def predict_proba(self, X):
return self.figs.predict_proba(X)
| (self, X) |
17,063 | imodels.tree.figs | FIGSRegressor | null | class FIGSRegressor(FIGS, RegressorMixin):
...
| (max_rules: int = 12, max_trees: int = None, min_impurity_decrease: float = 0.0, random_state=None, max_features: str = None) |
17,099 | imodels.tree.figs | FIGSRegressorCV | null | class FIGSRegressorCV(FIGSCV):
def __init__(
self,
n_rules_list: List[int] = [6, 12, 24, 30, 50],
n_trees_list: List[int] = [5, 5, 5, 5, 5],
cv: int = 3,
scoring="r2",
*args,
**kwargs,
):
super(FIGSRegressorCV, self).__init__(
figs=FIGSRegressor,
n_rules_list=n_rules_list,
n_trees_list=n_trees_list,
cv=cv,
scoring=scoring,
*args,
**kwargs,
)
| (n_rules_list: List[int] = [6, 12, 24, 30, 50], n_trees_list: List[int] = [5, 5, 5, 5, 5], cv: int = 3, scoring='r2', *args, **kwargs) |
17,100 | imodels.tree.figs | __init__ | null | def __init__(
self,
n_rules_list: List[int] = [6, 12, 24, 30, 50],
n_trees_list: List[int] = [5, 5, 5, 5, 5],
cv: int = 3,
scoring="r2",
*args,
**kwargs,
):
super(FIGSRegressorCV, self).__init__(
figs=FIGSRegressor,
n_rules_list=n_rules_list,
n_trees_list=n_trees_list,
cv=cv,
scoring=scoring,
*args,
**kwargs,
)
| (self, n_rules_list: List[int] = [6, 12, 24, 30, 50], n_trees_list: List[int] = [5, 5, 5, 5, 5], cv: int = 3, scoring='r2', *args, **kwargs) |
17,104 | imodels.rule_set.fplasso | FPLassoClassifier | null | class FPLassoClassifier(FPLasso, ClassifierMixin):
def _init_prediction_task(self):
self.prediction_task = 'classification'
| (minsupport=0.1, maxcardinality=2, verbose=False, n_estimators=100, tree_size=4, sample_fract='default', max_rules=2000, memory_par=0.01, tree_generator=None, lin_trim_quantile=0.025, lin_standardise=True, exp_rand_tree_size=True, include_linear=True, alpha=None, random_state=None) |
17,106 | imodels.rule_set.fplasso | __init__ | null | def __init__(self,
minsupport=0.1,
maxcardinality=2,
verbose=False,
n_estimators=100,
tree_size=4,
sample_fract='default',
max_rules=2000,
memory_par=0.01,
tree_generator=None,
lin_trim_quantile=0.025,
lin_standardise=True,
exp_rand_tree_size=True,
include_linear=True,
alpha=None,
random_state=None):
super().__init__(n_estimators,
tree_size,
sample_fract,
max_rules,
memory_par,
tree_generator,
lin_trim_quantile,
lin_standardise,
exp_rand_tree_size,
include_linear,
alpha,
random_state)
self.minsupport = minsupport
self.maxcardinality = maxcardinality
self.verbose = verbose
| (self, minsupport=0.1, maxcardinality=2, verbose=False, n_estimators=100, tree_size=4, sample_fract='default', max_rules=2000, memory_par=0.01, tree_generator=None, lin_trim_quantile=0.025, lin_standardise=True, exp_rand_tree_size=True, include_linear=True, alpha=None, random_state=None) |
17,110 | imodels.rule_set.rule_fit | __str__ | null | def __str__(self):
if not hasattr(self, 'coef'):
s = self.__class__.__name__
s += "("
s += "max_rules="
s += repr(self.max_rules)
s += ")"
return s
else:
s = '> ------------------------------\n'
s += '> RuleFit:\n'
s += '> \tPredictions are made by summing the coefficients of each rule\n'
s += '> ------------------------------\n'
return s + self.visualize().to_string(index=False) + '\n'
| (self) |
17,114 | imodels.rule_set.fplasso | _extract_rules | null | def _extract_rules(self, X, y) -> List[str]:
X = pd.DataFrame(X, columns=self.feature_placeholders)
itemsets = extract_fpgrowth(X, minsupport=self.minsupport,
maxcardinality=self.maxcardinality,
verbose=self.verbose)
return itemsets_to_rules(itemsets)
| (self, X, y) -> List[str] |
17,118 | imodels.rule_set.rule_fit | _get_rules | Return the estimated rules
Parameters
----------
exclude_zero_coef: If True, returns only the rules with an estimated
coefficient not equal to zero (the default, False, returns all rules).
subregion: If None (default) returns global importances (FP 2004 eq. 28/29), else returns importance over
subregion of inputs (FP 2004 eq. 30/31/32).
Returns
-------
rules: pandas.DataFrame with the rules. Column 'rule' describes the rule, 'coef' holds
the coefficients and 'support' the support of the rule in the training
data set (X)
| def _get_rules(self, exclude_zero_coef=False, subregion=None):
"""Return the estimated rules
Parameters
----------
exclude_zero_coef: If True, returns only the rules with an estimated
coefficient not equal to zero (the default, False, returns all rules).
subregion: If None (default) returns global importances (FP 2004 eq. 28/29), else returns importance over
subregion of inputs (FP 2004 eq. 30/31/32).
Returns
-------
rules: pandas.DataFrame with the rules. Column 'rule' describes the rule, 'coef' holds
the coefficients and 'support' the support of the rule in the training
data set (X)
"""
n_features = len(self.coef) - len(self.rules_)
rule_ensemble = list(self.rules_without_feature_names_)
output_rules = []
# Add coefficients for linear effects
for i in range(0, n_features):
if self.lin_standardise:
coef = self.coef[i] * self.friedscale.scale_multipliers[i]
else:
coef = self.coef[i]
if subregion is None:
importance = abs(coef) * self.stddev[i]
else:
subregion = np.array(subregion)
importance = sum(abs(coef) * abs([x[i] for x in self.winsorizer.trim(subregion)] - self.mean[i])) / len(
subregion)
output_rules += [(self.feature_names[i],
'linear', coef, 1, importance)]
# Add rules
for i in range(0, len(self.rules_)):
rule = rule_ensemble[i]
coef = self.coef[i + n_features]
if subregion is None:
importance = abs(coef) * (rule.support *
(1 - rule.support)) ** (1 / 2)
else:
rkx = self.transform(subregion, [rule])[:, -1]
importance = sum(
abs(coef) * abs(rkx - rule.support)) / len(subregion)
output_rules += [(self.rules_[i].rule, 'rule',
coef, rule.support, importance)]
rules = pd.DataFrame(output_rules, columns=[
"rule", "type", "coef", "support", "importance"])
if exclude_zero_coef:
rules = rules.loc[rules.coef != 0]  # use .loc: the deprecated .ix indexer was removed from pandas
return rules
| (self, exclude_zero_coef=False, subregion=None) |
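A hedged sketch of how the resulting rules DataFrame is typically inspected, using a RuleFit regressor on illustrative synthetic data (the helper is private; its columns follow the docstring above):

```python
# Hedged sketch: fit RuleFit and look at the nonzero-coefficient rules.
import numpy as np
from imodels import RuleFitRegressor

rng = np.random.RandomState(0)
X = rng.rand(200, 4)
y = X[:, 0] + (X[:, 1] > 0.5) + 0.1 * rng.randn(200)

rf = RuleFitRegressor(max_rules=10)
rf.fit(X, y, feature_names=[f"feat_{i}" for i in range(4)])
rules_df = rf._get_rules()   # columns: rule, type, coef, support, importance
print(rules_df[rules_df.coef != 0].sort_values("importance", ascending=False).head())
```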
17,120 | imodels.rule_set.fplasso | _init_prediction_task | null | def _init_prediction_task(self):
self.prediction_task = 'classification'
| (self) |
17,122 | imodels.rule_set.rule_fit | _predict_continuous_output | Predict outcome of linear model for X
| def _predict_continuous_output(self, X):
"""Predict outcome of linear model for X
"""
if type(X) == pd.DataFrame:
X = X.values.astype(np.float32)
y_pred = np.zeros(X.shape[0])
y_pred += self._eval_weighted_rule_sum(X)
if self.include_linear:
if self.lin_standardise:
X = self.friedscale.scale(X)
y_pred += X @ self.coef[:X.shape[1]]
return y_pred + self.intercept
| (self, X) |
17,126 | imodels.rule_set.rule_fit | _score_rules | null | def _score_rules(self, X, y, rules) -> Tuple[List[Rule], List[float], float]:
X_concat = np.zeros([X.shape[0], 0])
# standardise linear variables if requested (for regression model only)
if self.include_linear:
# standard deviation and mean of winsorized features
self.winsorizer.train(X)
winsorized_X = self.winsorizer.trim(X)
self.stddev = np.std(winsorized_X, axis=0)
self.mean = np.mean(winsorized_X, axis=0)
if self.lin_standardise:
self.friedscale.train(X)
X_regn = self.friedscale.scale(X)
else:
X_regn = X.copy()
X_concat = np.concatenate((X_concat, X_regn), axis=1)
X_rules = self.transform(X, rules)
if X_rules.shape[0] > 0:
X_concat = np.concatenate((X_concat, X_rules), axis=1)
# no rules fit and self.include_linear == False
if X_concat.shape[1] == 0:
return [], [], 0
prediction_task = 'regression' if isinstance(
self, RegressorMixin) else 'classification'
return score_linear(X_concat, y, rules,
prediction_task=prediction_task,
max_rules=self.max_rules,
alpha=self.alpha,
cv=self.cv,
random_state=self.random_state)
| (self, X, y, rules) -> Tuple[List[imodels.util.rule.Rule], List[float], float] |
17,129 | imodels.rule_set.fplasso | fit | null | def fit(self, X, y=None, feature_names=None, undiscretized_features=[]):
self.undiscretized_features = undiscretized_features
super().fit(X, y, feature_names=feature_names)
return self
| (self, X, y=None, feature_names=None, undiscretized_features=[]) |
17,133 | imodels.rule_set.rule_fit | predict | Predict. For regression returns continuous output.
For classification, returns discrete output.
| def predict(self, X):
'''Predict. For regression returns continuous output.
For classification, returns discrete output.
'''
check_is_fitted(self)
if scipy.sparse.issparse(X):
X = X.toarray()
X = check_array(X)
if isinstance(self, RegressorMixin):
return self._predict_continuous_output(X)
else:
return np.argmax(self.predict_proba(X), axis=1)
| (self, X) |
17,134 | imodels.rule_set.rule_fit | predict_proba | null | def predict_proba(self, X):
check_is_fitted(self)
if scipy.sparse.issparse(X):
X = X.toarray()
X = check_array(X)
continuous_output = self._predict_continuous_output(X)
logits = np.vstack(
(1 - continuous_output, continuous_output)).transpose()
return softmax(logits, axis=1)
| (self, X) |
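A tiny numeric illustration of the softmax step above (the values are made up): the continuous output and its complement are stacked as two pseudo-logits and normalized so each row sums to one.

```python
# Hedged sketch: the softmax normalization used to turn continuous outputs into class probabilities.
import numpy as np
from scipy.special import softmax

continuous_output = np.array([0.2, 0.9])                          # made-up continuous model outputs
logits = np.vstack((1 - continuous_output, continuous_output)).T
print(softmax(logits, axis=1))                                    # rows sum to 1; larger output -> larger P(class 1)
```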
17,136 | sklearn.utils._metadata_requests | set_fit_request | Request metadata passed to the ``fit`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
feature_names : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``feature_names`` parameter in ``fit``.
undiscretized_features : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``undiscretized_features`` parameter in ``fit``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: imodels.rule_set.fplasso.FPLassoClassifier, *, feature_names: Union[bool, NoneType, str] = '$UNCHANGED$', undiscretized_features: Union[bool, NoneType, str] = '$UNCHANGED$') -> imodels.rule_set.fplasso.FPLassoClassifier |
17,140 | sklearn.utils._metadata_requests | set_transform_request | Request metadata passed to the ``transform`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``transform`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``transform``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
rules : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``rules`` parameter in ``transform``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: imodels.rule_set.fplasso.FPLassoClassifier, *, rules: Union[bool, NoneType, str] = '$UNCHANGED$') -> imodels.rule_set.fplasso.FPLassoClassifier |
17,141 | imodels.rule_set.rule_fit | transform | Transform dataset.
Parameters
----------
X : array-like matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency.
Returns
-------
X_transformed: matrix, shape=(n_samples, n_out)
Transformed data set
| def _score_rules(self, X, y, rules) -> Tuple[List[Rule], List[float], float]:
X_concat = np.zeros([X.shape[0], 0])
# standardise linear variables if requested (for regression model only)
if self.include_linear:
# standard deviation and mean of winsorized features
self.winsorizer.train(X)
winsorized_X = self.winsorizer.trim(X)
self.stddev = np.std(winsorized_X, axis=0)
self.mean = np.mean(winsorized_X, axis=0)
if self.lin_standardise:
self.friedscale.train(X)
X_regn = self.friedscale.scale(X)
else:
X_regn = X.copy()
X_concat = np.concatenate((X_concat, X_regn), axis=1)
X_rules = self.transform(X, rules)
if X_rules.shape[0] > 0:
X_concat = np.concatenate((X_concat, X_rules), axis=1)
# no rules fit and self.include_linear == False
if X_concat.shape[1] == 0:
return [], [], 0
prediction_task = 'regression' if isinstance(
self, RegressorMixin) else 'classification'
return score_linear(X_concat, y, rules,
prediction_task=prediction_task,
max_rules=self.max_rules,
alpha=self.alpha,
cv=self.cv,
random_state=self.random_state)
| (self, X=None, rules=None) |
17,142 | imodels.rule_set.rule_fit | visualize | null | def visualize(self, decimals=2):
rules = self._get_rules()
rules = rules[rules.coef != 0].sort_values("support", ascending=False)
pd.set_option('display.max_colwidth', None)
return rules[['rule', 'coef']].round(decimals)
| (self, decimals=2) |
17,143 | imodels.rule_set.fplasso | FPLassoRegressor | null | class FPLassoRegressor(FPLasso, RegressorMixin):
def _init_prediction_task(self):
self.prediction_task = 'regression'
| (minsupport=0.1, maxcardinality=2, verbose=False, n_estimators=100, tree_size=4, sample_fract='default', max_rules=2000, memory_par=0.01, tree_generator=None, lin_trim_quantile=0.025, lin_standardise=True, exp_rand_tree_size=True, include_linear=True, alpha=None, random_state=None) |
17,159 | imodels.rule_set.fplasso | _init_prediction_task | null | def _init_prediction_task(self):
self.prediction_task = 'regression'
| (self) |
17,182 | imodels.rule_set.fpskope | FPSkopeClassifier | null | class FPSkopeClassifier(SkopeRulesClassifier):
def __init__(self,
minsupport=0.1,
maxcardinality=2,
verbose=False,
precision_min=0.5,
recall_min=0.01,
n_estimators=10,
max_samples=.8,
max_samples_features=1.,
bootstrap=False,
bootstrap_features=False,
max_depth=3,
max_depth_duplication=None,
max_features=1.,
min_samples_split=2,
n_jobs=1,
random_state=None):
super().__init__(precision_min,
recall_min,
n_estimators,
max_samples,
max_samples_features,
bootstrap,
bootstrap_features,
max_depth,
max_depth_duplication,
max_features,
min_samples_split,
n_jobs,
random_state,
verbose)
self.minsupport = minsupport
self.maxcardinality = maxcardinality
self.verbose = verbose
def fit(self, X, y=None, feature_names=None, undiscretized_features=[], sample_weight=None):
self.undiscretized_features = undiscretized_features
super().fit(X, y, feature_names=feature_names, sample_weight=sample_weight)
return self
def _extract_rules(self, X, y) -> List[str]:
X = pd.DataFrame(X, columns=self.feature_placeholders)
itemsets = extract_fpgrowth(X, minsupport=self.minsupport,
maxcardinality=self.maxcardinality,
verbose=self.verbose)
return [itemsets_to_rules(itemsets)], [np.arange(X.shape[0])], [np.arange(len(self.feature_names))]
def _score_rules(self, X, y, rules) -> List[Rule]:
return score_precision_recall(X, y,
rules,
self.estimators_samples_,
self.estimators_features_,
self.feature_placeholders,
oob=False)
| (minsupport=0.1, maxcardinality=2, verbose=False, precision_min=0.5, recall_min=0.01, n_estimators=10, max_samples=0.8, max_samples_features=1.0, bootstrap=False, bootstrap_features=False, max_depth=3, max_depth_duplication=None, max_features=1.0, min_samples_split=2, n_jobs=1, random_state=None) |
17,184 | imodels.rule_set.fpskope | __init__ | null | def __init__(self,
minsupport=0.1,
maxcardinality=2,
verbose=False,
precision_min=0.5,
recall_min=0.01,
n_estimators=10,
max_samples=.8,
max_samples_features=1.,
bootstrap=False,
bootstrap_features=False,
max_depth=3,
max_depth_duplication=None,
max_features=1.,
min_samples_split=2,
n_jobs=1,
random_state=None):
super().__init__(precision_min,
recall_min,
n_estimators,
max_samples,
max_samples_features,
bootstrap,
bootstrap_features,
max_depth,
max_depth_duplication,
max_features,
min_samples_split,
n_jobs,
random_state,
verbose)
self.minsupport = minsupport
self.maxcardinality = maxcardinality
self.verbose = verbose
| (self, minsupport=0.1, maxcardinality=2, verbose=False, precision_min=0.5, recall_min=0.01, n_estimators=10, max_samples=0.8, max_samples_features=1.0, bootstrap=False, bootstrap_features=False, max_depth=3, max_depth_duplication=None, max_features=1.0, min_samples_split=2, n_jobs=1, random_state=None) |
17,191 | imodels.rule_set.fpskope | _extract_rules | null | def _extract_rules(self, X, y) -> List[str]:
X = pd.DataFrame(X, columns=self.feature_placeholders)
itemsets = extract_fpgrowth(X, minsupport=self.minsupport,
maxcardinality=self.maxcardinality,
verbose=self.verbose)
return [itemsets_to_rules(itemsets)], [np.arange(X.shape[0])], [np.arange(len(self.feature_names))]
| (self, X, y) -> List[str] |
17,197 | imodels.rule_set.skope_rules | _predict_top_rules | Predict if a particular sample is an outlier or not,
using the n_rules most performing rules.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32``
n_rules : int
The number of rules used for the prediction. If one of the
n_rules most performing rules is activated, the prediction
is equal to 1.
Returns
-------
is_outlier : array, shape (n_samples,)
For each observation, indicates whether (1) or not (0) it should
be considered an outlier according to the selected rules.
| def _predict_top_rules(self, X, n_rules) -> np.ndarray:
"""Predict if a particular sample is an outlier or not,
using the n_rules best-performing rules.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32``
n_rules : int
The number of rules used for the prediction. If one of the
n_rules most performing rules is activated, the prediction
is equal to 1.
Returns
-------
is_outlier : array, shape (n_samples,)
For each observation, indicates whether (1) or not (0) it should
be considered an outlier according to the selected rules.
"""
return np.array((self._score_top_rules(X) > len(self.rules_) - n_rules),
dtype=int)
| (self, X, n_rules) -> numpy.ndarray |
17,198 | imodels.rule_set.skope_rules | _prune_rules | null | def _prune_rules(self, rules) -> List[Rule]:
return deduplicate(
prune_mins(rules, self.precision_min, self.recall_min),
self.max_depth_duplication
)
| (self, rules) -> List[imodels.util.rule.Rule] |
17,201 | imodels.rule_set.skope_rules | _rules_vote | Score representing a vote of the base classifiers (rules).
The score of an input sample is computed as the sum of the binary
rules outputs: a score of k means than k rules have voted positively.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training input samples.
Returns
-------
scores : array, shape (n_samples,)
The score of the input samples.
The higher, the more abnormal. Positive scores represent outliers,
null scores represent inliers.
| def _rules_vote(self, X) -> np.ndarray:
"""Score representing a vote of the base classifiers (rules).
The score of an input sample is computed as the sum of the binary
rule outputs: a score of k means that k rules have voted positively.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training input samples.
Returns
-------
scores : array, shape (n_samples,)
The score of the input samples.
The higher, the more abnormal. Positive scores represent outliers,
null scores represent inliers.
"""
# Check if fit had been called
check_is_fitted(self, ['rules_', 'estimators_samples_', 'max_samples_'])
# Input validation
X = check_array(X)
if X.shape[1] != self.n_features_:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of features at training time."
" Please reshape your data."
% (X.shape[1], self.n_features_))
df = pandas.DataFrame(X, columns=self.feature_placeholders)
selected_rules = self.rules_without_feature_names_
scores = np.zeros(X.shape[0])
for (r, _) in selected_rules:
scores[list(df.query(r).index)] += 1
return scores
| (self, X) -> numpy.ndarray |
17,202 | imodels.rule_set.fpskope | _score_rules | null | def _score_rules(self, X, y, rules) -> List[Rule]:
return score_precision_recall(X, y,
rules,
self.estimators_samples_,
self.estimators_features_,
self.feature_placeholders,
oob=False)
| (self, X, y, rules) -> List[imodels.util.rule.Rule] |
17,203 | imodels.rule_set.skope_rules | _score_top_rules | Score representing an ordering between the base classifiers (rules).
The score is high when the instance is detected by a high-performing rule.
If there are n rules, ordered by increasing OOB precision, a score of k
means that the kth rule has voted positively, but not the (k-1) first
rules.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training input samples.
Returns
-------
scores : array, shape (n_samples,)
The score of the input samples.
Positive scores represent outliers, null scores represent inliers.
| def _score_top_rules(self, X) -> np.ndarray:
"""Score representing an ordering between the base classifiers (rules).
The score is high when the instance is detected by a high-performing rule.
If there are n rules, ordered by increasing OOB precision, a score of k
means that the kth rule has voted positively, but not the (k-1) first
rules.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training input samples.
Returns
-------
scores : array, shape (n_samples,)
The score of the input samples.
Positive scores represent outliers, null scores represent inliers.
"""
# Check if fit had been called
check_is_fitted(self, ['rules_', 'estimators_samples_', 'max_samples_'])
# Input validation
X = check_array(X)
if X.shape[1] != self.n_features_:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of features at training time."
" Please reshape your data."
% (X.shape[1], self.n_features_))
df = pandas.DataFrame(X, columns=self.feature_placeholders)
selected_rules = self.rules_without_feature_names_
scores = np.zeros(X.shape[0])
for (k, r) in enumerate(list((selected_rules))):
scores[list(df.query(r.rule).index)] = np.maximum(
len(selected_rules) - k,
scores[list(df.query(r.rule).index)])
return scores
| (self, X) -> numpy.ndarray |
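The ordering score can be illustrated the same way: with the rules assumed sorted from most to least precise, the best rule that fires for a sample determines its score, and earlier (better) rules yield larger values. A sketch with invented rules:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame([[1.0, 5.0], [3.0, 0.5], [4.0, 6.0]],
                  columns=['feature_0', 'feature_1'])
# hypothetical rules, assumed ordered from most to least precise
rules = ['feature_1 > 4', 'feature_0 > 2']

scores = np.zeros(len(df))
for k, r in enumerate(rules):
    idx = list(df.query(r).index)
    # the k-th rule contributes a score of len(rules) - k; keep the maximum
    scores[idx] = np.maximum(len(rules) - k, scores[idx])
print(scores)  # [2. 1. 2.]
```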
17,206 | imodels.rule_set.fpskope | fit | null | def fit(self, X, y=None, feature_names=None, undiscretized_features=[], sample_weight=None):
self.undiscretized_features = undiscretized_features
super().fit(X, y, feature_names=feature_names, sample_weight=sample_weight)
return self
| (self, X, y=None, feature_names=None, undiscretized_features=[], sample_weight=None) |
17,209 | imodels.rule_set.skope_rules | predict | Predict if a particular sample is an outlier or not.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32``
Returns
-------
is_outlier : array, shape (n_samples,)
For each observation, tells whether or not (1 or 0) it should
be considered as an outlier according to the selected rules.
| def predict(self, X) -> np.ndarray:
"""Predict if a particular sample is an outlier or not.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32``
Returns
-------
is_outlier : array, shape (n_samples,)
For each observation, tells whether or not (1 or 0) it should
be considered as an outlier according to the selected rules.
"""
X = check_array(X)
return np.argmax(self.predict_proba(X), axis=1)
| (self, X) -> numpy.ndarray |
17,210 | imodels.rule_set.skope_rules | predict_proba | Predict probability of a particular sample being an outlier or not
| def predict_proba(self, X) -> np.ndarray:
'''Predict probability of a particular sample being an outlier or not
'''
X = check_array(X)
weight_sum = np.sum([w[0] for (r, w) in self.rules_without_feature_names_])
if weight_sum == 0:
return np.vstack((np.ones(X.shape[0]), np.zeros(X.shape[0]))).transpose()
y = self._eval_weighted_rule_sum(X) / weight_sum
return np.vstack((1 - y, y)).transpose()
| (self, X) -> numpy.ndarray |
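The probability construction normalizes the weighted rule sum by the total rule weight and stacks it into (inlier, outlier) columns, falling back to all-inlier probabilities when every weight is zero. A standalone sketch with made-up weights and activations:

```python
import numpy as np

weighted_rule_sum = np.array([0.0, 1.2, 0.4])  # hypothetical per-sample sums
rule_weights = [0.8, 0.4]                      # hypothetical rule weights

weight_sum = np.sum(rule_weights)
if weight_sum == 0:
    proba = np.vstack((np.ones(3), np.zeros(3))).T  # no usable rule: all inliers
else:
    y = weighted_rule_sum / weight_sum
    proba = np.vstack((1 - y, y)).T
print(proba)
# [[1.         0.        ]
#  [0.         1.        ]
#  [0.66666667 0.33333333]]
```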
17,212 | sklearn.utils._metadata_requests | set_fit_request | Request metadata passed to the ``fit`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
feature_names : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``feature_names`` parameter in ``fit``.
sample_weight : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``sample_weight`` parameter in ``fit``.
undiscretized_features : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``undiscretized_features`` parameter in ``fit``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: imodels.rule_set.fpskope.FPSkopeClassifier, *, feature_names: Union[bool, NoneType, str] = '$UNCHANGED$', sample_weight: Union[bool, NoneType, str] = '$UNCHANGED$', undiscretized_features: Union[bool, NoneType, str] = '$UNCHANGED$') -> imodels.rule_set.fpskope.FPSkopeClassifier |
17,215 | imodels.rule_list.greedy_rule_list | GreedyRuleListClassifier | null | class GreedyRuleListClassifier(BaseEstimator, RuleList, ClassifierMixin):
def __init__(self, max_depth: int = 5, class_weight=None,
criterion: str = 'gini'):
'''
Params
------
max_depth
Maximum depth the list can achieve
criterion: str
Criterion used to split
'gini', 'entropy', or 'log_loss'
'''
self.max_depth = max_depth
self.class_weight = class_weight
self.criterion = criterion
self.depth = 0 # tracks the fitted depth
def fit(self, X, y, depth: int = 0, feature_names=None, verbose=False):
"""
Params
------
X: array_like
Feature set
y: array_like
target variable
depth
the depth of the current layer (used to recurse)
"""
X, y, feature_names = check_fit_arguments(self, X, y, feature_names)
return self.fit_node_recursive(X, y, depth=0, verbose=verbose)
def fit_node_recursive(self, X, y, depth: int, verbose):
# base case 1: no data in this group
if y.size == 0:
return []
# base case 2: all y is the same in this group
elif np.all(y == y[0]):
return [{'val': y[0], 'num_pts': y.size}]
# base case 3: max depth reached
elif depth == self.max_depth:
return [{'val': np.mean(y), 'num_pts': y.size}]
# recursively generate rule list
else:
# find a split with the best value for the criterion
m = DecisionTreeClassifier(max_depth=1, criterion=self.criterion)
m.fit(X, y)
col = m.tree_.feature[0]
cutoff = m.tree_.threshold[0]
# col, cutoff, criterion_val = self._find_best_split(X, y)
if col == -2:
return []
y_left = y[X[:, col] < cutoff] # left-hand side data
y_right = y[X[:, col] >= cutoff] # right-hand side data
# put higher probability of class 1 on the right-hand side
if len(y_left) > 0 and np.mean(y_left) > np.mean(y_right):
flip = True
tmp = deepcopy(y_left)
y_left = deepcopy(y_right)
y_right = tmp
x_left = X[X[:, col] >= cutoff]
else:
flip = False
x_left = X[X[:, col] < cutoff]
# print
if verbose:
print(
f'{np.mean(100 * y):.2f} -> {self.feature_names_[col]} -> {np.mean(100 * y_left):.2f} ({y_left.size}) {np.mean(100 * y_right):.2f} ({y_right.size})')
# save info
par_node = [{
'col': self.feature_names_[col],
'index_col': col,
'cutoff': cutoff,
'val': np.mean(y_left), # will be the values before splitting in the next lower level
'flip': flip,
'val_right': np.mean(y_right),
'num_pts': y.size,
'num_pts_right': y_right.size
}]
# generate tree for the non-leaf data
par_node = par_node + \
self.fit_node_recursive(x_left, y_left, depth + 1, verbose=verbose)
self.depth += 1 # increase the depth since we call fit once
self.rules_ = par_node
self.complexity_ = len(self.rules_)
self.classes_ = unique_labels(y)
return par_node
def predict_proba(self, X):
check_is_fitted(self)
X = check_array(X)
n = X.shape[0]
probs = np.zeros(n)
for i in range(n):
x = X[i]
for j, rule in enumerate(self.rules_):
if j == len(self.rules_) - 1:
probs[i] = rule['val']
continue
regular_condition = x[rule["index_col"]] >= rule["cutoff"]
flipped_condition = x[rule["index_col"]] < rule["cutoff"]
condition = flipped_condition if rule["flip"] else regular_condition
if condition:
probs[i] = rule['val_right']
break
return np.vstack((1 - probs, probs)).transpose() # probs (n, 2)
def predict(self, X):
check_is_fitted(self)
X = check_array(X)
return np.argmax(self.predict_proba(X), axis=1)
"""
def __str__(self):
# s = ''
# for rule in self.rules_:
# s += f"mean {rule['val'].round(3)} ({rule['num_pts']} pts)\n"
# if 'col' in rule:
# s += f"if {rule['col']} >= {rule['cutoff']} then {rule['val_right'].round(3)} ({rule['num_pts_right']} pts)\n"
# return s
"""
def __str__(self):
'''Print out the list in a nice way
'''
s = '> ------------------------------\n> Greedy Rule List\n> ------------------------------\n'
def red(s):
# return f"\033[91m{s}\033[00m"
return s
def cyan(s):
# return f"\033[96m{s}\033[00m"
return s
def rule_name(rule):
if rule['flip']:
return '~' + rule['col']
return rule['col']
# rule = self.rules_[0]
# s += f"{red((100 * rule['val']).round(3))}% IwI ({rule['num_pts']} pts)\n"
for rule in self.rules_:
s += u'\u2193\n' + f"{cyan((100 * rule['val']).round(2))}% risk ({rule['num_pts']} pts)\n"
# s += f"\t{'Else':>45} => {cyan((100 * rule['val']).round(2)):>6}% IwI ({rule['val'] * rule['num_pts']:.0f}/{rule['num_pts']} pts)\n"
if 'col' in rule:
# prefix = f"if {rule['col']} >= {rule['cutoff']}"
prefix = f"if {rule_name(rule)}"
val = f"{100 * rule['val_right'].round(3)}"
s += f"\t{prefix} ==> {red(val)}% risk ({rule['num_pts_right']} pts)\n"
# rule = self.rules_[-1]
# s += f"{red((100 * rule['val']).round(3))}% IwI ({rule['num_pts']} pts)\n"
return s
######## HERE ONWARDS CUSTOM SPLITTING (DEPRECATED IN FAVOR OF SKLEARN STUMP) ########
######################################################################################
def _find_best_split(self, x, y):
"""
Find the best split from all features
returns: the column to split on, the cutoff value, and the actual criterion_value
"""
col = None
min_criterion_val = 1e10
cutoff = None
# iterating through each feature
for i, c in enumerate(x.T):
# find the best split of that feature
criterion_val, cur_cutoff = self._split_on_feature(c, y)
# found perfect cutoff
if criterion_val == 0:
return i, cur_cutoff, criterion_val
# check if it's best so far
elif criterion_val <= min_criterion_val:
min_criterion_val = criterion_val
col = i
cutoff = cur_cutoff
return col, cutoff, min_criterion_val
def _split_on_feature(self, col, y):
"""
col: the column we split on
y: target var
"""
min_criterion_val = 1e10
cutoff = 0.5
# iterate through each value in the column
for value in np.unique(col):
# separate y into 2 groups
y_predict = col < value
# get criterion val of this split
criterion_val = self._weighted_criterion(y_predict, y)
# check if it's the smallest one so far
if criterion_val <= min_criterion_val:
min_criterion_val = criterion_val
cutoff = value
return min_criterion_val, cutoff
def _weighted_criterion(self, split_decision, y_real):
"""Returns criterion calculated over a split
split decision, True/False, and y_true can be multi class
"""
if split_decision.shape[0] != y_real.shape[0]:
print('They have to be the same length')
return None
# choose the splitting criterion
if self.criterion == 'entropy':
criterion_func = self._entropy_criterion
elif self.criterion == 'gini':
criterion_func = self._gini_criterion
elif self.criterion == 'neg_corr':
return self._neg_corr_criterion(split_decision, y_real)
# left-hand side criterion
s_left = criterion_func(y_real[split_decision])
# right-hand side criterion
s_right = criterion_func(y_real[~split_decision])
# overall criterion, again weighted average
n = y_real.shape[0]
if self.class_weight is not None:
sample_weights = np.ones(n)
for c in self.class_weight.keys():
idxs_c = y_real == c
sample_weights[idxs_c] = self.class_weight[c]
total_weight = np.sum(sample_weights)
weight_left = np.sum(sample_weights[split_decision]) / total_weight
# weight_right = np.sum(sample_weights[~split_decision]) / total_weight
else:
tot_left_samples = np.sum(split_decision == 1)
weight_left = tot_left_samples / n
s = weight_left * s_left + (1 - weight_left) * s_right
return s
def _gini_criterion(self, y):
'''Returns gini index for one node
= sum(p_c * (1 - p_c))
'''
s = 0
n = y.shape[0]
classes = np.unique(y)
# for each class, get entropy
for c in classes:
# weights for each class
n_c = np.sum(y == c)
p_c = n_c / n
# weighted avg
s += p_c * (1 - p_c)
return s
def _entropy_criterion(self, y):
"""Returns entropy of a divided group of data
Data may have multiple classes
"""
s = 0
n = len(y)
classes = set(y)
# for each class, get entropy
for c in classes:
# weights for each class
weight = sum(y == c) / n
def _entropy_from_counts(c1, c2):
"""Returns entropy of a group of data
c1: count of one class
c2: count of another class
"""
if c1 == 0 or c2 == 0: # when there is only one class in the group, entropy is 0
return 0
def _entropy_func(p): return -p * math.log(p, 2)
p1 = c1 * 1.0 / (c1 + c2)
p2 = c2 * 1.0 / (c1 + c2)
return _entropy_func(p1) + _entropy_func(p2)
# weighted avg
s += weight * _entropy_from_counts(sum(y == c), sum(y != c))
return s
def _neg_corr_criterion(self, split_decision, y):
'''Returns negative correlation between y
and the binary splitting variable split_decision
y must be binary
'''
if np.unique(y).size < 2:
return 0
elif np.unique(y).size != 2:
print('y must be binary output for corr criterion')
# y should be 1 more often on the "right side" of the split
if y.sum() < y.size / 2:
y = 1 - y
return -1 * np.corrcoef(split_decision.astype(int), y)[0, 1]  # builtin int: np.int was removed from numpy
| (max_depth: int = 5, class_weight=None, criterion: str = 'gini') |
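A minimal usage sketch on synthetic data, assuming the class is exported at the imodels top level like the other estimators in this dump (otherwise it can be imported from imodels.rule_list.greedy_rule_list):

```python
import numpy as np
from imodels import GreedyRuleListClassifier  # assumed top-level export

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 3))
y = (X[:, 0] + 0.5 * X[:, 1] > 0).astype(int)  # synthetic binary target

m = GreedyRuleListClassifier(max_depth=3)
m.fit(X, y, feature_names=['f0', 'f1', 'f2'])
print(m)                       # pretty-printed rule list via __str__
print(m.predict(X[:5]))        # class predictions
print(m.predict_proba(X[:5]))  # (n, 2) probability estimates
```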
17,217 | imodels.rule_list.greedy_rule_list | __init__ |
Params
------
max_depth
Maximum depth the list can achieve
criterion: str
Criterion used to split
'gini', 'entropy', or 'log_loss'
| def __init__(self, max_depth: int = 5, class_weight=None,
criterion: str = 'gini'):
'''
Params
------
max_depth
Maximum depth the list can achieve
criterion: str
Criterion used to split
'gini', 'entropy', or 'log_loss'
'''
self.max_depth = max_depth
self.class_weight = class_weight
self.criterion = criterion
self.depth = 0 # tracks the fitted depth
| (self, max_depth: int = 5, class_weight=None, criterion: str = 'gini') |
17,221 | imodels.rule_list.greedy_rule_list | __str__ | Print out the list in a nice way
| def __str__(self):
'''Print out the list in a nice way
'''
s = '> ------------------------------\n> Greedy Rule List\n> ------------------------------\n'
def red(s):
# return f"\033[91m{s}\033[00m"
return s
def cyan(s):
# return f"\033[96m{s}\033[00m"
return s
def rule_name(rule):
if rule['flip']:
return '~' + rule['col']
return rule['col']
# rule = self.rules_[0]
# s += f"{red((100 * rule['val']).round(3))}% IwI ({rule['num_pts']} pts)\n"
for rule in self.rules_:
s += u'\u2193\n' + f"{cyan((100 * rule['val']).round(2))}% risk ({rule['num_pts']} pts)\n"
# s += f"\t{'Else':>45} => {cyan((100 * rule['val']).round(2)):>6}% IwI ({rule['val'] * rule['num_pts']:.0f}/{rule['num_pts']} pts)\n"
if 'col' in rule:
# prefix = f"if {rule['col']} >= {rule['cutoff']}"
prefix = f"if {rule_name(rule)}"
val = f"{100 * rule['val_right'].round(3)}"
s += f"\t{prefix} ==> {red(val)}% risk ({rule['num_pts_right']} pts)\n"
# rule = self.rules_[-1]
# s += f"{red((100 * rule['val']).round(3))}% IwI ({rule['num_pts']} pts)\n"
return s
| (self) |
17,224 | imodels.rule_list.greedy_rule_list | _entropy_criterion | Returns entropy of a divided group of data
Data may have multiple classes
| def _entropy_criterion(self, y):
"""Returns entropy of a divided group of data
Data may have multiple classes
"""
s = 0
n = len(y)
classes = set(y)
# for each class, get entropy
for c in classes:
# weights for each class
weight = sum(y == c) / n
def _entropy_from_counts(c1, c2):
"""Returns entropy of a group of data
c1: count of one class
c2: count of another class
"""
if c1 == 0 or c2 == 0: # when there is only one class in the group, entropy is 0
return 0
def _entropy_func(p): return -p * math.log(p, 2)
p1 = c1 * 1.0 / (c1 + c2)
p2 = c2 * 1.0 / (c1 + c2)
return _entropy_func(p1) + _entropy_func(p2)
# weighted avg
s += weight * _entropy_from_counts(sum(y == c), sum(y != c))
return s
| (self, y) |
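For binary labels, the class-weighted sum computed above collapses to the ordinary binary entropy H(p), since both classes contribute their proportion times the same two-class entropy. A quick numeric check on toy labels:

```python
import numpy as np

def binary_entropy(p):
    """H(p) = -p log2 p - (1 - p) log2 (1 - p), with H(0) = H(1) = 0."""
    if p == 0 or p == 1:
        return 0.0
    return float(-p * np.log2(p) - (1 - p) * np.log2(1 - p))

y = np.array([1, 1, 0, 0, 0, 0, 0, 0])  # 2 positives out of 8
p = y.mean()
# p * H(p) + (1 - p) * H(p) == H(p)
print(binary_entropy(p))  # ~0.8113
```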
17,225 | imodels.rule_list.greedy_rule_list | _find_best_split |
Find the best split from all features
returns: the column to split on, the cutoff value, and the actual criterion_value
| def _find_best_split(self, x, y):
"""
Find the best split from all features
returns: the column to split on, the cutoff value, and the actual criterion_value
"""
col = None
min_criterion_val = 1e10
cutoff = None
# iterating through each feature
for i, c in enumerate(x.T):
# find the best split of that feature
criterion_val, cur_cutoff = self._split_on_feature(c, y)
# found perfect cutoff
if criterion_val == 0:
return i, cur_cutoff, criterion_val
# check if it's best so far
elif criterion_val <= min_criterion_val:
min_criterion_val = criterion_val
col = i
cutoff = cur_cutoff
return col, cutoff, min_criterion_val
| (self, x, y) |
17,230 | imodels.rule_list.greedy_rule_list | _gini_criterion | Returns gini index for one node
= sum(pc * (1 – pc))
| def _gini_criterion(self, y):
'''Returns gini index for one node
= sum(pc * (1 – pc))
'''
s = 0
n = y.shape[0]
classes = np.unique(y)
# for each class, get entropy
for c in classes:
# weights for each class
n_c = np.sum(y == c)
p_c = n_c / n
# weighted avg
s += p_c * (1 - p_c)
return s
| (self, y) |
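A numeric check of the Gini formula sum(p_c * (1 - p_c)), which equals 1 - sum(p_c^2), on toy labels:

```python
import numpy as np

y = np.array([0, 0, 1, 1, 1])
p = np.array([np.mean(y == c) for c in np.unique(y)])  # class proportions [0.4, 0.6]
gini = float(np.sum(p * (1 - p)))                      # same as 1 - np.sum(p ** 2)
print(gini)  # 0.48
```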
17,232 | imodels.rule_list.greedy_rule_list | _neg_corr_criterion | Returns negative correlation between y
and the binary splitting variable split_decision
y must be binary
| def _neg_corr_criterion(self, split_decision, y):
'''Returns negative correlation between y
and the binary splitting variable split_decision
y must be binary
'''
if np.unique(y).size < 2:
return 0
elif np.unique(y).size != 2:
print('y must be binary output for corr criterion')
# y should be 1 more often on the "right side" of the split
if y.sum() < y.size / 2:
y = 1 - y
return -1 * np.corrcoef(split_decision.astype(int), y)[0, 1]  # builtin int: np.int was removed from numpy
| (self, split_decision, y) |
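A standalone computation of the correlation criterion on a toy split; the arrays are invented, and the flip step is a no-op here because class 1 is not the minority. A split indicator perfectly aligned with the labels gives the minimum value of -1:

```python
import numpy as np

split_decision = np.array([True, True, False, False])  # hypothetical left-side mask
y = np.array([1, 1, 0, 0])                             # binary labels, no flip needed
print(-1 * np.corrcoef(split_decision.astype(int), y)[0, 1])  # -1.0
```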
17,235 | imodels.rule_list.greedy_rule_list | _split_on_feature |
col: the column we split on
y: target var
| def _split_on_feature(self, col, y):
"""
col: the column we split on
y: target var
"""
min_criterion_val = 1e10
cutoff = 0.5
# iterate through each value in the column
for value in np.unique(col):
# separate y into 2 groups
y_predict = col < value
# get criterion val of this split
criterion_val = self._weighted_criterion(y_predict, y)
# check if it's the smallest one so far
if criterion_val <= min_criterion_val:
min_criterion_val = criterion_val
cutoff = value
return min_criterion_val, cutoff
| (self, col, y) |
17,238 | imodels.rule_list.greedy_rule_list | _weighted_criterion | Returns criterion calculated over a split
split decision, True/False, and y_true can be multi class
| def _weighted_criterion(self, split_decision, y_real):
"""Returns criterion calculated over a split
split decision, True/False, and y_true can be multi class
"""
if split_decision.shape[0] != y_real.shape[0]:
print('They have to be the same length')
return None
# choose the splitting criterion
if self.criterion == 'entropy':
criterion_func = self._entropy_criterion
elif self.criterion == 'gini':
criterion_func = self._gini_criterion
elif self.criterion == 'neg_corr':
return self._neg_corr_criterion(split_decision, y_real)
# left-hand side criterion
s_left = criterion_func(y_real[split_decision])
# right-hand side criterion
s_right = criterion_func(y_real[~split_decision])
# overall criterion, again weighted average
n = y_real.shape[0]
if self.class_weight is not None:
sample_weights = np.ones(n)
for c in self.class_weight.keys():
idxs_c = y_real == c
sample_weights[idxs_c] = self.class_weight[c]
total_weight = np.sum(sample_weights)
weight_left = np.sum(sample_weights[split_decision]) / total_weight
# weight_right = np.sum(sample_weights[~split_decision]) / total_weight
else:
tot_left_samples = np.sum(split_decision == 1)
weight_left = tot_left_samples / n
s = weight_left * s_left + (1 - weight_left) * s_right
return s
| (self, split_decision, y_real) |
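The combined criterion is a weighted average of the left- and right-side impurities, with weights taken from sample counts (or from class weights when class_weight is set). A worked example using the Gini impurity:

```python
import numpy as np

def gini(y):
    p = np.array([np.mean(y == c) for c in np.unique(y)])
    return float(np.sum(p * (1 - p)))

y = np.array([0, 0, 0, 1, 1, 1])
split_decision = np.array([True, True, True, True, False, False])  # left mask

s_left = gini(y[split_decision])     # labels [0, 0, 0, 1] -> 0.375
s_right = gini(y[~split_decision])   # labels [1, 1]       -> 0.0
w_left = split_decision.mean()       # 4 / 6
print(w_left * s_left + (1 - w_left) * s_right)  # 0.25
```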
17,239 | imodels.rule_list.greedy_rule_list | fit |
Params
------
X: array_like
Feature set
y: array_like
target variable
depth
the depth of the current layer (used to recurse)
| def fit(self, X, y, depth: int = 0, feature_names=None, verbose=False):
"""
Params
------
X: array_like
Feature set
y: array_like
target variable
depth
the depth of the current layer (used to recurse)
"""
X, y, feature_names = check_fit_arguments(self, X, y, feature_names)
return self.fit_node_recursive(X, y, depth=0, verbose=verbose)
| (self, X, y, depth: int = 0, feature_names=None, verbose=False) |
17,240 | imodels.rule_list.greedy_rule_list | fit_node_recursive | null | def fit_node_recursive(self, X, y, depth: int, verbose):
# base case 1: no data in this group
if y.size == 0:
return []
# base case 2: all y is the same in this group
elif np.all(y == y[0]):
return [{'val': y[0], 'num_pts': y.size}]
# base case 3: max depth reached
elif depth == self.max_depth:
return [{'val': np.mean(y), 'num_pts': y.size}]
# recursively generate rule list
else:
# find a split with the best value for the criterion
m = DecisionTreeClassifier(max_depth=1, criterion=self.criterion)
m.fit(X, y)
col = m.tree_.feature[0]
cutoff = m.tree_.threshold[0]
# col, cutoff, criterion_val = self._find_best_split(X, y)
if col == -2:
return []
y_left = y[X[:, col] < cutoff] # left-hand side data
y_right = y[X[:, col] >= cutoff] # right-hand side data
# put higher probability of class 1 on the right-hand side
if len(y_left) > 0 and np.mean(y_left) > np.mean(y_right):
flip = True
tmp = deepcopy(y_left)
y_left = deepcopy(y_right)
y_right = tmp
x_left = X[X[:, col] >= cutoff]
else:
flip = False
x_left = X[X[:, col] < cutoff]
# print
if verbose:
print(
f'{np.mean(100 * y):.2f} -> {self.feature_names_[col]} -> {np.mean(100 * y_left):.2f} ({y_left.size}) {np.mean(100 * y_right):.2f} ({y_right.size})')
# save info
par_node = [{
'col': self.feature_names_[col],
'index_col': col,
'cutoff': cutoff,
'val': np.mean(y_left), # will be the values before splitting in the next lower level
'flip': flip,
'val_right': np.mean(y_right),
'num_pts': y.size,
'num_pts_right': y_right.size
}]
# generate tree for the non-leaf data
par_node = par_node + \
self.fit_node_recursive(x_left, y_left, depth + 1, verbose=verbose)
self.depth += 1 # increase the depth since we call fit once
self.rules_ = par_node
self.complexity_ = len(self.rules_)
self.classes_ = unique_labels(y)
return par_node
| (self, X, y, depth: int, verbose) |
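The split-finding step above delegates to a depth-1 sklearn tree (a stump) and reads the chosen column and threshold from its tree_ arrays; a feature value of -2 marks a leaf with no split. A minimal reproduction on synthetic data (variable names are ours):

```python
import numpy as np
from sklearn.tree import DecisionTreeClassifier

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))
y = (X[:, 1] > 0.2).astype(int)  # the signal lives in column 1

stump = DecisionTreeClassifier(max_depth=1, criterion='gini').fit(X, y)
col = stump.tree_.feature[0]       # feature index of the root split (-2 if leaf)
cutoff = stump.tree_.threshold[0]  # threshold of the root split
print(col, cutoff)                 # expected: 1 and a value close to 0.2
```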
17,243 | imodels.rule_list.greedy_rule_list | predict | null | def predict(self, X):
check_is_fitted(self)
X = check_array(X)
return np.argmax(self.predict_proba(X), axis=1)
| (self, X) |
17,244 | imodels.rule_list.greedy_rule_list | predict_proba | null | def predict_proba(self, X):
check_is_fitted(self)
X = check_array(X)
n = X.shape[0]
probs = np.zeros(n)
for i in range(n):
x = X[i]
for j, rule in enumerate(self.rules_):
if j == len(self.rules_) - 1:
probs[i] = rule['val']
continue
regular_condition = x[rule["index_col"]] >= rule["cutoff"]
flipped_condition = x[rule["index_col"]] < rule["cutoff"]
condition = flipped_condition if rule["flip"] else regular_condition
if condition:
probs[i] = rule['val_right']
break
return np.vstack((1 - probs, probs)).transpose() # probs (n, 2)
| (self, X) |
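The per-sample walk in predict_proba can be traced on a toy rule list. The dicts below use the same keys as rules_ but with invented values; the first rule that fires supplies its val_right, and a sample that falls through every rule gets the final leaf's val:

```python
import numpy as np

rules = [
    {'index_col': 0, 'cutoff': 0.5, 'flip': False, 'val': 0.30, 'val_right': 0.80},
    {'val': 0.10},  # final leaf: risk when no earlier rule fired
]

def rule_list_risk(x, rules):
    for j, rule in enumerate(rules):
        if j == len(rules) - 1:
            return rules[-1]['val']  # fell through to the default leaf
        if rule['flip']:
            fired = x[rule['index_col']] < rule['cutoff']
        else:
            fired = x[rule['index_col']] >= rule['cutoff']
        if fired:
            return rule['val_right']  # stop at the first rule that fires

print(rule_list_risk(np.array([0.9]), rules))  # 0.8
print(rule_list_risk(np.array([0.1]), rules))  # 0.1
```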
17,246 | sklearn.utils._metadata_requests | set_fit_request | Request metadata passed to the ``fit`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
depth : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``depth`` parameter in ``fit``.
feature_names : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``feature_names`` parameter in ``fit``.
verbose : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``verbose`` parameter in ``fit``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: imodels.rule_list.greedy_rule_list.GreedyRuleListClassifier, *, depth: Union[bool, NoneType, str] = '$UNCHANGED$', feature_names: Union[bool, NoneType, str] = '$UNCHANGED$', verbose: Union[bool, NoneType, str] = '$UNCHANGED$') -> imodels.rule_list.greedy_rule_list.GreedyRuleListClassifier |
17,249 | imodels.tree.cart_wrapper | GreedyTreeClassifier | Wrapper around sklearn greedy tree classifier
| class GreedyTreeClassifier(DecisionTreeClassifier):
"""Wrapper around sklearn greedy tree classifier
"""
def fit(self, X, y, feature_names=None, sample_weight=None, check_input=True):
"""Build a decision tree classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels) as integers or strings.
feature_names : array-like of shape (n_features)
The names of the features
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. Splits are also
ignored if they would result in any single class carrying a
negative weight in either child node.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : DecisionTreeClassifier
Fitted estimator.
"""
X, y, feature_names = check_fit_arguments(self, X, y, feature_names)
super().fit(X, y, sample_weight=sample_weight, check_input=check_input)
self._set_complexity()
return self  # match the documented return value
def _set_complexity(self):
"""Set complexity as number of non-leaf nodes
"""
self.complexity_ = compute_tree_complexity(self.tree_)
def __str__(self):
s = '> ------------------------------\n'
s += '> Greedy CART Tree:\n'
s += '> \tPrediction is made by looking at the value in the appropriate leaf of the tree\n'
s += '> ------------------------------' + '\n'
if hasattr(self, 'feature_names') and self.feature_names is not None:
return s + export_text(self, feature_names=self.feature_names, show_weights=True)
else:
return s + export_text(self, show_weights=True)
| (*, criterion='gini', splitter='best', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0, class_weight=None, ccp_alpha=0.0, monotonic_cst=None) |
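A short usage sketch, again assuming a top-level imodels export (the fallback path shown in this dump is imodels.tree.cart_wrapper); the toy data and feature names are invented:

```python
import numpy as np
from imodels import GreedyTreeClassifier  # assumed top-level export

X = np.array([[0, 1], [1, 1], [1, 0], [0, 0]] * 10, dtype=float)
y = np.array([0, 1, 1, 0] * 10)  # target equals the first feature here

m = GreedyTreeClassifier(max_depth=2)
m.fit(X, y, feature_names=['x1', 'x2'])
print(m)              # banner plus sklearn's export_text rendering
print(m.complexity_)  # number of non-leaf nodes, set by _set_complexity
```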
17,255 | imodels.tree.cart_wrapper | __str__ | null | def __str__(self):
s = '> ------------------------------\n'
s += '> Greedy CART Tree:\n'
s += '> \tPrediction is made by looking at the value in the appropriate leaf of the tree\n'
s += '> ------------------------------' + '\n'
if hasattr(self, 'feature_names') and self.feature_names is not None:
return s + export_text(self, feature_names=self.feature_names, show_weights=True)
else:
return s + export_text(self, show_weights=True)
| (self) |
17,267 | imodels.tree.cart_wrapper | _set_complexity | Set complexity as number of non-leaf nodes
| def _set_complexity(self):
"""Set complexity as number of non-leaf nodes
"""
self.complexity_ = compute_tree_complexity(self.tree_)
| (self) |
17,275 | imodels.tree.cart_wrapper | fit | Build a decision tree classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels) as integers or strings.
feature_names : array-like of shape (n_features)
The names of the features
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. Splits are also
ignored if they would result in any single class carrying a
negative weight in either child node.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : DecisionTreeClassifier
Fitted estimator.
| def fit(self, X, y, feature_names=None, sample_weight=None, check_input=True):
"""Build a decision tree classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels) as integers or strings.
feature_names : array-like of shape (n_features)
The names of the features
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. Splits are also
ignored if they would result in any single class carrying a
negative weight in either child node.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : DecisionTreeClassifier
Fitted estimator.
"""
X, y, feature_names = check_fit_arguments(self, X, y, feature_names)
super().fit(X, y, sample_weight=sample_weight, check_input=check_input)
self._set_complexity()
| (self, X, y, feature_names=None, sample_weight=None, check_input=True) |
17,284 | sklearn.utils._metadata_requests | set_fit_request | Request metadata passed to the ``fit`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
check_input : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``check_input`` parameter in ``fit``.
feature_names : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``feature_names`` parameter in ``fit``.
sample_weight : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``sample_weight`` parameter in ``fit``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: imodels.tree.cart_wrapper.GreedyTreeClassifier, *, check_input: Union[bool, NoneType, str] = '$UNCHANGED$', feature_names: Union[bool, NoneType, str] = '$UNCHANGED$', sample_weight: Union[bool, NoneType, str] = '$UNCHANGED$') -> imodels.tree.cart_wrapper.GreedyTreeClassifier |
17,289 | imodels.tree.cart_wrapper | GreedyTreeRegressor | Wrapper around sklearn greedy tree regressor
| class GreedyTreeRegressor(DecisionTreeRegressor):
"""Wrapper around sklearn greedy tree regressor
"""
def fit(self, X, y, feature_names=None, sample_weight=None, check_input=True):
"""Build a decision tree regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (real numbers). Use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : DecisionTreeRegressor
Fitted estimator.
"""
if feature_names is not None:
self.feature_names = feature_names
else:
self.feature_names = ["X" + str(i + 1) for i in range(X.shape[1])]
super().fit(X, y, sample_weight=sample_weight, check_input=check_input)
self._set_complexity()
return self  # match the documented return value
def _set_complexity(self):
"""Set complexity as number of non-leaf nodes
"""
self.complexity_ = compute_tree_complexity(self.tree_)
def __str__(self):
if hasattr(self, 'feature_names') and self.feature_names is not None:
return 'GreedyTree:\n' + export_text(self, feature_names=self.feature_names, show_weights=True)
else:
return 'GreedyTree:\n' + export_text(self, show_weights=True)
| (*, criterion='squared_error', splitter='best', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0, ccp_alpha=0.0, monotonic_cst=None) |
17,295 | imodels.tree.cart_wrapper | __str__ | null | def __str__(self):
if hasattr(self, 'feature_names') and self.feature_names is not None:
return 'GreedyTree:\n' + export_text(self, feature_names=self.feature_names, show_weights=True)
else:
return 'GreedyTree:\n' + export_text(self, show_weights=True)
| (self) |
17,316 | imodels.tree.cart_wrapper | fit | Build a decision tree regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (real numbers). Use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : DecisionTreeRegressor
Fitted estimator.
| def fit(self, X, y, feature_names=None, sample_weight=None, check_input=True):
"""Build a decision tree regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (real numbers). Use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : DecisionTreeRegressor
Fitted estimator.
"""
if feature_names is not None:
self.feature_names = feature_names
else:
self.feature_names = ["X" + str(i + 1) for i in range(X.shape[1])]
super().fit(X, y, sample_weight=sample_weight, check_input=check_input)
self._set_complexity()
return self  # match the documented return value
| (self, X, y, feature_names=None, sample_weight=None, check_input=True) |
17,327 | imodels.tree.cart_ccp | HSDecisionTreeCCPClassifierCV | null | class HSDecisionTreeCCPClassifierCV(HSTreeClassifier):
def __init__(self, estimator_: BaseEstimator, reg_param_list: List[float] = [0.1, 1, 10, 50, 100, 500],
desired_complexity: int = 1, cv: int = 3, scoring=None, *args, **kwargs):
super().__init__(estimator_=estimator_, reg_param=None)
self.reg_param_list = np.array(reg_param_list)
self.cv = cv
self.scoring = scoring
self.desired_complexity = desired_complexity
def fit(self, X, y, sample_weight=None, *args, **kwargs):
m = DecisionTreeCCPClassifier(self.estimator_, desired_complexity=self.desired_complexity)
m.fit(X, y, sample_weight, *args, **kwargs)
self.scores_ = []
for reg_param in self.reg_param_list:
est = HSTreeClassifier(deepcopy(m.estimator_), reg_param)
cv_scores = cross_val_score(est, X, y, cv=self.cv, scoring=self.scoring)
self.scores_.append(np.mean(cv_scores))
self.reg_param = self.reg_param_list[np.argmax(self.scores_)]
super().fit(X=X, y=y)
| (estimator_: sklearn.base.BaseEstimator, reg_param_list: List[float] = [0.1, 1, 10, 50, 100, 500], desired_complexity: int = 1, cv: int = 3, scoring=None, *args, **kwargs) |
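The selection loop is a plain cross-validated sweep: score every candidate with cross_val_score and keep the argmax of the mean scores. Because HSTreeClassifier's shrinkage parameter is defined outside this excerpt, the sketch below applies the same pattern to an ordinary sklearn tree, with max_leaf_nodes standing in for reg_param_list:

```python
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier

X, y = load_breast_cancer(return_X_y=True)
param_list = [2, 4, 8, 16]  # stand-in for reg_param_list

scores = []
for p in param_list:
    est = DecisionTreeClassifier(max_leaf_nodes=p, random_state=0)
    scores.append(np.mean(cross_val_score(est, X, y, cv=3)))
best = param_list[int(np.argmax(scores))]
print(best, scores)  # chosen value and the mean CV score per candidate
```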
17,329 | imodels.tree.cart_ccp | __init__ | null | def __init__(self, estimator_: BaseEstimator, reg_param_list: List[float] = [0.1, 1, 10, 50, 100, 500],
desired_complexity: int = 1, cv: int = 3, scoring=None, *args, **kwargs):
super().__init__(estimator_=estimator_, reg_param=None)
self.reg_param_list = np.array(reg_param_list)
self.cv = cv
self.scoring = scoring
self.desired_complexity = desired_complexity
| (self, estimator_: sklearn.base.BaseEstimator, reg_param_list: List[float] = [0.1, 1, 10, 50, 100, 500], desired_complexity: int = 1, cv: int = 3, scoring=None, *args, **kwargs) |