index | package | name | docstring | code | signature |
---|---|---|---|---|---|
17,815 | imodels.rule_set.rule_set | RuleSet | null | class RuleSet:
def _extract_rules(self, X, y):
pass
def _score_rules(self, X, y, rules):
pass
def _prune_rules(self, rules):
pass
def _eval_weighted_rule_sum(self, X) -> np.ndarray:
check_is_fitted(self, ['rules_without_feature_names_', 'n_features_', 'feature_placeholders'])
X = check_array(X)
if X.shape[1] != self.n_features_:
raise ValueError("X.shape[1] = %d should be equal to %d, the number of features at training time."
" Please reshape your data."
% (X.shape[1], self.n_features_))
df = pd.DataFrame(X, columns=self.feature_placeholders)
selected_rules = self.rules_without_feature_names_
scores = np.zeros(X.shape[0])
for r in selected_rules:
features_r_uses = list(map(lambda x: x[0], r.agg_dict.keys()))
scores[df[features_r_uses].query(str(r)).index.values] += r.args[0]
return scores
def _get_complexity(self):
check_is_fitted(self, ['rules_without_feature_names_'])
return sum([len(rule.agg_dict) for rule in self.rules_without_feature_names_])
| () |
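For intuition, the weighted rule sum computed by `_eval_weighted_rule_sum` above can be sketched with plain pandas query strings and hand-picked weights. This is only an illustration of the mechanism: it does not use imodels' `Rule` objects, and the column names and weights below are made up.

```python
import numpy as np
import pandas as pd

# toy data with placeholder column names, mirroring feature_placeholders above
X = np.array([[1.0, 5.0], [3.0, 2.0], [4.0, 8.0]])
df = pd.DataFrame(X, columns=['feature_0', 'feature_1'])

# each rule: (query string over placeholder names, learned weight)
rules = [('feature_0 > 2', 0.5), ('feature_1 < 6', 0.25)]

scores = np.zeros(X.shape[0])
for query_str, weight in rules:
    matching_idx = df.query(query_str).index.values  # rows satisfying the rule
    scores[matching_idx] += weight                   # add that rule's weight
print(scores)  # scores -> [0.25, 0.75, 0.5]
```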
17,821 | imodels.algebraic.slim | SLIMClassifier | null | class SLIMClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, alpha=1):
'''Model is initialized during fitting
Params
------
alpha: float
weight for sparsity penalty
'''
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
        '''Fit a logistic model with integer coefficients and L1 regularization.
        In case the integer optimization fails, fit a regularized logistic regression and round the coefficients.
        Params
        ------
        sample_weight: np.ndarray (n,), optional
weight for each individual sample
'''
X, y = check_X_y(X, y)
check_classification_targets(y)
self.n_features_in_ = X.shape[1]
self.classes_, y = np.unique(y, return_inverse=True) # deals with str inputs
self.model_ = LogisticRegression()
self.model_.classes_ = self.classes_
try:
import cvxpy as cp # package for optimization, import here to make it optional
from cvxpy.error import SolverError
# declare the integer-valued optimization variable
w = cp.Variable(X.shape[1], integer=True)
# set up the minimization problem
logits = -X @ w
residuals = cp.multiply(1 - y, logits) - cp.logistic(logits)
if sample_weight is not None:
residuals = cp.multiply(sample_weight, residuals)
celoss = -cp.sum(residuals)
l1_penalty = self.alpha * cp.norm(w, 1)
obj = cp.Minimize(celoss + l1_penalty)
prob = cp.Problem(obj)
try:
# solve the problem using an appropriate solver
prob.solve()
self.model_.coef_ = np.array([w.value.astype(int)])
self.model_.intercept_ = 0
except SolverError:
warnings.warn("mosek solver required for mixed-integer exponential cone "
"programming. Rounding non-integer coefficients instead")
self._fit_backup(X, y, sample_weight)
except ImportError:
warnings.warn("Should install cvxpy with pip install cvxpy. Rounding non-integer "
"coefficients instead.")
self._fit_backup(X, y, sample_weight)
return self
def _fit_backup(self, X, y, sample_weight=None):
m = LogisticRegression(C=1 / self.alpha)
m.fit(X, y, sample_weight=sample_weight)
self.model_.coef_ = np.round(m.coef_).astype(int)
self.model_.intercept_ = m.intercept_
def predict(self, X):
check_is_fitted(self)
X = check_array(X)
return self.model_.predict(X)
def predict_proba(self, X):
check_is_fitted(self)
X = check_array(X)
return self.model_.predict_proba(X)
| (alpha=1) |
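A minimal usage sketch for `SLIMClassifier`, assuming it is importable from the top-level `imodels` package; the synthetic data and parameter choices are illustrative, and if cvxpy plus a mixed-integer-capable solver are not available the rounding fallback shown above is used.

```python
import numpy as np
from imodels import SLIMClassifier  # assumed top-level export

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 5))
y = (X[:, 0] + X[:, 1] > 0).astype(int)

clf = SLIMClassifier(alpha=1)   # alpha controls the sparsity penalty
clf.fit(X, y)
print(clf.model_.coef_)         # integer coefficients
print(clf.predict_proba(X[:3]))
```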
17,823 | imodels.algebraic.slim | __init__ | Model is initialized during fitting
Params
------
alpha: float
weight for sparsity penalty
| def __init__(self, alpha=1):
'''Model is initialized during fitting
Params
------
alpha: float
weight for sparsity penalty
'''
self.alpha = alpha
| (self, alpha=1) |
17,829 | imodels.algebraic.slim | _fit_backup | null | def _fit_backup(self, X, y, sample_weight=None):
m = LogisticRegression(C=1 / self.alpha)
m.fit(X, y, sample_weight=sample_weight)
self.model_.coef_ = np.round(m.coef_).astype(int)
self.model_.intercept_ = m.intercept_
| (self, X, y, sample_weight=None) |
17,838 | imodels.algebraic.slim | fit | Fit a logistic model with integer coefficients and L1 regularization.
In case the integer optimization fails, fit a regularized logistic regression and round the coefficients.
Params
------
sample_weight: np.ndarray (n,), optional
weight for each individual sample
| def fit(self, X, y, sample_weight=None):
    '''Fit a logistic model with integer coefficients and L1 regularization.
    In case the integer optimization fails, fit a regularized logistic regression and round the coefficients.
    Params
    ------
    sample_weight: np.ndarray (n,), optional
weight for each individual sample
'''
X, y = check_X_y(X, y)
check_classification_targets(y)
self.n_features_in_ = X.shape[1]
self.classes_, y = np.unique(y, return_inverse=True) # deals with str inputs
self.model_ = LogisticRegression()
self.model_.classes_ = self.classes_
try:
import cvxpy as cp # package for optimization, import here to make it optional
from cvxpy.error import SolverError
# declare the integer-valued optimization variable
w = cp.Variable(X.shape[1], integer=True)
# set up the minimization problem
logits = -X @ w
residuals = cp.multiply(1 - y, logits) - cp.logistic(logits)
if sample_weight is not None:
residuals = cp.multiply(sample_weight, residuals)
celoss = -cp.sum(residuals)
l1_penalty = self.alpha * cp.norm(w, 1)
obj = cp.Minimize(celoss + l1_penalty)
prob = cp.Problem(obj)
try:
# solve the problem using an appropriate solver
prob.solve()
self.model_.coef_ = np.array([w.value.astype(int)])
self.model_.intercept_ = 0
except SolverError:
warnings.warn("mosek solver required for mixed-integer exponential cone "
"programming. Rounding non-integer coefficients instead")
self._fit_backup(X, y, sample_weight)
except ImportError:
warnings.warn("Should install cvxpy with pip install cvxpy. Rounding non-integer "
"coefficients instead.")
self._fit_backup(X, y, sample_weight)
return self
| (self, X, y, sample_weight=None) |
17,841 | imodels.algebraic.slim | predict | null | def predict(self, X):
check_is_fitted(self)
X = check_array(X)
return self.model_.predict(X)
| (self, X) |
17,842 | imodels.algebraic.slim | predict_proba | null | def predict_proba(self, X):
check_is_fitted(self)
X = check_array(X)
return self.model_.predict_proba(X)
| (self, X) |
17,847 | imodels.algebraic.slim | SLIMRegressor | Sparse integer linear model
Params
------
alpha: float
weight for sparsity penalty
| class SLIMRegressor(BaseEstimator, RegressorMixin):
'''Sparse integer linear model
Params
------
alpha: float
weight for sparsity penalty
'''
def __init__(self, alpha=0.01):
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
        '''Fit a linear model with integer coefficients and L1 regularization.
        In case the integer optimization fails, fit lasso and round the coefficients.
        Params
        ------
        sample_weight: np.ndarray (n,), optional
weight for each individual sample
'''
X, y = check_X_y(X, y)
self.n_features_in_ = X.shape[1]
self.model_ = LinearRegression()
try:
import cvxpy as cp # package for optimization, import here to make it optional
from cvxpy.error import SolverError
# declare the integer-valued optimization variable
w = cp.Variable(X.shape[1], integer=True)
# set up the minimization problem
residuals = X @ w - y
if sample_weight is not None:
residuals = cp.multiply(sample_weight, residuals)
mse = cp.sum_squares(residuals)
l1_penalty = self.alpha * cp.norm(w, 1)
obj = cp.Minimize(mse + l1_penalty)
prob = cp.Problem(obj)
try:
# solve the problem using an appropriate solver
prob.solve()
self.model_.coef_ = w.value.astype(int)
self.model_.intercept_ = 0
except SolverError:
warnings.warn("gurobi, mosek, or cplex solver required for mixed-integer "
"quadratic programming. Rounding non-integer coefficients instead.")
self._fit_backup(X, y, sample_weight)
except ImportError:
warnings.warn("Should install cvxpy with pip install cvxpy. Rounding non-integer "
"coefficients instead.")
self._fit_backup(X, y, sample_weight)
return self
def _fit_backup(self, X, y, sample_weight):
m = Lasso(alpha=self.alpha)
m.fit(X, y, sample_weight=sample_weight)
self.model_.coef_ = np.round(m.coef_).astype(int)
self.model_.intercept_ = m.intercept_
def predict(self, X):
check_is_fitted(self)
X = check_array(X)
return self.model_.predict(X)
| (alpha=0.01) |
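Written out, the problem the regressor builds with cvxpy above is an integer program (with optional per-sample weights multiplying the residuals):

$$\min_{w \in \mathbb{Z}^{p}} \; \lVert X w - y \rVert_2^2 + \alpha \lVert w \rVert_1$$

When no mixed-integer solver is available, the `_fit_backup` path fits a Lasso with the same `alpha` and rounds its coefficients to integers.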
17,849 | imodels.algebraic.slim | __init__ | null | def __init__(self, alpha=0.01):
self.alpha = alpha
| (self, alpha=0.01) |
17,855 | imodels.algebraic.slim | _fit_backup | null | def _fit_backup(self, X, y, sample_weight):
m = Lasso(alpha=self.alpha)
m.fit(X, y, sample_weight=sample_weight)
self.model_.coef_ = np.round(m.coef_).astype(int)
self.model_.intercept_ = m.intercept_
| (self, X, y, sample_weight) |
17,864 | imodels.algebraic.slim | fit | Fit a linear model with integer coefficients and L1 regularization.
In case the integer optimization fails, fit lasso and round the coefficients.
Params
------
sample_weight: np.ndarray (n,), optional
weight for each individual sample
| def fit(self, X, y, sample_weight=None):
    '''Fit a linear model with integer coefficients and L1 regularization.
    In case the integer optimization fails, fit lasso and round the coefficients.
    Params
    ------
    sample_weight: np.ndarray (n,), optional
weight for each individual sample
'''
X, y = check_X_y(X, y)
self.n_features_in_ = X.shape[1]
self.model_ = LinearRegression()
try:
import cvxpy as cp # package for optimization, import here to make it optional
from cvxpy.error import SolverError
# declare the integer-valued optimization variable
w = cp.Variable(X.shape[1], integer=True)
# set up the minimization problem
residuals = X @ w - y
if sample_weight is not None:
residuals = cp.multiply(sample_weight, residuals)
mse = cp.sum_squares(residuals)
l1_penalty = self.alpha * cp.norm(w, 1)
obj = cp.Minimize(mse + l1_penalty)
prob = cp.Problem(obj)
try:
# solve the problem using an appropriate solver
prob.solve()
self.model_.coef_ = w.value.astype(int)
self.model_.intercept_ = 0
except SolverError:
warnings.warn("gurobi, mosek, or cplex solver required for mixed-integer "
"quadratic programming. Rounding non-integer coefficients instead.")
self._fit_backup(X, y, sample_weight)
except ImportError:
warnings.warn("Should install cvxpy with pip install cvxpy. Rounding non-integer "
"coefficients instead.")
self._fit_backup(X, y, sample_weight)
return self
| (self, X, y, sample_weight=None) |
17,872 | imodels.rule_set.skope_rules | SkopeRulesClassifier | An easy-interpretable classifier optimizing simple logical rules.
Parameters
----------
feature_names : list of str, optional
The names of each feature to be used for returning rules in string
format.
precision_min : float, optional (default=0.5)
The minimal precision of a rule to be selected.
recall_min : float, optional (default=0.01)
The minimal recall of a rule to be selected.
n_estimators : int, optional (default=10)
The number of base estimators (rules) to use for prediction. More are
built before selection. All are available in the estimators_ attribute.
max_samples : int or float, optional (default=.8)
The number of samples to draw from X to train each decision tree, from
which rules are generated and selected.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
If max_samples is larger than the number of samples provided,
all samples will be used for all trees (no sampling).
max_samples_features : int or float, optional (default=1.0)
The number of features to draw from X to train each decision tree, from
which rules are generated and selected.
- If int, then draw `max_features` features.
- If float, then draw `max_features * X.shape[1]` features.
bootstrap : boolean, optional (default=False)
Whether samples are drawn with replacement.
bootstrap_features : boolean, optional (default=False)
Whether features are drawn with replacement.
max_depth : integer or List or None, optional (default=3)
The maximum depth of the decision trees. If None, then nodes are
expanded until all leaves are pure or until all leaves contain less
than min_samples_split samples.
If an iterable is passed, you will train n_estimators
for each tree depth. It allows you to create and compare
rules of different length.
max_depth_duplication : integer, optional (default=None)
The maximum depth of the decision tree for rule deduplication,
if None then no deduplication occurs.
max_features : int, float, string or None, optional (default="auto")
The number of features considered (by each decision tree) when looking
for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node for
each decision tree.
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional
- If int, random_state is the seed used by the random number generator.
- If RandomState instance, random_state is the random number generator.
- If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
rules_ : dict of tuples (rule, precision, recall, nb).
The collection of `n_estimators` rules used in the ``predict`` method.
The rules are generated by fitted sub-estimators (decision trees). Each
rule satisfies recall_min and precision_min conditions. The selection
is done according to OOB precisions.
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators used to generate candidate
rules.
estimators_samples_ : list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator.
estimators_features_ : list of arrays
The subset of drawn features for each base estimator.
max_samples_ : integer
The actual number of samples
n_features_ : integer
The number of features when ``fit`` is performed.
classes_ : array, shape (n_classes,)
The classes labels.
| class SkopeRulesClassifier(BaseEstimator, RuleSet, ClassifierMixin):
"""An easy-interpretable classifier optimizing simple logical rules.
Parameters
----------
feature_names : list of str, optional
The names of each feature to be used for returning rules in string
format.
precision_min : float, optional (default=0.5)
The minimal precision of a rule to be selected.
recall_min : float, optional (default=0.01)
The minimal recall of a rule to be selected.
n_estimators : int, optional (default=10)
The number of base estimators (rules) to use for prediction. More are
built before selection. All are available in the estimators_ attribute.
max_samples : int or float, optional (default=.8)
The number of samples to draw from X to train each decision tree, from
which rules are generated and selected.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
If max_samples is larger than the number of samples provided,
all samples will be used for all trees (no sampling).
max_samples_features : int or float, optional (default=1.0)
The number of features to draw from X to train each decision tree, from
which rules are generated and selected.
- If int, then draw `max_features` features.
- If float, then draw `max_features * X.shape[1]` features.
bootstrap : boolean, optional (default=False)
Whether samples are drawn with replacement.
bootstrap_features : boolean, optional (default=False)
Whether features are drawn with replacement.
max_depth : integer or List or None, optional (default=3)
The maximum depth of the decision trees. If None, then nodes are
expanded until all leaves are pure or until all leaves contain less
than min_samples_split samples.
If an iterable is passed, you will train n_estimators
for each tree depth. It allows you to create and compare
rules of different length.
max_depth_duplication : integer, optional (default=None)
The maximum depth of the decision tree for rule deduplication,
if None then no deduplication occurs.
max_features : int, float, string or None, optional (default="auto")
The number of features considered (by each decision tree) when looking
for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node for
each decision tree.
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional
- If int, random_state is the seed used by the random number generator.
- If RandomState instance, random_state is the random number generator.
- If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
rules_ : dict of tuples (rule, precision, recall, nb).
The collection of `n_estimators` rules used in the ``predict`` method.
The rules are generated by fitted sub-estimators (decision trees). Each
rule satisfies recall_min and precision_min conditions. The selection
is done according to OOB precisions.
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators used to generate candidate
rules.
estimators_samples_ : list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator.
estimators_features_ : list of arrays
The subset of drawn features for each base estimator.
max_samples_ : integer
The actual number of samples
n_features_ : integer
The number of features when ``fit`` is performed.
classes_ : array, shape (n_classes,)
The classes labels.
"""
def __init__(self,
precision_min=0.5,
recall_min=0.01,
n_estimators=10,
max_samples=.8,
max_samples_features=.8,
bootstrap=False,
bootstrap_features=False,
max_depth=3,
max_depth_duplication=None,
max_features=1.,
min_samples_split=2,
n_jobs=1,
random_state=None,
verbose=0):
self.precision_min = precision_min
self.recall_min = recall_min
self.n_estimators = n_estimators
self.max_samples = max_samples
self.max_samples_features = max_samples_features
self.bootstrap = bootstrap
self.bootstrap_features = bootstrap_features
self.max_depth = max_depth
self.max_depth_duplication = max_depth_duplication
self.max_features = max_features
self.min_samples_split = min_samples_split
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
def fit(self, X, y, feature_names=None, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X. Has to follow the convention 0 for
normal data, 1 for anomalies.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples, typically
the amount in case of transactions data. Used to grow regression
trees producing further rules to be tested.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
X, y, feature_names = check_fit_arguments(self, X, y, feature_names)
check_classification_targets(y)
self.n_features_ = X.shape[1]
self.sample_weight = sample_weight
self.classes_ = unique_labels(y)
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError(
"This method needs samples of at least 2 classes in the data, but the data contains only one class: %r"
% self.classes_[0]
)
if not isinstance(self.max_depth_duplication, int) and self.max_depth_duplication is not None:
raise ValueError("max_depth_duplication should be an integer")
if not set(self.classes_) == {0, 1}:
warn(
"Found labels %s. This method assumes target class to be labeled as 1 and normal data to be labeled as "
"0. Any label different from 0 will be considered as being from the target class."
% set(self.classes_)
)
y = (y > 0)
# ensure that max_samples is in [1, n_samples]:
n_samples = X.shape[0]
if isinstance(self.max_samples, six.string_types):
raise ValueError(
'max_samples (%s) is not supported. Valid choices are: "auto", int or float'
% self.max_samples
)
elif isinstance(self.max_samples, INTEGER_TYPES):
if self.max_samples > n_samples:
warn(
"max_samples (%s) is greater than the total number of samples (%s). max_samples will be set "
"to n_samples for estimation."
% (self.max_samples, n_samples)
)
max_samples = n_samples
else:
max_samples = self.max_samples
else: # float
if not (0. < self.max_samples <= 1.):
raise ValueError("max_samples must be in (0, 1], got %r" % self.max_samples)
max_samples = int(self.max_samples * X.shape[0])
self.max_samples_ = max_samples
self.feature_dict_ = get_feature_dict(X.shape[1], feature_names)
self.feature_placeholders = np.array(list(self.feature_dict_.keys()))
self.feature_names = np.array(list(self.feature_dict_.values()))
extracted_rules, self.estimators_samples_, self.estimators_features_ = self._extract_rules(X, y)
scored_rules = self._score_rules(X, y, extracted_rules)
self.rules_ = self._prune_rules(scored_rules)
self.rules_without_feature_names_ = self.rules_
self.rules_ = [
replace_feature_name(rule, self.feature_dict_) for rule in self.rules_
]
self.complexity_ = self._get_complexity()
return self
def predict(self, X) -> np.ndarray:
"""Predict if a particular sample is an outlier or not.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32``
Returns
-------
is_outlier : array, shape (n_samples,)
            For each observation, tells whether or not (1 or 0) it should
            be considered an outlier according to the selected rules.
"""
X = check_array(X)
return np.argmax(self.predict_proba(X), axis=1)
def predict_proba(self, X) -> np.ndarray:
'''Predict probability of a particular sample being an outlier or not
'''
X = check_array(X)
weight_sum = np.sum([w[0] for (r, w) in self.rules_without_feature_names_])
if weight_sum == 0:
return np.vstack((np.ones(X.shape[0]), np.zeros(X.shape[0]))).transpose()
y = self._eval_weighted_rule_sum(X) / weight_sum
return np.vstack((1 - y, y)).transpose()
def _rules_vote(self, X) -> np.ndarray:
"""Score representing a vote of the base classifiers (rules).
        The score of an input sample is computed as the sum of the binary
        rule outputs: a score of k means that k rules have voted positively.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training input samples.
Returns
-------
scores : array, shape (n_samples,)
The score of the input samples.
The higher, the more abnormal. Positive scores represent outliers,
null scores represent inliers.
"""
# Check if fit had been called
check_is_fitted(self, ['rules_', 'estimators_samples_', 'max_samples_'])
# Input validation
X = check_array(X)
if X.shape[1] != self.n_features_:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of features at training time."
" Please reshape your data."
% (X.shape[1], self.n_features_))
df = pandas.DataFrame(X, columns=self.feature_placeholders)
selected_rules = self.rules_without_feature_names_
scores = np.zeros(X.shape[0])
for (r, _) in selected_rules:
scores[list(df.query(r).index)] += 1
return scores
def _score_top_rules(self, X) -> np.ndarray:
"""Score representing an ordering between the base classifiers (rules).
        The score is high when the instance is detected by a well-performing rule.
        If there are n rules, ordered by increasing OOB precision, a score of k
        means that the kth rule has voted positively, but none of the first (k-1)
        rules have.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training input samples.
Returns
-------
scores : array, shape (n_samples,)
The score of the input samples.
Positive scores represent outliers, null scores represent inliers.
"""
# Check if fit had been called
check_is_fitted(self, ['rules_', 'estimators_samples_', 'max_samples_'])
# Input validation
X = check_array(X)
if X.shape[1] != self.n_features_:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of features at training time."
" Please reshape your data."
% (X.shape[1], self.n_features_))
df = pandas.DataFrame(X, columns=self.feature_placeholders)
selected_rules = self.rules_without_feature_names_
scores = np.zeros(X.shape[0])
for (k, r) in enumerate(list((selected_rules))):
scores[list(df.query(r.rule).index)] = np.maximum(
len(selected_rules) - k,
scores[list(df.query(r.rule).index)])
return scores
def _predict_top_rules(self, X, n_rules) -> np.ndarray:
"""Predict if a particular sample is an outlier or not,
using the n_rules most performing rules.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32``
n_rules : int
            The number of rules used for the prediction. If one of the
            n_rules best-performing rules is activated, the prediction
            is equal to 1.
Returns
-------
is_outlier : array, shape (n_samples,)
            For each observation, tells whether or not (1 or 0) it should
            be considered an outlier according to the selected rules.
"""
return np.array((self._score_top_rules(X) > len(self.rules_) - n_rules),
dtype=int)
def _extract_rules(self, X, y) -> Tuple[List[str], List[np.array], List[np.array]]:
return extract_skope(X, y,
feature_names=self.feature_placeholders,
sample_weight=self.sample_weight,
n_estimators=self.n_estimators,
max_samples=self.max_samples_,
max_samples_features=self.max_samples_features,
bootstrap=self.bootstrap,
bootstrap_features=self.bootstrap_features,
max_depths=self.max_depth,
max_features=self.max_features,
min_samples_split=self.min_samples_split,
n_jobs=self.n_jobs,
random_state=self.random_state,
verbose=self.verbose)
def _score_rules(self, X, y, rules) -> List[Rule]:
return score_precision_recall(X, y, rules, self.estimators_samples_, self.estimators_features_, self.feature_placeholders)
def _prune_rules(self, rules) -> List[Rule]:
return deduplicate(
prune_mins(rules, self.precision_min, self.recall_min),
self.max_depth_duplication
)
| (precision_min=0.5, recall_min=0.01, n_estimators=10, max_samples=0.8, max_samples_features=0.8, bootstrap=False, bootstrap_features=False, max_depth=3, max_depth_duplication=None, max_features=1.0, min_samples_split=2, n_jobs=1, random_state=None, verbose=0) |
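A minimal usage sketch for `SkopeRulesClassifier`, assuming it is importable from the top-level `imodels` package and given a binary 0/1 target as the `fit` method above expects; the data and feature names are illustrative.

```python
import numpy as np
from imodels import SkopeRulesClassifier  # assumed top-level export

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 4))
y = (X[:, 0] > 0.5).astype(int)

clf = SkopeRulesClassifier(n_estimators=10, precision_min=0.5, recall_min=0.01)
clf.fit(X, y, feature_names=['age', 'bmi', 'bp', 'glucose'])
print(clf.rules_[:3])      # pruned rules with readable feature names
print(clf.predict(X[:5]))  # 0/1 predictions via predict_proba + argmax
```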
17,874 | imodels.rule_set.skope_rules | __init__ | null | def __init__(self,
precision_min=0.5,
recall_min=0.01,
n_estimators=10,
max_samples=.8,
max_samples_features=.8,
bootstrap=False,
bootstrap_features=False,
max_depth=3,
max_depth_duplication=None,
max_features=1.,
min_samples_split=2,
n_jobs=1,
random_state=None,
verbose=0):
self.precision_min = precision_min
self.recall_min = recall_min
self.n_estimators = n_estimators
self.max_samples = max_samples
self.max_samples_features = max_samples_features
self.bootstrap = bootstrap
self.bootstrap_features = bootstrap_features
self.max_depth = max_depth
self.max_depth_duplication = max_depth_duplication
self.max_features = max_features
self.min_samples_split = min_samples_split
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
| (self, precision_min=0.5, recall_min=0.01, n_estimators=10, max_samples=0.8, max_samples_features=0.8, bootstrap=False, bootstrap_features=False, max_depth=3, max_depth_duplication=None, max_features=1.0, min_samples_split=2, n_jobs=1, random_state=None, verbose=0) |
17,881 | imodels.rule_set.skope_rules | _extract_rules | null | def _extract_rules(self, X, y) -> Tuple[List[str], List[np.array], List[np.array]]:
return extract_skope(X, y,
feature_names=self.feature_placeholders,
sample_weight=self.sample_weight,
n_estimators=self.n_estimators,
max_samples=self.max_samples_,
max_samples_features=self.max_samples_features,
bootstrap=self.bootstrap,
bootstrap_features=self.bootstrap_features,
max_depths=self.max_depth,
max_features=self.max_features,
min_samples_split=self.min_samples_split,
n_jobs=self.n_jobs,
random_state=self.random_state,
verbose=self.verbose)
| (self, X, y) -> Tuple[List[str], List[<built-in function array>], List[<built-in function array>]] |
17,892 | imodels.rule_set.skope_rules | _score_rules | null | def _score_rules(self, X, y, rules) -> List[Rule]:
return score_precision_recall(X, y, rules, self.estimators_samples_, self.estimators_features_, self.feature_placeholders)
| (self, X, y, rules) -> List[imodels.util.rule.Rule] |
17,896 | imodels.rule_set.skope_rules | fit | Fit the model according to the given training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X. Has to follow the convention 0 for
normal data, 1 for anomalies.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples, typically
the amount in case of transactions data. Used to grow regression
trees producing further rules to be tested.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
| def fit(self, X, y, feature_names=None, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X. Has to follow the convention 0 for
normal data, 1 for anomalies.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples, typically
the amount in case of transactions data. Used to grow regression
trees producing further rules to be tested.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
X, y, feature_names = check_fit_arguments(self, X, y, feature_names)
check_classification_targets(y)
self.n_features_ = X.shape[1]
self.sample_weight = sample_weight
self.classes_ = unique_labels(y)
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError(
"This method needs samples of at least 2 classes in the data, but the data contains only one class: %r"
% self.classes_[0]
)
if not isinstance(self.max_depth_duplication, int) and self.max_depth_duplication is not None:
raise ValueError("max_depth_duplication should be an integer")
if not set(self.classes_) == {0, 1}:
warn(
"Found labels %s. This method assumes target class to be labeled as 1 and normal data to be labeled as "
"0. Any label different from 0 will be considered as being from the target class."
% set(self.classes_)
)
y = (y > 0)
# ensure that max_samples is in [1, n_samples]:
n_samples = X.shape[0]
if isinstance(self.max_samples, six.string_types):
raise ValueError(
'max_samples (%s) is not supported. Valid choices are: "auto", int or float'
% self.max_samples
)
elif isinstance(self.max_samples, INTEGER_TYPES):
if self.max_samples > n_samples:
warn(
"max_samples (%s) is greater than the total number of samples (%s). max_samples will be set "
"to n_samples for estimation."
% (self.max_samples, n_samples)
)
max_samples = n_samples
else:
max_samples = self.max_samples
else: # float
if not (0. < self.max_samples <= 1.):
raise ValueError("max_samples must be in (0, 1], got %r" % self.max_samples)
max_samples = int(self.max_samples * X.shape[0])
self.max_samples_ = max_samples
self.feature_dict_ = get_feature_dict(X.shape[1], feature_names)
self.feature_placeholders = np.array(list(self.feature_dict_.keys()))
self.feature_names = np.array(list(self.feature_dict_.values()))
extracted_rules, self.estimators_samples_, self.estimators_features_ = self._extract_rules(X, y)
scored_rules = self._score_rules(X, y, extracted_rules)
self.rules_ = self._prune_rules(scored_rules)
self.rules_without_feature_names_ = self.rules_
self.rules_ = [
replace_feature_name(rule, self.feature_dict_) for rule in self.rules_
]
self.complexity_ = self._get_complexity()
return self
| (self, X, y, feature_names=None, sample_weight=None) |
17,902 | sklearn.utils._metadata_requests | set_fit_request | Request metadata passed to the ``fit`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
feature_names : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``feature_names`` parameter in ``fit``.
sample_weight : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``sample_weight`` parameter in ``fit``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: imodels.rule_set.skope_rules.SkopeRulesClassifier, *, feature_names: Union[bool, NoneType, str] = '$UNCHANGED$', sample_weight: Union[bool, NoneType, str] = '$UNCHANGED$') -> imodels.rule_set.skope_rules.SkopeRulesClassifier |
17,905 | imodels.rule_set.slipper_util | SlipperBaseEstimator | An estimator that supports building rules as described in
A Simple, Fast, and Effective Rule Learner (1999). Intended to be used
as part of the SlipperRulesClassifier.
| class SlipperBaseEstimator(BaseEstimator, ClassifierMixin):
""" An estimator that supports building rules as described in
A Simple, Fast, and Effective Rule Learner (1999). Intended to be used
as part of the SlipperRulesClassifier.
"""
def __init__(self):
self.Z = None
self.rule = None
self.D = None
def _make_candidate(self, X, y, curr_rule, feat, A_c):
""" Make candidate rules for grow routine to compare scores"""
# make candidate rules
candidates = [curr_rule.copy() for _ in range(3)]
candidates = [
x + [{
'feature': int(feat),
'operator': operator,
'pivot': float(A_c)}]
for x, operator in zip(candidates, ['>', '<', '=='])
]
# pick best condition
Zs = [self._grow_rule_obj(X, y, r) for r in candidates]
return candidates[Zs.index(max(Zs))]
def _condition_classify(self, X, condition):
"""
Helper function to make classifications for a condition
in a rule
"""
logic = 'X[:, {}] {} {}'.format(
condition['feature'],
condition['operator'],
condition['pivot']
)
output = np.where(eval(logic))
return output[0]
def _rule_predict(self, X, rule):
""" return all indices for which the passed rule holds on X """
preds = np.zeros(X.shape[0])
positive_cases = set(range(X.shape[0]))
for condition in rule:
outputs = set(list(self._condition_classify(X, condition)))
positive_cases = positive_cases.intersection(outputs)
preds[list(positive_cases)] = 1
return preds
def _get_design_matrices(self, X, y, rule):
""" produce design matrices used in most equations"""
preds = self._rule_predict(X, rule)
W_plus_idx = np.where((preds == 1) & (y == 1))
W_minus_idx = np.where((preds == 1) & (y == 0))
return np.sum(self.D[W_plus_idx]), np.sum(self.D[W_minus_idx])
def _grow_rule_obj(self, X, y, rule):
""" equation to maximize in growing rule
equation 6 from Cohen & Singer (1999)
"""
W_plus, W_minus = self._get_design_matrices(X, y, rule)
# C_R = self._sample_weight(W_plus, W_minus)
return np.sqrt(W_plus) - np.sqrt(W_minus)
def _sample_weight(self, plus, minus):
""" Calculate learner sample weight
        in the paper this is C_R, the confidence of the learner
"""
return 0.5 * np.log((plus + (1 / (2 * len(self.D)))) /
(minus + 1 / (2 * len(self.D))))
def _grow_rule(self, X, y):
""" Starts with empty conjunction of conditions and
greedily adds rules to maximize Z_tilde
"""
stop_condition = False
features = list(range(X.shape[1]))
# rule is stored as a list of dictionaries, each dictionary is a condition
curr_rule = []
while not stop_condition:
candidate_rule = curr_rule.copy()
for feat in features:
try:
pivots = np.percentile(X[:, feat], range(0, 100, 4),
method='linear')
                except TypeError:  # older numpy versions lack the `method` keyword
pivots = np.percentile(X[:, feat], range(0, 100, 4), # deprecated
interpolation='midpoint')
# get a list of possible rules
feature_candidates = [
self._make_candidate(X, y, curr_rule, feat, A_c)
for A_c in pivots
]
# get max Z_tilde and update candidate accordingly
tildes = [self._grow_rule_obj(X, y, r) for r in feature_candidates]
if max(tildes) > self._grow_rule_obj(X, y, candidate_rule):
candidate_rule = feature_candidates[
tildes.index(max(tildes))
]
            preds = self._rule_predict(X, candidate_rule)
            # np.where returns a tuple of index arrays, so take the first element
            negative_coverage = np.where((preds == y) & (y == 0))[0]
            if self._grow_rule_obj(X, y, curr_rule) >= self._grow_rule_obj(X, y, candidate_rule) or \
                    len(negative_coverage) == 0:
stop_condition = True
else:
curr_rule = candidate_rule.copy()
return curr_rule
def _prune_rule(self, X, y, rule):
""" Remove conditions from greedily built rule until
objective does not improve
"""
stop_condition = False
curr_rule = rule.copy()
while not stop_condition:
candidate_rules = []
if len(curr_rule) == 1:
return curr_rule
candidate_rules = [
self._pop_condition(curr_rule, condition)
for condition in curr_rule
]
            # score each candidate with one condition removed
            prune_objs = [self._prune_rule_obj(X, y, x) for x in candidate_rules]
            best_prune = candidate_rules[
                prune_objs.index(min(prune_objs))
            ]
            # keep pruning while the best pruned candidate improves the objective
            if self._prune_rule_obj(X, y, curr_rule) > min(prune_objs):
                curr_rule = best_prune.copy()
else:
stop_condition = True
return curr_rule
def _pop_condition(self, rule, condition):
"""
Remove a condition from an existing Rule object
"""
temp = rule.copy()
temp.remove(condition)
return temp
def _make_default_rule(self, X, y):
"""
Make the default rule that is true for every observation
of data set. Without default rule a SlipperBaseEstimator would never
predict negative
"""
default_rule = []
features = random.choices(
range(X.shape[1]),
k=random.randint(2, 8)
)
default_rule.append({
'feature': str(features[0]),
'operator': '>',
'pivot': str(min(X[:, features[0]]))
})
for i, x in enumerate(features):
if i % 2:
default_rule.append({
'feature': x,
'operator': '<',
'pivot': str(max(X[:, x]))
})
else:
default_rule.append({
'feature': x,
'operator': '>',
'pivot': str(min(X[:, x]))
})
return default_rule
def _prune_rule_obj(self, X, y, rule):
"""
objective function for prune rule routine
eq 7 from Cohen & Singer (1999)
"""
V_plus, V_minus = self._get_design_matrices(X, y, rule)
C_R = self._sample_weight(V_plus, V_minus)
return (1 - V_plus - V_minus) + V_plus * np.exp(-C_R) \
+ V_minus * np.exp(C_R)
def _eq_5(self, X, y, rule):
"""
equation 5 from Cohen & Singer (1999)
used to compare the learned rule with a default rule
"""
W_plus, W_minus = self._get_design_matrices(X, y, rule)
return 1 - np.square(np.sqrt(W_plus) - np.sqrt(W_minus))
def _set_rule_or_default(self, X, y, learned_rule):
"""
Compare output of eq 5 between learned rule and default rule
return rule that minimizes eq 5
"""
rules = [self._make_default_rule(X, y), learned_rule]
scores = [self._eq_5(X, y, rule) for rule in rules]
self.rule = rules[scores.index(min(scores))]
def _make_feature_dict(self, num_features, features):
"""
Map features to place holder names
"""
if features is None:
new_feats = ['X_' + str(i) for i in range(num_features)]
else:
new_feats = features
self.feature_dict = {
old_feat: new_feat for old_feat, new_feat in enumerate(new_feats)
}
def predict_proba(self, X):
proba = self.predict(X)
proba = proba.reshape(-1, 1)
proba = np.hstack([
np.zeros(proba.shape), proba
])
return proba
def predict(self, X):
"""
external predict function that returns predictions
using estimators rule
"""
return self._rule_predict(X, self.rule)
def fit(self, X, y, sample_weight=None, feature_names=None):
"""
Main loop for training
"""
X, y, feature_names = check_fit_arguments(self, X, y, feature_names)
if sample_weight is not None:
self.D = sample_weight
X_grow, X_prune, y_grow, y_prune = \
train_test_split(X, y, test_size=0.33)
self._make_feature_dict(X.shape[1], feature_names)
rule = self._grow_rule(X_grow, y_grow)
rule = self._prune_rule(X_prune, y_prune, rule)
self._set_rule_or_default(X, y, rule)
return self
| () |
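To make the rule representation above concrete, here is a small self-contained sketch (toy data, not imodels code) of how a rule, stored as a list of condition dicts, classifies rows in the same way as `_condition_classify` and `_rule_predict`.

```python
import numpy as np

X = np.array([[0.2, 3.0],
              [0.9, 1.0],
              [0.8, 4.0]])

# a rule is a conjunction of conditions, each "X[:, feature] operator pivot"
rule = [
    {'feature': 0, 'operator': '>', 'pivot': 0.5},   # X[:, 0] > 0.5
    {'feature': 1, 'operator': '<', 'pivot': 2.0},   # X[:, 1] < 2.0
]

# a sample is predicted positive only if it satisfies every condition
preds = np.ones(X.shape[0], dtype=bool)
for cond in rule:
    preds &= eval('X[:, {}] {} {}'.format(cond['feature'], cond['operator'], cond['pivot']))
print(preds.astype(int))  # [0 1 0]
```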
17,907 | imodels.rule_set.slipper_util | __init__ | null | def __init__(self):
self.Z = None
self.rule = None
self.D = None
| (self) |
17,913 | imodels.rule_set.slipper_util | _condition_classify |
Helper function to make classifications for a condition
in a rule
| def _condition_classify(self, X, condition):
"""
Helper function to make classifications for a condition
in a rule
"""
logic = 'X[:, {}] {} {}'.format(
condition['feature'],
condition['operator'],
condition['pivot']
)
output = np.where(eval(logic))
return output[0]
| (self, X, condition) |
17,914 | imodels.rule_set.slipper_util | _eq_5 |
equation 5 from Cohen & Singer (1999)
used to compare the learned rule with a default rule
| def _eq_5(self, X, y, rule):
"""
equation 5 from Cohen & Singer (1999)
used to compare the learned rule with a default rule
"""
W_plus, W_minus = self._get_design_matrices(X, y, rule)
return 1 - np.square(np.sqrt(W_plus) - np.sqrt(W_minus))
| (self, X, y, rule) |
17,915 | imodels.rule_set.slipper_util | _get_design_matrices | produce design matrices used in most equations | def _get_design_matrices(self, X, y, rule):
""" produce design matrices used in most equations"""
preds = self._rule_predict(X, rule)
W_plus_idx = np.where((preds == 1) & (y == 1))
W_minus_idx = np.where((preds == 1) & (y == 0))
return np.sum(self.D[W_plus_idx]), np.sum(self.D[W_minus_idx])
| (self, X, y, rule) |
17,919 | imodels.rule_set.slipper_util | _grow_rule | Starts with empty conjunction of conditions and
greedily adds rules to maximize Z_tilde
| def _grow_rule(self, X, y):
""" Starts with empty conjunction of conditions and
greedily adds rules to maximize Z_tilde
"""
stop_condition = False
features = list(range(X.shape[1]))
# rule is stored as a list of dictionaries, each dictionary is a condition
curr_rule = []
while not stop_condition:
candidate_rule = curr_rule.copy()
for feat in features:
try:
pivots = np.percentile(X[:, feat], range(0, 100, 4),
method='linear')
            except TypeError:  # older numpy versions lack the `method` keyword
pivots = np.percentile(X[:, feat], range(0, 100, 4), # deprecated
interpolation='midpoint')
# get a list of possible rules
feature_candidates = [
self._make_candidate(X, y, curr_rule, feat, A_c)
for A_c in pivots
]
# get max Z_tilde and update candidate accordingly
tildes = [self._grow_rule_obj(X, y, r) for r in feature_candidates]
if max(tildes) > self._grow_rule_obj(X, y, candidate_rule):
candidate_rule = feature_candidates[
tildes.index(max(tildes))
]
        preds = self._rule_predict(X, candidate_rule)
        # np.where returns a tuple of index arrays, so take the first element
        negative_coverage = np.where((preds == y) & (y == 0))[0]
        if self._grow_rule_obj(X, y, curr_rule) >= self._grow_rule_obj(X, y, candidate_rule) or \
                len(negative_coverage) == 0:
stop_condition = True
else:
curr_rule = candidate_rule.copy()
return curr_rule
| (self, X, y) |
17,920 | imodels.rule_set.slipper_util | _grow_rule_obj | equation to maximize in growing rule
equation 6 from Cohen & Singer (1999)
| def _grow_rule_obj(self, X, y, rule):
""" equation to maximize in growing rule
equation 6 from Cohen & Singer (1999)
"""
W_plus, W_minus = self._get_design_matrices(X, y, rule)
# C_R = self._sample_weight(W_plus, W_minus)
return np.sqrt(W_plus) - np.sqrt(W_minus)
| (self, X, y, rule) |
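For reference, the quantity maximized while growing a rule (equation 6 of Cohen & Singer, 1999, as implemented above) is

$$\tilde{Z} = \sqrt{W_+} - \sqrt{W_-},$$

where $W_+$ and $W_-$ are the total sample weights of the positive and negative examples covered by the rule, as returned by `_get_design_matrices`.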
17,921 | imodels.rule_set.slipper_util | _make_candidate | Make candidate rules for grow routine to compare scores | def _make_candidate(self, X, y, curr_rule, feat, A_c):
""" Make candidate rules for grow routine to compare scores"""
# make candidate rules
candidates = [curr_rule.copy() for _ in range(3)]
candidates = [
x + [{
'feature': int(feat),
'operator': operator,
'pivot': float(A_c)}]
for x, operator in zip(candidates, ['>', '<', '=='])
]
# pick best condition
Zs = [self._grow_rule_obj(X, y, r) for r in candidates]
return candidates[Zs.index(max(Zs))]
| (self, X, y, curr_rule, feat, A_c) |
17,922 | imodels.rule_set.slipper_util | _make_default_rule |
Make the default rule that is true for every observation
of data set. Without default rule a SlipperBaseEstimator would never
predict negative
| def _make_default_rule(self, X, y):
"""
Make the default rule that is true for every observation
of data set. Without default rule a SlipperBaseEstimator would never
predict negative
"""
default_rule = []
features = random.choices(
range(X.shape[1]),
k=random.randint(2, 8)
)
default_rule.append({
'feature': str(features[0]),
'operator': '>',
'pivot': str(min(X[:, features[0]]))
})
for i, x in enumerate(features):
if i % 2:
default_rule.append({
'feature': x,
'operator': '<',
'pivot': str(max(X[:, x]))
})
else:
default_rule.append({
'feature': x,
'operator': '>',
'pivot': str(min(X[:, x]))
})
return default_rule
| (self, X, y) |
17,923 | imodels.rule_set.slipper_util | _make_feature_dict |
Map features to place holder names
| def _make_feature_dict(self, num_features, features):
"""
Map features to place holder names
"""
if features is None:
new_feats = ['X_' + str(i) for i in range(num_features)]
else:
new_feats = features
self.feature_dict = {
old_feat: new_feat for old_feat, new_feat in enumerate(new_feats)
}
| (self, num_features, features) |
17,925 | imodels.rule_set.slipper_util | _pop_condition |
Remove a condition from an existing Rule object
| def _pop_condition(self, rule, condition):
"""
Remove a condition from an existing Rule object
"""
temp = rule.copy()
temp.remove(condition)
return temp
| (self, rule, condition) |
17,926 | imodels.rule_set.slipper_util | _prune_rule | Remove conditions from greedily built rule until
objective does not improve
| def _prune_rule(self, X, y, rule):
""" Remove conditions from greedily built rule until
objective does not improve
"""
stop_condition = False
curr_rule = rule.copy()
while not stop_condition:
candidate_rules = []
if len(curr_rule) == 1:
return curr_rule
candidate_rules = [
self._pop_condition(curr_rule, condition)
for condition in curr_rule
]
        # score each candidate with one condition removed
        prune_objs = [self._prune_rule_obj(X, y, x) for x in candidate_rules]
        best_prune = candidate_rules[
            prune_objs.index(min(prune_objs))
        ]
        # keep pruning while the best pruned candidate improves the objective
        if self._prune_rule_obj(X, y, curr_rule) > min(prune_objs):
            curr_rule = best_prune.copy()
else:
stop_condition = True
return curr_rule
| (self, X, y, rule) |
17,927 | imodels.rule_set.slipper_util | _prune_rule_obj |
objective function for prune rule routine
eq 7 from Cohen & Singer (1999)
| def _prune_rule_obj(self, X, y, rule):
"""
objective function for prune rule routine
eq 7 from Cohen & Singer (1999)
"""
V_plus, V_minus = self._get_design_matrices(X, y, rule)
C_R = self._sample_weight(V_plus, V_minus)
return (1 - V_plus - V_minus) + V_plus * np.exp(-C_R) \
+ V_minus * np.exp(C_R)
| (self, X, y, rule) |
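The pruning objective implemented above (equation 7 of Cohen & Singer, 1999) is

$$(1 - V_+ - V_-) + V_+ e^{-C_R} + V_- e^{C_R},$$

where $V_+$ and $V_-$ are the covered positive and negative weights on the pruning set and $C_R$ is the confidence from `_sample_weight`; conditions keep being removed while this value decreases.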
17,930 | imodels.rule_set.slipper_util | _rule_predict | return all indices for which the passed rule holds on X | def _rule_predict(self, X, rule):
""" return all indices for which the passed rule holds on X """
preds = np.zeros(X.shape[0])
positive_cases = set(range(X.shape[0]))
for condition in rule:
outputs = set(list(self._condition_classify(X, condition)))
positive_cases = positive_cases.intersection(outputs)
preds[list(positive_cases)] = 1
return preds
| (self, X, rule) |
17,931 | imodels.rule_set.slipper_util | _sample_weight | Calculate learner sample weight
in the paper this is C_R, the confidence of the learner
| def _sample_weight(self, plus, minus):
""" Calculate learner sample weight
    in the paper this is C_R, the confidence of the learner
"""
return 0.5 * np.log((plus + (1 / (2 * len(self.D)))) /
(minus + 1 / (2 * len(self.D))))
| (self, plus, minus) |
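As implemented above, with $n$ equal to `len(self.D)` training samples, the confidence is

$$C_R = \frac{1}{2}\,\ln\!\left(\frac{W_+ + \tfrac{1}{2n}}{W_- + \tfrac{1}{2n}}\right),$$

where the $\tfrac{1}{2n}$ smoothing keeps the value finite when a rule covers no negative examples.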
17,932 | imodels.rule_set.slipper_util | _set_rule_or_default |
Compare output of eq 5 between learned rule and default rule
return rule that minimizes eq 5
| def _set_rule_or_default(self, X, y, learned_rule):
"""
Compare output of eq 5 between learned rule and default rule
return rule that minimizes eq 5
"""
rules = [self._make_default_rule(X, y), learned_rule]
scores = [self._eq_5(X, y, rule) for rule in rules]
self.rule = rules[scores.index(min(scores))]
| (self, X, y, learned_rule) |
17,935 | imodels.rule_set.slipper_util | fit |
Main loop for training
| def fit(self, X, y, sample_weight=None, feature_names=None):
"""
Main loop for training
"""
X, y, feature_names = check_fit_arguments(self, X, y, feature_names)
if sample_weight is not None:
self.D = sample_weight
X_grow, X_prune, y_grow, y_prune = \
train_test_split(X, y, test_size=0.33)
self._make_feature_dict(X.shape[1], feature_names)
rule = self._grow_rule(X_grow, y_grow)
rule = self._prune_rule(X_prune, y_prune, rule)
self._set_rule_or_default(X, y, rule)
return self
| (self, X, y, sample_weight=None, feature_names=None) |
17,938 | imodels.rule_set.slipper_util | predict |
external predict function that returns predictions
using estimators rule
| def predict(self, X):
"""
external predict function that returns predictions
using estimators rule
"""
return self._rule_predict(X, self.rule)
| (self, X) |
17,939 | imodels.rule_set.slipper_util | predict_proba | null | def predict_proba(self, X):
proba = self.predict(X)
proba = proba.reshape(-1, 1)
proba = np.hstack([
np.zeros(proba.shape), proba
])
return proba
| (self, X) |
17,944 | imodels.rule_set.slipper | SlipperClassifier | null | class SlipperClassifier(BoostedRulesClassifier):
def __init__(self, n_estimators=10, **kwargs):
'''
An estimator that supports building rules as described in
A Simple, Fast, and Effective Rule Learner (1999).
Parameters
----------
n_estimators
'''
super().__init__(estimator=SlipperBaseEstimator(), n_estimators=n_estimators, **kwargs)
# super().__init__(n_estimators, SlipperBaseEstimator)
| (n_estimators=10, **kwargs) |
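A minimal usage sketch for `SlipperClassifier`, assuming the top-level `imodels` export and the `BoostedRulesClassifier` interface it inherits; the data and the choice of `n_estimators` are illustrative.

```python
import numpy as np
from imodels import SlipperClassifier  # assumed top-level export

rng = np.random.default_rng(0)
X = rng.normal(size=(150, 3))
y = ((X[:, 0] > 0) & (X[:, 1] < 0.5)).astype(int)

clf = SlipperClassifier(n_estimators=5)  # boosts 5 SlipperBaseEstimator rules
clf.fit(X, y)
print(clf.predict(X[:5]))
```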
17,947 | imodels.rule_set.slipper | __init__ |
An estimator that supports building rules as described in
A Simple, Fast, and Effective Rule Learner (1999).
Parameters
----------
n_estimators
| def __init__(self, n_estimators=10, **kwargs):
'''
An estimator that supports building rules as described in
A Simple, Fast, and Effective Rule Learner (1999).
Parameters
----------
n_estimators
'''
super().__init__(estimator=SlipperBaseEstimator(), n_estimators=n_estimators, **kwargs)
# super().__init__(n_estimators, SlipperBaseEstimator)
| (self, n_estimators=10, **kwargs) |
17,985 | imodels.tree.tao | TaoTreeClassifier | null | class TaoTreeClassifier(TaoTree, ClassifierMixin):
pass
| (model_type: str = 'CART', n_iters: int = 20, model_args: dict = {'max_leaf_nodes': 15}, randomize_tree=False, update_scoring='accuracy', min_node_samples_tao=3, min_leaf_samples_tao=2, node_model='stump', node_model_args: dict = {}, reg_param: float = 0.001, weight_errors: bool = False, verbose: int = 0) |
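A minimal usage sketch for `TaoTreeClassifier`, assuming the top-level `imodels` export; per the docstring below, TAO first fits a CART tree and then alternately re-optimizes its nodes. The data and hyperparameters are illustrative.

```python
import numpy as np
from imodels import TaoTreeClassifier  # assumed top-level export

rng = np.random.default_rng(0)
X = rng.normal(size=(300, 6))
y = (X[:, 0] * X[:, 1] > 0).astype(int)

clf = TaoTreeClassifier(n_iters=5, model_args={'max_leaf_nodes': 7})
clf.fit(X, y)
print(clf.predict(X[:5]))
```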
17,987 | imodels.tree.tao | __init__ | TAO: Alternating optimization of decision trees, with application to learning sparse oblique trees (Neurips 2018)
https://proceedings.neurips.cc/paper/2018/hash/185c29dc24325934ee377cfda20e414c-Abstract.html
Note: this implementation learns single-feature splits rather than oblique trees.
Currently supports
- given a CART tree, posthoc improve it with TAO
- also works with HSTreeCV
Todo
- update bottom to top otherwise input points don't get updated
- update leaf nodes
- support regression
- support FIGS
- support error-weighting
- support oblique trees
- support generic models at decision node
- support pruning (e.g. if weights -> 0, then remove a node)
- support classifiers in leaves
Parameters
----------
model_type: str
'CART' or 'FIGS'
n_iters
Number of iterations to run TAO
model_args
Arguments to pass to the model
randomize_tree
Whether to randomize the tree before each iteration
min_node_samples_tao: int
Minimum number of samples in a node to apply tao
min_leaf_samples_tao: int
node_model: str
'stump' or 'linear'
reg_param
Regularization parameter for node-wise linear model (if node_model is 'linear')
verbose: int
Verbosity level
| def __init__(self, model_type: str = 'CART',
n_iters: int = 20,
model_args: dict = {'max_leaf_nodes': 15},
randomize_tree=False,
update_scoring='accuracy',
min_node_samples_tao=3,
min_leaf_samples_tao=2,
node_model='stump',
node_model_args: dict = {},
reg_param: float = 1e-3,
weight_errors: bool = False,
verbose: int = 0,
):
"""TAO: Alternating optimization of decision trees, with application to learning sparse oblique trees (Neurips 2018)
https://proceedings.neurips.cc/paper/2018/hash/185c29dc24325934ee377cfda20e414c-Abstract.html
Note: this implementation learns single-feature splits rather than oblique trees.
Currently supports
- given a CART tree, posthoc improve it with TAO
- also works with HSTreeCV
Todo
- update bottom to top otherwise input points don't get updated
- update leaf nodes
- support regression
- support FIGS
- support error-weighting
- support oblique trees
- support generic models at decision node
- support pruning (e.g. if weights -> 0, then remove a node)
- support classifiers in leaves
Parameters
----------
model_type: str
'CART' or 'FIGS'
n_iters
Number of iterations to run TAO
model_args
Arguments to pass to the model
randomize_tree
Whether to randomize the tree before each iteration
min_node_samples_tao: int
Minimum number of samples in a node to apply tao
min_leaf_samples_tao: int
node_model: str
'stump' or 'linear'
reg_param
Regularization parameter for node-wise linear model (if node_model is 'linear')
verbose: int
Verbosity level
"""
super().__init__()
self.model_type = model_type
self.n_iters = n_iters
self.model_args = model_args
self.randomize_tree = randomize_tree
self.update_scoring = update_scoring
self.min_node_samples_tao = min_node_samples_tao
self.min_leaf_samples_tao = min_leaf_samples_tao
self.node_model = node_model
self.node_model_args = node_model_args
self.reg_param = reg_param
self.weight_errors = weight_errors
self.verbose = verbose
self._init_prediction_task() # decides between regressor and classifier
| (self, model_type: str = 'CART', n_iters: int = 20, model_args: dict = {'max_leaf_nodes': 15}, randomize_tree=False, update_scoring='accuracy', min_node_samples_tao=3, min_leaf_samples_tao=2, node_model='stump', node_model_args: dict = {}, reg_param: float = 0.001, weight_errors: bool = False, verbose: int = 0) |
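A short usage sketch of the classifier built on this constructor (assuming it is exposed as imodels.TaoTreeClassifier; synthetic data for illustration only):
# Sketch: fit a CART tree and post-hoc improve it with TAO iterations.
import numpy as np
from imodels import TaoTreeClassifier

rng = np.random.RandomState(0)
X = rng.rand(200, 5)
y = (X[:, 0] > 0.5).astype(int)

m = TaoTreeClassifier(model_args={'max_leaf_nodes': 7}, n_iters=5)
m.fit(X, y)
print(m.score(X, y))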
17,996 | imodels.tree.tao | _init_prediction_task |
TaoTreeRegressor and TaoTreeClassifier override this method
to alter the prediction task. When using this class directly,
the prediction task defaults to classification
| def _init_prediction_task(self):
"""
TaoTreeRegressor and TaoTreeClassifier override this method
to alter the prediction task. When using this class directly,
the prediction task defaults to classification
"""
self.prediction_task = 'classification'
| (self) |
18,000 | imodels.tree.tao | _tao_iter_cart | Updates tree by applying the tao algorithm to the tree
Params
------
X: array-like of shape (n_samples, n_features)
The input samples.
y: array-like of shape (n_samples,)
The target values.
tree: DecisionTreeClassifier.tree_ or DecisionTreeRegressor.tree_
The fitted tree to be post-hoc improved
| def _tao_iter_cart(self, X, y, tree, X_score=None, y_score=None, sample_weight=None):
"""Updates tree by applying the tao algorithm to the tree
Params
------
X: array-like of shape (n_samples, n_features)
The input samples.
y: array-like of shape (n_samples,)
The target values.
tree: DecisionTreeClassifier.tree_ or DecisionTreeRegressor.tree_
The fitted tree to be post-hoc improved
"""
# Tree properties
children_left = tree.children_left
children_right = tree.children_right
feature = tree.feature
threshold = tree.threshold
value = tree.value
# For each node, store the path to that node #######################################################
indexes_with_prefix_paths = [] # data structure with (index, path_to_node_index)
# e.g. if node 3 is the left child of node 1 which is the right child of node 0
# then we get (3, [(0, R), (1, L)])
# start with the root node id (0) and its depth (0)
queue = deque()
queue.append((0, []))
while len(queue) > 0:
node_id, path_to_node_index = queue.popleft()
indexes_with_prefix_paths.append((node_id, path_to_node_index))
# If a split node, append left and right children and depth to queue
if children_left[node_id] != children_right[node_id]:
queue.append((children_left[node_id], path_to_node_index + [(node_id, 'L')]))
queue.append((children_right[node_id], path_to_node_index + [(node_id, 'R')]))
# print(indexes_with_prefix_paths)
num_updates = 0
# Reversing BFS queue presents nodes bottom -> top one level at a time
for (node_id, path_to_node_index) in reversed(indexes_with_prefix_paths):
# For each node, try a TAO update
# print('node_id', node_id, path_to_node_index)
# Compute the points being input to the node ######################################
def filter_points_by_path(X, y, path_to_node_index):
"""Returns the points in X that are in the path to the node"""
for node_id, direction in path_to_node_index:
idxs = X[:, feature[node_id]] <= threshold[node_id]
if direction == 'R':
idxs = ~idxs
# print('idxs', idxs.size, idxs.sum())
X = X[idxs]
y = y[idxs]
return X, y
X_node, y_node = filter_points_by_path(X, y, path_to_node_index)
if sample_weight is not None:
sample_weight_node = filter_points_by_path(X, sample_weight, path_to_node_index)[1]
else:
sample_weight_node = np.ones(y_node.size)
# Skip over leaf nodes and nodes with too few samples ######################################
if children_left[node_id] == children_right[node_id]: # is leaf node
if isinstance(self, RegressorMixin) and X_node.shape[0] >= self.min_leaf_samples_tao:
# old_score = self.model.score(X, y)
value[node_id] = np.mean(y_node)
"""
new_score = self.model.score(X, y)
if new_score > old_score:
print(f'\tLeaf improved score from {old_score:0.3f} to {new_score:0.3f}')
if new_score < old_score:
print(f'\tLeaf reduced score from {old_score:0.3f} to {new_score:0.3f}')
# raise ValueError('Leaf update reduced score')
"""
# print('\tshapes', X_node.shape, y_node.shape)
# print('\tvals:', value[node_id][0][0], np.mean(y_node))
# assert value[node_id][0][0] == np.mean(y_node), 'unless tree changed, vals should be leaf means'
continue
elif X_node.shape[0] < self.min_node_samples_tao:
continue
# Compute the outputs for these points if they go left or right ######################################
def predict_from_node(X, node_id):
"""Returns predictions for X starting at node node_id"""
def predict_from_node(x, node_id):
"""Returns predictions for x starting at node node_id"""
if children_left[node_id] == children_right[node_id]:
if isinstance(self, RegressorMixin):
return value[node_id]
if isinstance(self, ClassifierMixin):
return np.argmax(value[node_id]) # note value stores counts for each class
if x[feature[node_id]] <= threshold[node_id]:
return predict_from_node(x, children_left[node_id])
else:
return predict_from_node(x, children_right[node_id])
preds = np.zeros(X.shape[0])
for i in range(X.shape[0]):
preds[i] = predict_from_node(X[i], node_id)
return preds
y_node_left = predict_from_node(X_node, children_left[node_id])
y_node_right = predict_from_node(X_node, children_right[node_id])
if node_id == 0: # root node
assert np.all(np.logical_or(self.model.predict(X_node) == y_node_left,
self.model.predict(
X_node) == y_node_right)), \
'actual predictions should match either predict_from_node left or right'
# Decide on prediction target (want to go left (0) / right (1) when advantageous)
# TAO paper binarizes these (e.g. predict 0 or 1 depending on which of these is better)
y_node_absolute_errors = np.abs(np.vstack((y_node - y_node_left,
y_node - y_node_right))).T
# screen out indexes where going left/right has no effect
idxs_relevant = y_node_absolute_errors[:, 0] != y_node_absolute_errors[:, 1]
if idxs_relevant.sum() <= 1: # nothing to change
if self.verbose:
print('no errors to change')
continue
# assert np.all((self.model.predict(X) != y)[idxs_relevant]), 'relevant indexes should be errors'
y_node_target = np.argmin(y_node_absolute_errors, axis=1)
y_node_target = y_node_target[idxs_relevant]
# here, we optionally weight these errors by the size of the error
# if we want this to work for classification, must switch to predict_proba
# if self.prediction_task == 'regression':
# weight by the difference in error ###############################################################
if self.weight_errors:
sample_weight_node *= np.abs(y_node_absolute_errors[:, 1] - y_node_absolute_errors[:, 0])
sample_weight_node_target = sample_weight_node[idxs_relevant]
X_node = X_node[idxs_relevant]
# Fit a 1-variable binary classification model on these outputs ######################################
# Note: this could be customized (e.g. for sparse oblique trees)
best_score = -np.inf
best_feat_num = None
for feat_num in range(X.shape[1]):
if isinstance(self, ClassifierMixin):
if self.node_model == 'linear':
m = LogisticRegression(**self.node_model_args)
elif self.node_model == 'stump':
m = DecisionTreeClassifier(max_depth=1, **self.node_model_args)
if isinstance(self, RegressorMixin):
if self.node_model == 'linear':
m = LinearRegression(**self.node_model_args)
elif self.node_model == 'stump':
m = DecisionTreeRegressor(max_depth=1, **self.node_model_args)
X_node_single_feat = X_node[:, feat_num: feat_num + 1]
m.fit(X_node_single_feat, y_node_target, sample_weight=sample_weight_node_target)
score = m.score(X_node_single_feat, y_node_target, sample_weight=sample_weight_node_target)
if score > best_score:
best_score = score
best_feat_num = feat_num
best_model = deepcopy(m)
if self.node_model == 'linear':
best_threshold = -best_model.intercept_ / best_model.coef_[0]
elif self.node_model == 'stump':
best_threshold = best_model.tree_.threshold[0]
# print((feature[node_id], threshold[node_id]), '\n->',
# (best_feat_num, best_threshold))
# Update the node with the new feature / threshold ######################################
old_feat_num = feature[node_id]
old_threshold = threshold[node_id]
# print(X.sum(), y.sum())
if X_score is None:
X_score = X
if y_score is None:
y_score = y
scorer = get_scorer(self.update_scoring)
old_score = scorer(self.model, X_score, y_score)
feature[node_id] = best_feat_num
threshold[node_id] = best_threshold
new_score = scorer(self.model, X_score, y_score)
# debugging
if self.verbose > 1:
if old_score == new_score:
print('\tno change', best_feat_num, old_feat_num)
print(f'\tscore_total {old_score:0.4f} -> {new_score:0.4f}')
if old_score >= new_score:
feature[node_id] = old_feat_num
threshold[node_id] = old_threshold
else:
# (Track if any updates were necessary)
num_updates += 1
if self.verbose > 0:
print(f'Improved score from {old_score:0.4f} to {new_score:0.4f}')
# debugging snippet (if score_m_new > score_m_old, then new_score should be > old_score, but it isn't!!!!)
if self.verbose > 1:
"""
X_node_single_feat = X_node[:, best_feat_num: best_feat_num + 1]
score_m_new = best_model.score(X_node_single_feat, y_node_target, sample_weight=sample_weight)
best_model.tree_.feature[0] = old_feat_num
best_model.tree_.threshold[0] = old_threshold
X_node_single_feat = X_node[:, old_feat_num: old_feat_num + 1]
score_m_old = best_model.score(X_node_single_feat, y_node_target, sample_weight=sample_weight)
print('\t\t', f'score_local {score_m_old:0.4f} -> {score_m_new:0.4f}')
"""
return num_updates
| (self, X, y, tree, X_score=None, y_score=None, sample_weight=None) |
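The iteration above depends on walking sklearn's array-based tree (children_left, children_right, feature, threshold) and replaying each node's split path to find the samples that reach it. A standalone sketch of just that traversal, written independently of the class:
# Standalone sketch: BFS over sklearn's tree arrays, replaying each node's
# split path to count the training samples that reach it.
from collections import deque
import numpy as np
from sklearn.tree import DecisionTreeClassifier

rng = np.random.RandomState(0)
X = rng.rand(100, 3)
y = (X[:, 0] > 0.5).astype(int)
tree = DecisionTreeClassifier(max_leaf_nodes=4).fit(X, y).tree_

queue = deque([(0, [])])  # (node_id, path of (ancestor_id, 'L'/'R'))
while queue:
    node_id, path = queue.popleft()
    idxs = np.ones(X.shape[0], dtype=bool)
    for anc, direction in path:  # replay the ancestors' splits
        go_left = X[:, tree.feature[anc]] <= tree.threshold[anc]
        idxs &= go_left if direction == 'L' else ~go_left
    print(node_id, int(idxs.sum()), 'samples reach this node')
    if tree.children_left[node_id] != tree.children_right[node_id]:
        queue.append((tree.children_left[node_id], path + [(node_id, 'L')]))
        queue.append((tree.children_right[node_id], path + [(node_id, 'R')]))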
18,003 | imodels.tree.tao | fit |
Params
------
sample_weight: array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Splits that would create child nodes with net zero or negative weight
are ignored while searching for a split in each node.
| def fit(self, X, y=None, feature_names=None, sample_weight=None):
"""
Params
------
sample_weight: array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Splits that would create child nodes with net zero or negative weight
are ignored while searching for a split in each node.
"""
X, y, feature_names = check_fit_arguments(self, X, y, feature_names)
if isinstance(self, RegressorMixin):
raise Warning('TAO Regression is not yet tested')
X, y = check_X_y(X, y)
y = y.astype(float)
if feature_names is not None:
self.feature_names_ = feature_names
if self.model_type == 'CART':
if isinstance(self, ClassifierMixin):
self.model = DecisionTreeClassifier(**self.model_args)
elif isinstance(self, RegressorMixin):
self.model = DecisionTreeRegressor(**self.model_args)
self.model.fit(X, y, sample_weight=sample_weight)
if self.verbose:
print(export_text(self.model))
# plot_tree(self.model)
# plt.savefig('/Users/chandan/Desktop/tree.png', dpi=300)
# plt.show()
if self.randomize_tree:
np.random.shuffle(self.model.tree_.feature) # shuffle CART features
# np.random.shuffle(self.model.tree_.threshold)
for i in range(self.model.tree_.node_count): # split on feature medians
self.model.tree_.threshold[i] = np.median(
X[:, self.model.tree_.feature[i]])
if self.verbose:
print('starting score', self.model.score(X, y))
for i in range(self.n_iters):
num_updates = self._tao_iter_cart(X, y, self.model.tree_, sample_weight=sample_weight)
if num_updates == 0:
break
return self
| (self, X, y=None, feature_names=None, sample_weight=None) |
18,006 | imodels.tree.tao | predict | null | def predict(self, X):
return self.model.predict(X)
| (self, X) |
18,007 | imodels.tree.tao | predict_proba | null | def predict_proba(self, X):
return self.model.predict_proba(X)
| (self, X) |
18,008 | imodels.tree.tao | score | null | def score(self, X, y):
return self.model.score(X, y)
| (self, X, y) |
18,011 | imodels.tree.tao | TaoTreeRegressor | null | class TaoTreeRegressor(TaoTree, RegressorMixin):
pass
| (model_type: str = 'CART', n_iters: int = 20, model_args: dict = {'max_leaf_nodes': 15}, randomize_tree=False, update_scoring='accuracy', min_node_samples_tao=3, min_leaf_samples_tao=2, node_model='stump', node_model_args: dict = {}, reg_param: float = 0.001, weight_errors: bool = False, verbose: int = 0) |
18,037 | imodels.algebraic.tree_gam | TreeGAMClassifier | null | class TreeGAMClassifier(TreeGAM, ClassifierMixin):
...
| (n_boosting_rounds=100, max_leaf_nodes=3, reg_param=0.0, learning_rate: float = 0.01, n_boosting_rounds_marginal=0, max_leaf_nodes_marginal=2, reg_param_marginal=0.0, fit_linear_marginal=None, select_linear_marginal=False, decay_rate_towards_marginal=1.0, fit_posthoc_tree_coefs=None, boosting_strategy='cyclic', validation_frac=0.15, random_state=None) |
18,039 | imodels.algebraic.tree_gam | __init__ |
Params
------
n_boosting_rounds : int
Number of boosting rounds for the cyclic boosting.
max_leaf_nodes : int
Maximum number of leaf nodes for the trees in the cyclic boosting.
reg_param : float
Regularization parameter for the cyclic boosting.
learning_rate: float
Learning rate for the cyclic boosting.
n_boosting_rounds_marginal : int
Number of boosting rounds for the marginal boosting.
max_leaf_nodes_marginal : int
Maximum number of leaf nodes for the trees in the marginal boosting.
reg_param_marginal : float
Regularization parameter for the marginal boosting.
fit_linear_marginal : str [None, "None", "ridge", "NNLS"]
Whether to fit a linear model to the marginal effects.
NNLS for non-negative least squares
ridge for ridge regression
None for no linear model
select_linear_marginal: bool
Whether to restrict features to those with non-negative coefficients in the linear model.
Requires that fit_linear_marginal is NNLS.
decay_rate_towards_marginal: float, [0, 1]
Decay rate for regularizing each shape function towards the marginal shape function after each step
1 means no decay, 0 means only use marginal effects
shape = decay_rate_towards_marginal * shape + (1 - decay_rate_towards_marginal) * marginal_shape
The way this is implemented is by keeping track of how many times to multiply decay_rate_towards_marginal for each cyclic estimator
fit_posthoc_tree_coefs: str [None, "ridge"]
Whether to fit a linear model to the tree coefficients after fitting the cyclic boosting.
boosting_strategy : str ["cyclic", "greedy"]
Whether to use cyclic boosting (cycle over features) or greedy boosting (select best feature at each step)
validation_frac: float
Fraction of data to use for early stopping.
random_state : int
Random seed.
| def __init__(
self,
n_boosting_rounds=100,
max_leaf_nodes=3,
reg_param=0.0,
learning_rate: float = 0.01,
n_boosting_rounds_marginal=0,
max_leaf_nodes_marginal=2,
reg_param_marginal=0.0,
fit_linear_marginal=None,
select_linear_marginal=False,
decay_rate_towards_marginal=1.0,
fit_posthoc_tree_coefs=None,
boosting_strategy="cyclic",
validation_frac=0.15,
random_state=None,
):
"""
Params
------
n_boosting_rounds : int
Number of boosting rounds for the cyclic boosting.
max_leaf_nodes : int
Maximum number of leaf nodes for the trees in the cyclic boosting.
reg_param : float
Regularization parameter for the cyclic boosting.
learning_rate: float
Learning rate for the cyclic boosting.
n_boosting_rounds_marginal : int
Number of boosting rounds for the marginal boosting.
max_leaf_nodes_marginal : int
Maximum number of leaf nodes for the trees in the marginal boosting.
reg_param_marginal : float
Regularization parameter for the marginal boosting.
fit_linear_marginal : str [None, "None", "ridge", "NNLS"]
Whether to fit a linear model to the marginal effects.
NNLS for non-negative least squares
ridge for ridge regression
None for no linear model
select_linear_marginal: bool
Whether to restrict features to those with non-negative coefficients in the linear model.
Requires that fit_linear_marginal is NNLS.
decay_rate_towards_marginal: float, [0, 1]
Decay rate for regularizing each shape function towards the marginal shape function after each step
1 means no decay, 0 means only use marginal effects
shape = decay_rate_towards_marginal * shape + (1 - decay_rate_towards_marginal) * marginal_shape
The way this is implemented is by keeping track of how many times to multiply decay_rate_towards_marginal for each cyclic estimator
fit_posthoc_tree_coefs: str [None, "ridge"]
Whether to fit a linear model to the tree coefficients after fitting the cyclic boosting.
boosting_strategy : str ["cyclic", "greedy"]
Whether to use cyclic boosting (cycle over features) or greedy boosting (select best feature at each step)
validation_frac: float
Fraction of data to use for early stopping.
random_state : int
Random seed.
"""
self.n_boosting_rounds = n_boosting_rounds
self.max_leaf_nodes = max_leaf_nodes
self.reg_param = reg_param
self.learning_rate = learning_rate
self.max_leaf_nodes_marginal = max_leaf_nodes_marginal
self.reg_param_marginal = reg_param_marginal
self.n_boosting_rounds_marginal = n_boosting_rounds_marginal
self.fit_linear_marginal = fit_linear_marginal
self.select_linear_marginal = select_linear_marginal
self.decay_rate_towards_marginal = decay_rate_towards_marginal
self.fit_posthoc_tree_coefs = fit_posthoc_tree_coefs
self.boosting_strategy = boosting_strategy
self.validation_frac = validation_frac
self.random_state = random_state
| (self, n_boosting_rounds=100, max_leaf_nodes=3, reg_param=0.0, learning_rate: float = 0.01, n_boosting_rounds_marginal=0, max_leaf_nodes_marginal=2, reg_param_marginal=0.0, fit_linear_marginal=None, select_linear_marginal=False, decay_rate_towards_marginal=1.0, fit_posthoc_tree_coefs=None, boosting_strategy='cyclic', validation_frac=0.15, random_state=None) |
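A minimal usage sketch of the GAM this constructor configures (assuming it is exposed as imodels.TreeGAMClassifier; toy data for illustration):
# Sketch: fit the tree-based GAM on a toy binary task and read the
# validation MSE that drives early stopping.
import numpy as np
from imodels import TreeGAMClassifier

rng = np.random.RandomState(0)
X = rng.rand(300, 4)
y = (X[:, 0] + 0.5 * X[:, 1] > 0.75).astype(int)

gam = TreeGAMClassifier(n_boosting_rounds=50, learning_rate=0.1)
gam.fit(X, y)
probs = gam.predict_proba(X)[:, 1]
print(gam.mse_val_)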
18,043 | imodels.algebraic.tree_gam | _calc_mse | null | def _calc_mse(self, X, y, sample_weight=None):
return np.average(
np.square(y - self.predict_proba(X)[:, 1]),
weights=sample_weight,
)
| (self, X, y, sample_weight=None) |
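The metric above is a weighted squared error between the labels and the predicted positive-class probability (a Brier-style score). A small standalone arithmetic check of the same computation:
# Standalone check of the weighted squared-error metric on probabilities.
import numpy as np

y = np.array([1, 0, 1])
p1 = np.array([0.8, 0.3, 0.6])  # predicted P(class 1)
w = np.array([1.0, 1.0, 2.0])
mse = np.average((y - p1) ** 2, weights=w)  # (0.04 + 0.09 + 2 * 0.16) / 4
print(mse)  # 0.1125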
18,046 | imodels.algebraic.tree_gam | _cyclic_boost | Apply cyclic boosting, storing trees in self.estimators_ | def _cyclic_boost(
self, X_train, y_train, sample_weight_train, X_val, y_val, sample_weight_val
):
"""Apply cyclic boosting, storing trees in self.estimators_"""
residuals_train = y_train - self.predict_proba(X_train)[:, 1]
mse_val = self._calc_mse(X_val, y_val, sample_weight_val)
self.decay_coef_towards_marginal_ = []
for _ in range(self.n_boosting_rounds):
boosting_round_ests = []
boosting_round_mses = []
feature_nums = np.arange(X_train.shape[1])
if self.select_linear_marginal:
assert (
self.fit_linear_marginal == "NNLS"
and self.n_boosting_rounds_marginal > 0
), "select_linear_marginal requires fit_linear_marginal to be NNLS and for n_boosting_rounds_marginal > 0"
feature_nums = np.where(self.marginal_coef_ > 0)[0]
for feature_num in feature_nums:
X_ = np.zeros_like(X_train)
X_[:, feature_num] = X_train[:, feature_num]
est = DecisionTreeRegressor(
max_leaf_nodes=self.max_leaf_nodes,
random_state=self.random_state,
)
est.fit(X_, residuals_train, sample_weight=sample_weight_train)
successfully_split_on_feature = np.all(
(est.tree_.feature[0] == feature_num) | (
est.tree_.feature[0] == -2)
)
if not successfully_split_on_feature:
continue
if self.reg_param > 0:
est = imodels.HSTreeRegressor(
est, reg_param=self.reg_param)
self.estimators_.append(est)
residuals_train_new = (
residuals_train - self.learning_rate * est.predict(X_train)
)
if self.boosting_strategy == "cyclic":
residuals_train = residuals_train_new
elif self.boosting_strategy == "greedy":
mse_train_new = self._calc_mse(
X_train, y_train, sample_weight_train
)
# don't add each estimator for greedy
boosting_round_ests.append(
deepcopy(self.estimators_.pop()))
boosting_round_mses.append(mse_train_new)
if self.boosting_strategy == "greedy":
best_est = boosting_round_ests[np.argmin(boosting_round_mses)]
self.estimators_.append(best_est)
residuals_train = (
residuals_train - self.learning_rate *
best_est.predict(X_train)
)
# decay marginal effects
if self.decay_rate_towards_marginal < 1.0:
new_decay_coefs = [self.decay_rate_towards_marginal] * (
len(self.estimators_) -
len(self.decay_coef_towards_marginal_)
)
# print(self.decay_coef_towards_marginal_)
# print('new_decay_coefs', new_decay_coefs)
self.decay_coef_towards_marginal_ = [
x * self.decay_rate_towards_marginal
for x in self.decay_coef_towards_marginal_
] + new_decay_coefs
# print(self.decay_coef_towards_marginal_)
# early stopping if validation error does not decrease
mse_val_new = self._calc_mse(X_val, y_val, sample_weight_val)
if mse_val_new >= mse_val:
# print("early stop!")
return
else:
mse_val = mse_val_new
| (self, X_train, y_train, sample_weight_train, X_val, y_val, sample_weight_val) |
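The loop above boosts on residuals with each weak learner restricted to a single feature, cycling over features and stopping early when validation error stops improving. A simplified standalone sketch of that idea (it omits the shrinkage regularization, marginal terms, and greedy mode handled by the class):
# Simplified sketch of cyclic one-feature-at-a-time boosting on residuals.
import numpy as np
from sklearn.tree import DecisionTreeRegressor

rng = np.random.RandomState(0)
X = rng.rand(400, 3)
y = (X[:, 0] + np.sin(6 * X[:, 1]) > 1).astype(float)
X_tr, X_val, y_tr, y_val = X[:300], X[300:], y[:300], y[300:]

lr, estimators = 0.1, []
pred_tr = np.full(len(y_tr), y_tr.mean())
pred_val = np.full(len(y_val), y_tr.mean())
best_val = np.mean((y_val - pred_val) ** 2)
for _ in range(100):
    for j in range(X.shape[1]):  # cycle over features
        X_j = np.zeros_like(X_tr)
        X_j[:, j] = X_tr[:, j]  # mask out every feature except j
        est = DecisionTreeRegressor(max_leaf_nodes=3).fit(X_j, y_tr - pred_tr)
        estimators.append((j, est))
        pred_tr += lr * est.predict(X_j)
        X_vj = np.zeros_like(X_val)
        X_vj[:, j] = X_val[:, j]
        pred_val += lr * est.predict(X_vj)
    mse_val = np.mean((y_val - pred_val) ** 2)
    if mse_val >= best_val:  # early stopping on validation error
        break
    best_val = mse_val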
18,047 | imodels.algebraic.tree_gam | _fit_posthoc_tree_coefs | null | def _fit_posthoc_tree_coefs(self, X, y, sample_weight=None):
# extract predictions from each tree
X_pred_tree = np.array([est.predict(X) for est in self.estimators_]).T
print('shapes', X.shape, X_pred_tree.shape,
y.shape, len(self.estimators_))
coef_prior = np.ones(len(self.estimators_)) * self.learning_rate
y = y - self.bias_ - X_pred_tree @ coef_prior
if self.fit_posthoc_tree_coefs.lower() == "ridge":
m = RidgeCV(fit_intercept=False)
elif self.fit_posthoc_tree_coefs.lower() == "nnls":
m = LinearRegression(fit_intercept=False, positive=True)
elif self.fit_posthoc_tree_coefs.lower() == "elasticnet":
m = ElasticNetCV(fit_intercept=False, positive=True)
m.fit(X_pred_tree, y, sample_weight=sample_weight)
self.cyclic_coef_ = m.coef_ + coef_prior
| (self, X, y, sample_weight=None) |
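The post-hoc step treats each fitted tree's predictions as a column and refits the per-tree weights with a linear model on top of them. A condensed sketch of the ridge variant (the function name and arguments are illustrative, not part of the source):
# Condensed sketch: refit per-tree coefficients with ridge on top of the
# trees' predictions, keeping the original learning-rate weights as a prior.
import numpy as np
from sklearn.linear_model import RidgeCV

def refit_tree_coefs(estimators, X, y, bias, learning_rate):
    X_pred = np.array([est.predict(X) for est in estimators]).T
    prior = np.full(len(estimators), learning_rate)
    residual = y - bias - X_pred @ prior  # remove the current contribution
    m = RidgeCV(fit_intercept=False).fit(X_pred, residual)
    return m.coef_ + prior  # new per-tree coefficients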
18,051 | imodels.algebraic.tree_gam | _marginal_fit | Fit a gbdt estimator for each feature independently.
Store in self.estimators_marginal | def _marginal_fit(
self,
X_train,
y_train,
sample_weight_train,
):
"""Fit a gbdt estimator for each feature independently.
Store in self.estimators_marginal"""
residuals_train = y_train - self.predict_proba(X_train)[:, 1]
p = X_train.shape[1]
for feature_num in range(p):
X_ = np.zeros_like(X_train)
X_[:, feature_num] = X_train[:, feature_num]
est = GradientBoostingRegressor(
max_leaf_nodes=self.max_leaf_nodes_marginal,
random_state=self.random_state,
n_estimators=self.n_boosting_rounds_marginal,
)
est.fit(X_, residuals_train, sample_weight=sample_weight_train)
if self.reg_param_marginal > 0:
est = imodels.HSTreeRegressor(
est, reg_param=self.reg_param_marginal)
self.estimators_marginal.append(est)
if (
self.fit_linear_marginal is not None
and not self.fit_linear_marginal == "None"
):
if self.fit_linear_marginal.lower() == "ridge":
linear_marginal = RidgeCV(fit_intercept=False)
elif self.fit_linear_marginal == "NNLS":
linear_marginal = LinearRegression(
fit_intercept=False, positive=True)
linear_marginal.fit(
np.array([est.predict(X_train)
for est in self.estimators_marginal]).T,
residuals_train,
sample_weight_train,
)
self.marginal_coef_ = linear_marginal.coef_
self.lin = linear_marginal
else:
self.marginal_coef_ = np.ones(p) / p
| (self, X_train, y_train, sample_weight_train) |
18,057 | imodels.algebraic.tree_gam | fit | null | def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, accept_sparse=False, multi_output=False)
if isinstance(self, ClassifierMixin):
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
sample_weight = _check_sample_weight(sample_weight, X, dtype=None)
# split into train and validation for early stopping
(
X_train,
X_val,
y_train,
y_val,
sample_weight_train,
sample_weight_val,
) = train_test_split(
X,
y,
sample_weight,
test_size=self.validation_frac,
random_state=self.random_state,
stratify=y if isinstance(self, ClassifierMixin) else None,
)
self.estimators_marginal = []
self.estimators_ = []
self.bias_ = np.mean(y)
if self.n_boosting_rounds_marginal > 0:
self._marginal_fit(
X_train,
y_train,
sample_weight_train,
)
if self.n_boosting_rounds > 0:
self._cyclic_boost(
X_train,
y_train,
sample_weight_train,
X_val,
y_val,
sample_weight_val,
)
if self.fit_posthoc_tree_coefs is not None:
self._fit_posthoc_tree_coefs(X_train, y_train, sample_weight_train)
self.mse_val_ = self._calc_mse(X_val, y_val, sample_weight_val)
return self
| (self, X, y, sample_weight=None) |
18,060 | imodels.algebraic.tree_gam | predict | null | def predict(self, X, marginal_only=False):
if isinstance(self, RegressorMixin):
return self.predict_proba(X, marginal_only=marginal_only)[:, 1]
elif isinstance(self, ClassifierMixin):
return np.argmax(self.predict_proba(X, marginal_only=marginal_only), axis=1)
| (self, X, marginal_only=False) |
18,061 | imodels.algebraic.tree_gam | predict_proba |
Params
------
marginal_only: bool
If True, only use the marginal effects.
| def predict_proba(self, X, marginal_only=False):
"""
Params
------
marginal_only: bool
If True, only use the marginal effects.
"""
X = check_array(X, accept_sparse=False, dtype=None)
check_is_fitted(self)
probs1 = np.ones(X.shape[0]) * self.bias_
# marginal prediction
for i, est in enumerate(self.estimators_marginal):
probs1 += est.predict(X) * self.marginal_coef_[i]
# cyclic coefs prediction
if not marginal_only:
if not hasattr(self, "cyclic_coef_"):
cyclic_coef_ = np.ones(
len(self.estimators_)) * self.learning_rate
else:
cyclic_coef_ = self.cyclic_coef_
# print('coef', cyclic_coef_)
if self.decay_rate_towards_marginal < 1.0:
for i, est in enumerate(self.estimators_):
if i < len(self.decay_coef_towards_marginal_):
probs1 += (
cyclic_coef_[i]
* self.decay_coef_towards_marginal_[i]
* est.predict(X)
)
else:
probs1 += cyclic_coef_[i] * est.predict(X)
else:
for i, est in enumerate(self.estimators_):
probs1 += cyclic_coef_[i] * est.predict(X)
probs1 = np.clip(probs1, a_min=0, a_max=1)
return np.array([1 - probs1, probs1]).T
| (self, X, marginal_only=False) |
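A brief usage sketch of the marginal_only flag, comparing full and marginal-only probabilities (assumes TreeGAMClassifier is exposed as above and that n_boosting_rounds_marginal > 0 so marginal estimators exist):
# Sketch: compare the full prediction with the marginal-only prediction.
import numpy as np
from imodels import TreeGAMClassifier

rng = np.random.RandomState(0)
X = rng.rand(200, 3)
y = (X[:, 0] > 0.5).astype(int)

gam = TreeGAMClassifier(n_boosting_rounds=20, n_boosting_rounds_marginal=10)
gam.fit(X, y)
p_full = gam.predict_proba(X)[:, 1]
p_marg = gam.predict_proba(X, marginal_only=True)[:, 1]
print(np.abs(p_full - p_marg).mean())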
18,065 | sklearn.utils._metadata_requests | set_predict_proba_request | Request metadata passed to the ``predict_proba`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``predict_proba`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``predict_proba``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
marginal_only : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``marginal_only`` parameter in ``predict_proba``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: imodels.algebraic.tree_gam.TreeGAMClassifier, *, marginal_only: Union[bool, NoneType, str] = '$UNCHANGED$') -> imodels.algebraic.tree_gam.TreeGAMClassifier |
18,066 | sklearn.utils._metadata_requests | set_predict_request | Request metadata passed to the ``predict`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``predict`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``predict``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
marginal_only : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``marginal_only`` parameter in ``predict``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: imodels.algebraic.tree_gam.TreeGAMClassifier, *, marginal_only: Union[bool, NoneType, str] = '$UNCHANGED$') -> imodels.algebraic.tree_gam.TreeGAMClassifier |
18,068 | imodels.algebraic.tree_gam | TreeGAMRegressor | null | class TreeGAMRegressor(TreeGAM, RegressorMixin):
...
| (n_boosting_rounds=100, max_leaf_nodes=3, reg_param=0.0, learning_rate: float = 0.01, n_boosting_rounds_marginal=0, max_leaf_nodes_marginal=2, reg_param_marginal=0.0, fit_linear_marginal=None, select_linear_marginal=False, decay_rate_towards_marginal=1.0, fit_posthoc_tree_coefs=None, boosting_strategy='cyclic', validation_frac=0.15, random_state=None) |
18,101 | sklearn.utils.validation | check_X_y | Input validation for standard estimators.
Checks X and y for consistent length, enforces X to be 2D and y 1D. By
default, X is checked to be non-empty and containing only finite values.
Standard input checks are also applied to y, such as checking that y
does not have np.nan or np.inf targets. For multi-label y, set
multi_output=True to allow 2D and sparse y. If the dtype of X is
object, attempt converting to float, raising on failure.
Parameters
----------
X : {ndarray, list, sparse matrix}
Input data.
y : {ndarray, list, sparse matrix}
Labels.
accept_sparse : str, bool or list of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
accept_sparse, accept_large_sparse will cause it to be accepted only
if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : {'F', 'C'}, default=None
Whether an array will be forced to be fortran or c-style. If
`None`, then the input data's order is preserved when possible.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in X. This parameter
does not influence whether y can have np.inf, np.nan, pd.NA values.
The possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
ensure_2d : bool, default=True
Whether to raise a value error if X is not 2D.
allow_nd : bool, default=False
Whether to allow X.ndim > 2.
multi_output : bool, default=False
Whether to allow 2D y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
ensure_min_samples : int, default=1
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : bool, default=False
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
Examples
--------
>>> from sklearn.utils.validation import check_X_y
>>> X = [[1, 2], [3, 4], [5, 6]]
>>> y = [1, 2, 3]
>>> X, y = check_X_y(X, y)
>>> X
array([[1, 2],
[3, 4],
[5, 6]])
>>> y
array([1, 2, 3])
| def check_X_y(
X,
y,
accept_sparse=False,
*,
accept_large_sparse=True,
dtype="numeric",
order=None,
copy=False,
force_all_finite=True,
ensure_2d=True,
allow_nd=False,
multi_output=False,
ensure_min_samples=1,
ensure_min_features=1,
y_numeric=False,
estimator=None,
):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X to be 2D and y 1D. By
default, X is checked to be non-empty and containing only finite values.
Standard input checks are also applied to y, such as checking that y
does not have np.nan or np.inf targets. For multi-label y, set
multi_output=True to allow 2D and sparse y. If the dtype of X is
object, attempt converting to float, raising on failure.
Parameters
----------
X : {ndarray, list, sparse matrix}
Input data.
y : {ndarray, list, sparse matrix}
Labels.
accept_sparse : str, bool or list of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
accept_sparse, accept_large_sparse will cause it to be accepted only
if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : {'F', 'C'}, default=None
Whether an array will be forced to be fortran or c-style. If
`None`, then the input data's order is preserved when possible.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in X. This parameter
does not influence whether y can have np.inf, np.nan, pd.NA values.
The possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
ensure_2d : bool, default=True
Whether to raise a value error if X is not 2D.
allow_nd : bool, default=False
Whether to allow X.ndim > 2.
multi_output : bool, default=False
Whether to allow 2D y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
ensure_min_samples : int, default=1
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : bool, default=False
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
Examples
--------
>>> from sklearn.utils.validation import check_X_y
>>> X = [[1, 2], [3, 4], [5, 6]]
>>> y = [1, 2, 3]
>>> X, y = check_X_y(X, y)
>>> X
array([[1, 2],
[3, 4],
[5, 6]])
>>> y
array([1, 2, 3])
"""
if y is None:
if estimator is None:
estimator_name = "estimator"
else:
estimator_name = _check_estimator_name(estimator)
raise ValueError(
f"{estimator_name} requires y to be passed, but the target y is None"
)
X = check_array(
X,
accept_sparse=accept_sparse,
accept_large_sparse=accept_large_sparse,
dtype=dtype,
order=order,
copy=copy,
force_all_finite=force_all_finite,
ensure_2d=ensure_2d,
allow_nd=allow_nd,
ensure_min_samples=ensure_min_samples,
ensure_min_features=ensure_min_features,
estimator=estimator,
input_name="X",
)
y = _check_y(y, multi_output=multi_output, y_numeric=y_numeric, estimator=estimator)
check_consistent_length(X, y)
return X, y
| (X, y, accept_sparse=False, *, accept_large_sparse=True, dtype='numeric', order=None, copy=False, force_all_finite=True, ensure_2d=True, allow_nd=False, multi_output=False, ensure_min_samples=1, ensure_min_features=1, y_numeric=False, estimator=None) |
18,102 | sklearn.utils.validation | check_array | Input validation on an array, list, sparse matrix or similar.
By default, the input is checked to be a non-empty 2D array containing
only finite values. If the dtype of the array is object, attempt
converting to float, raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : str, bool or list/tuple of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
accept_sparse, accept_large_sparse=False will cause it to be accepted
only if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : {'F', 'C'} or None, default=None
Whether an array will be forced to be fortran or c-style.
When order is None (default), then if copy=False, nothing is ensured
about the memory layout of the output array; otherwise (copy=True)
the memory layout of the returned array is kept as close as possible
to the original array.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
ensure_2d : bool, default=True
Whether to raise a value error if array is not 2D.
allow_nd : bool, default=False
Whether to allow array.ndim > 2.
ensure_min_samples : int, default=1
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
input_name : str, default=""
The data name used to construct the error message. In particular
if `input_name` is "X" and the data has NaN values and
allow_nan is False, the error message will link to the imputer
documentation.
.. versionadded:: 1.1.0
Returns
-------
array_converted : object
The converted and validated array.
Examples
--------
>>> from sklearn.utils.validation import check_array
>>> X = [[1, 2, 3], [4, 5, 6]]
>>> X_checked = check_array(X)
>>> X_checked
array([[1, 2, 3], [4, 5, 6]])
| def check_array(
array,
accept_sparse=False,
*,
accept_large_sparse=True,
dtype="numeric",
order=None,
copy=False,
force_all_finite=True,
ensure_2d=True,
allow_nd=False,
ensure_min_samples=1,
ensure_min_features=1,
estimator=None,
input_name="",
):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is checked to be a non-empty 2D array containing
only finite values. If the dtype of the array is object, attempt
converting to float, raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : str, bool or list/tuple of str, default=False
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool, default=True
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
accept_sparse, accept_large_sparse=False will cause it to be accepted
only if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : 'numeric', type, list of type or None, default='numeric'
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : {'F', 'C'} or None, default=None
Whether an array will be forced to be fortran or c-style.
When order is None (default), then if copy=False, nothing is ensured
about the memory layout of the output array; otherwise (copy=True)
the memory layout of the returned array is kept as close as possible
to the original array.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
ensure_2d : bool, default=True
Whether to raise a value error if array is not 2D.
allow_nd : bool, default=False
Whether to allow array.ndim > 2.
ensure_min_samples : int, default=1
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int, default=1
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
estimator : str or estimator instance, default=None
If passed, include the name of the estimator in warning messages.
input_name : str, default=""
The data name used to construct the error message. In particular
if `input_name` is "X" and the data has NaN values and
allow_nan is False, the error message will link to the imputer
documentation.
.. versionadded:: 1.1.0
Returns
-------
array_converted : object
The converted and validated array.
Examples
--------
>>> from sklearn.utils.validation import check_array
>>> X = [[1, 2, 3], [4, 5, 6]]
>>> X_checked = check_array(X)
>>> X_checked
array([[1, 2, 3], [4, 5, 6]])
"""
if isinstance(array, np.matrix):
raise TypeError(
"np.matrix is not supported. Please convert to a numpy array with "
"np.asarray. For more information see: "
"https://numpy.org/doc/stable/reference/generated/numpy.matrix.html"
)
xp, is_array_api_compliant = get_namespace(array)
# store reference to original array to check if copy is needed when
# function returns
array_orig = array
# store whether originally we wanted numeric dtype
dtype_numeric = isinstance(dtype, str) and dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not is_array_api_compliant and not hasattr(dtype_orig, "kind"):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
# check if the object contains several dtypes (typically a pandas
# DataFrame), and store them. If not, store None.
dtypes_orig = None
pandas_requires_conversion = False
# track if we have a Series-like object to raise a better error message
type_if_series = None
if hasattr(array, "dtypes") and hasattr(array.dtypes, "__array__"):
# throw warning if columns are sparse. If all columns are sparse, then
# array.sparse exists and sparsity will be preserved (later).
with suppress(ImportError):
from pandas import SparseDtype
def is_sparse(dtype):
return isinstance(dtype, SparseDtype)
if not hasattr(array, "sparse") and array.dtypes.apply(is_sparse).any():
warnings.warn(
"pandas.DataFrame with sparse columns found."
"It will be converted to a dense numpy array."
)
dtypes_orig = list(array.dtypes)
pandas_requires_conversion = any(
_pandas_dtype_needs_early_conversion(i) for i in dtypes_orig
)
if all(isinstance(dtype_iter, np.dtype) for dtype_iter in dtypes_orig):
dtype_orig = np.result_type(*dtypes_orig)
elif pandas_requires_conversion and any(d == object for d in dtypes_orig):
# Force object if any of the dtypes is an object
dtype_orig = object
elif (_is_extension_array_dtype(array) or hasattr(array, "iloc")) and hasattr(
array, "dtype"
):
# array is a pandas series
type_if_series = type(array)
pandas_requires_conversion = _pandas_dtype_needs_early_conversion(array.dtype)
if isinstance(array.dtype, np.dtype):
dtype_orig = array.dtype
else:
# Set to None to let array.astype work out the best dtype
dtype_orig = None
if dtype_numeric:
if (
dtype_orig is not None
and hasattr(dtype_orig, "kind")
and dtype_orig.kind == "O"
):
# if input is object, convert to float.
dtype = xp.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if pandas_requires_conversion:
# pandas dataframe requires conversion earlier to handle extension dtypes with
# nans
# Use the original dtype for conversion if dtype is None
new_dtype = dtype_orig if dtype is None else dtype
array = array.astype(new_dtype)
# Since we converted here, we do not need to convert again later
dtype = None
if force_all_finite not in (True, False, "allow-nan"):
raise ValueError(
'force_all_finite should be a bool or "allow-nan". Got {!r} instead'.format(
force_all_finite
)
)
if dtype is not None and _is_numpy_namespace(xp):
# convert to dtype object to conform to Array API to be use `xp.isdtype` later
dtype = np.dtype(dtype)
estimator_name = _check_estimator_name(estimator)
context = " by %s" % estimator_name if estimator is not None else ""
# When all dataframe columns are sparse, convert to a sparse array
if hasattr(array, "sparse") and array.ndim > 1:
with suppress(ImportError):
from pandas import SparseDtype # noqa: F811
def is_sparse(dtype):
return isinstance(dtype, SparseDtype)
if array.dtypes.apply(is_sparse).all():
# DataFrame.sparse only supports `to_coo`
array = array.sparse.to_coo()
if array.dtype == np.dtype("object"):
unique_dtypes = set([dt.subtype.name for dt in array_orig.dtypes])
if len(unique_dtypes) > 1:
raise ValueError(
"Pandas DataFrame with mixed sparse extension arrays "
"generated a sparse matrix with object dtype which "
"can not be converted to a scipy sparse matrix."
"Sparse extension arrays should all have the same "
"numeric type."
)
if sp.issparse(array):
_ensure_no_complex_data(array)
array = _ensure_sparse_format(
array,
accept_sparse=accept_sparse,
dtype=dtype,
copy=copy,
force_all_finite=force_all_finite,
accept_large_sparse=accept_large_sparse,
estimator_name=estimator_name,
input_name=input_name,
)
else:
# If np.array(..) gives ComplexWarning, then we convert the warning
# to an error. This is needed because specifying a non complex
# dtype to the function converts complex to real dtype,
# thereby passing the test made in the lines following the scope
# of warnings context manager.
with warnings.catch_warnings():
try:
warnings.simplefilter("error", ComplexWarning)
if dtype is not None and xp.isdtype(dtype, "integral"):
# Conversion float -> int should not contain NaN or
# inf (numpy#14412). We cannot use casting='safe' because
# then conversion float -> int would be disallowed.
array = _asarray_with_order(array, order=order, xp=xp)
if xp.isdtype(array.dtype, ("real floating", "complex floating")):
_assert_all_finite(
array,
allow_nan=False,
msg_dtype=dtype,
estimator_name=estimator_name,
input_name=input_name,
)
array = xp.astype(array, dtype, copy=False)
else:
array = _asarray_with_order(array, order=order, dtype=dtype, xp=xp)
except ComplexWarning as complex_warning:
raise ValueError(
"Complex data not supported\n{}\n".format(array)
) from complex_warning
# It is possible that the np.array(..) gave no warning. This happens
# when no dtype conversion happened, for example dtype = None. The
# result is that np.array(..) produces an array of complex dtype
# and we need to catch and raise exception for such cases.
_ensure_no_complex_data(array)
if ensure_2d:
# If input is scalar raise error
if array.ndim == 0:
raise ValueError(
"Expected 2D array, got scalar array instead:\narray={}.\n"
"Reshape your data either using array.reshape(-1, 1) if "
"your data has a single feature or array.reshape(1, -1) "
"if it contains a single sample.".format(array)
)
# If input is 1D raise error
if array.ndim == 1:
# If input is a Series-like object (eg. pandas Series or polars Series)
if type_if_series is not None:
msg = (
f"Expected a 2-dimensional container but got {type_if_series} "
"instead. Pass a DataFrame containing a single row (i.e. "
"single sample) or a single column (i.e. single feature) "
"instead."
)
else:
msg = (
f"Expected 2D array, got 1D array instead:\narray={array}.\n"
"Reshape your data either using array.reshape(-1, 1) if "
"your data has a single feature or array.reshape(1, -1) "
"if it contains a single sample."
)
raise ValueError(msg)
if dtype_numeric and hasattr(array.dtype, "kind") and array.dtype.kind in "USV":
raise ValueError(
"dtype='numeric' is not compatible with arrays of bytes/strings."
"Convert your data to numeric values explicitly instead."
)
if not allow_nd and array.ndim >= 3:
raise ValueError(
"Found array with dim %d. %s expected <= 2."
% (array.ndim, estimator_name)
)
if force_all_finite:
_assert_all_finite(
array,
input_name=input_name,
estimator_name=estimator_name,
allow_nan=force_all_finite == "allow-nan",
)
if copy:
if _is_numpy_namespace(xp):
# only make a copy if `array` and `array_orig` may share memory`
if np.may_share_memory(array, array_orig):
array = _asarray_with_order(
array, dtype=dtype, order=order, copy=True, xp=xp
)
else:
# always make a copy for non-numpy arrays
array = _asarray_with_order(
array, dtype=dtype, order=order, copy=True, xp=xp
)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError(
"Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required%s."
% (n_samples, array.shape, ensure_min_samples, context)
)
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError(
"Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required%s."
% (n_features, array.shape, ensure_min_features, context)
)
# With an input pandas dataframe or series, we know we can always make the
# resulting array writeable:
# - if copy=True, we have already made a copy so it is fine to make the
# array writeable
# - if copy=False, the caller is telling us explicitly that we can do
# in-place modifications
# See https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html#read-only-numpy-arrays
# for more details about pandas copy-on-write mechanism, that is enabled by
# default in pandas 3.0.0.dev.
if _is_pandas_df_or_series(array_orig) and hasattr(array, "flags"):
array.flags.writeable = True
return array
| (array, accept_sparse=False, *, accept_large_sparse=True, dtype='numeric', order=None, copy=False, force_all_finite=True, ensure_2d=True, allow_nd=False, ensure_min_samples=1, ensure_min_features=1, estimator=None, input_name='') |
18,103 | sklearn.utils.multiclass | check_classification_targets | Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
Target values.
| def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
Target values.
"""
y_type = type_of_target(y, input_name="y")
if y_type not in [
"binary",
"multiclass",
"multiclass-multioutput",
"multilabel-indicator",
"multilabel-sequences",
]:
raise ValueError(
f"Unknown label type: {y_type}. Maybe you are trying to fit a "
"classifier, which expects discrete classes on a "
"regression target with continuous values."
)
| (y) |
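Usage sketch (added for illustration, not part of the dataset entry): discrete targets pass the check silently, while a continuous target triggers the ValueError raised in the code above.
```
from sklearn.utils.multiclass import check_classification_targets

check_classification_targets([0, 1, 1, 0])      # binary labels: passes silently
check_classification_targets(['cat', 'dog'])    # string labels: passes silently
try:
    check_classification_targets([0.25, 1.7, 3.14])  # continuous regression target
except ValueError as err:
    print(err)  # Unknown label type: continuous. Maybe you are trying to fit a classifier ...
```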
18,104 | imodels.util.arguments | check_fit_arguments | Process arguments for fit and predict methods.
| def check_fit_arguments(model, X, y, feature_names):
"""Process arguments for fit and predict methods.
"""
if isinstance(model, ClassifierMixin):
model.classes_, y = np.unique(y, return_inverse=True) # deals with str inputs
check_classification_targets(y)
if feature_names is None:
if isinstance(X, pd.DataFrame):
model.feature_names_ = X.columns
elif isinstance(X, list):
model.feature_names_ = ['X' + str(i) for i in range(len(X[0]))]
else:
model.feature_names_ = ['X' + str(i) for i in range(X.shape[1])]
else:
model.feature_names_ = feature_names
if scipy.sparse.issparse(X):
X = X.toarray()
X, y = check_X_y(X, y)
_, model.n_features_in_ = X.shape
assert len(model.feature_names_) == model.n_features_in_, 'feature_names should be same size as X.shape[1]'
y = y.astype(float)
return X, y, model.feature_names_
| (model, X, y, feature_names) |
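Illustrative sketch (added, not from the source): how an imodels-style estimator might call check_fit_arguments inside fit. TinyClassifier and majority_ are hypothetical names; the call assumes imodels is installed.
```
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from imodels.util.arguments import check_fit_arguments

class TinyClassifier(BaseEstimator, ClassifierMixin):
    def fit(self, X, y, feature_names=None):
        # Sets self.classes_, self.feature_names_, self.n_features_in_
        # and returns cleaned numpy arrays.
        X, y, feature_names = check_fit_arguments(self, X, y, feature_names)
        self.majority_ = int(round(y.mean()))  # trivial placeholder "model"
        return self

    def predict(self, X):
        return np.full(len(X), self.majority_)

clf = TinyClassifier().fit([[0, 1], [1, 0], [1, 1]], ['a', 'b', 'b'])
print(clf.feature_names_)   # ['X0', 'X1']
print(clf.classes_)         # ['a' 'b']
```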
18,105 | sklearn.utils.validation | check_is_fitted | Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
fitted attributes (ending with a trailing underscore) and otherwise
raises a NotFittedError with the given message.
If an estimator does not set any attributes with a trailing underscore, it
can define a ``__sklearn_is_fitted__`` method returning a boolean to
specify if the estimator is fitted or not. See
:ref:`sphx_glr_auto_examples_developing_estimators_sklearn_is_fitted.py`
for an example on how to use the API.
Parameters
----------
estimator : estimator instance
Estimator instance for which the check is performed.
attributes : str, list or tuple of str, default=None
Attribute name(s) given as string or a list/tuple of strings
Eg.: ``["coef_", "estimator_", ...], "coef_"``
If `None`, `estimator` is considered fitted if there exists an
attribute that ends with an underscore and does not start with a double
underscore.
msg : str, default=None
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this
estimator."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default=all
Specify whether all or any of the given attributes must exist.
Raises
------
TypeError
If the estimator is a class or not an estimator instance
NotFittedError
If the attributes are not found.
Examples
--------
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.utils.validation import check_is_fitted
>>> from sklearn.exceptions import NotFittedError
>>> lr = LogisticRegression()
>>> try:
... check_is_fitted(lr)
... except NotFittedError as exc:
... print(f"Model is not fitted yet.")
Model is not fitted yet.
>>> lr.fit([[1, 2], [1, 3]], [1, 0])
LogisticRegression()
>>> check_is_fitted(lr)
| def check_is_fitted(estimator, attributes=None, *, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
fitted attributes (ending with a trailing underscore) and otherwise
raises a NotFittedError with the given message.
If an estimator does not set any attributes with a trailing underscore, it
can define a ``__sklearn_is_fitted__`` method returning a boolean to
specify if the estimator is fitted or not. See
:ref:`sphx_glr_auto_examples_developing_estimators_sklearn_is_fitted.py`
for an example on how to use the API.
Parameters
----------
estimator : estimator instance
Estimator instance for which the check is performed.
attributes : str, list or tuple of str, default=None
Attribute name(s) given as string or a list/tuple of strings
Eg.: ``["coef_", "estimator_", ...], "coef_"``
If `None`, `estimator` is considered fitted if there exists an
attribute that ends with an underscore and does not start with a double
underscore.
msg : str, default=None
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this
estimator."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default=all
Specify whether all or any of the given attributes must exist.
Raises
------
TypeError
If the estimator is a class or not an estimator instance
NotFittedError
If the attributes are not found.
Examples
--------
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.utils.validation import check_is_fitted
>>> from sklearn.exceptions import NotFittedError
>>> lr = LogisticRegression()
>>> try:
... check_is_fitted(lr)
... except NotFittedError as exc:
... print(f"Model is not fitted yet.")
Model is not fitted yet.
>>> lr.fit([[1, 2], [1, 3]], [1, 0])
LogisticRegression()
>>> check_is_fitted(lr)
"""
if isclass(estimator):
raise TypeError("{} is a class, not an instance.".format(estimator))
if msg is None:
msg = (
"This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this estimator."
)
if not hasattr(estimator, "fit"):
raise TypeError("%s is not an estimator instance." % (estimator))
if not _is_fitted(estimator, attributes, all_or_any):
raise NotFittedError(msg % {"name": type(estimator).__name__})
| (estimator, attributes=None, *, msg=None, all_or_any=<built-in function all>) |
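A small added sketch of the __sklearn_is_fitted__ hook mentioned in the docstring but not shown in its examples; FlagEstimator is a hypothetical name.
```
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_is_fitted

class FlagEstimator(BaseEstimator):
    def __init__(self):
        self._fitted = False

    def fit(self, X, y=None):
        self._fitted = True
        return self

    def __sklearn_is_fitted__(self):
        # Consulted by check_is_fitted when no trailing-underscore
        # attributes are present on the estimator.
        return self._fitted

est = FlagEstimator()
est.fit([[1], [2]])
check_is_fitted(est)  # no exception: __sklearn_is_fitted__ returned True
```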
18,106 | copy | deepcopy | Deep copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
| def deepcopy(x, memo=None, _nil=[]):
"""Deep copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
if memo is None:
memo = {}
d = id(x)
y = memo.get(d, _nil)
if y is not _nil:
return y
cls = type(x)
copier = _deepcopy_dispatch.get(cls)
if copier is not None:
y = copier(x, memo)
else:
if issubclass(cls, type):
y = _deepcopy_atomic(x, memo)
else:
copier = getattr(x, "__deepcopy__", None)
if copier is not None:
y = copier(memo)
else:
reductor = dispatch_table.get(cls)
if reductor:
rv = reductor(x)
else:
reductor = getattr(x, "__reduce_ex__", None)
if reductor is not None:
rv = reductor(4)
else:
reductor = getattr(x, "__reduce__", None)
if reductor:
rv = reductor()
else:
raise Error(
"un(deep)copyable object of type %s" % cls)
if isinstance(rv, str):
y = x
else:
y = _reconstruct(x, memo, *rv)
# If the copy is the same object as the original, don't memoize.
if y is not x:
memo[d] = y
_keep_alive(x, memo) # Make sure x lives at least as long as d
return y
| (x, memo=None, _nil=[]) |
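Quick added illustration of the two properties the implementation above guarantees: nested containers are copied recursively, and aliasing inside the original is preserved through the memo dict.
```
import copy

a = [[1, 2], [3, 4]]
b = copy.deepcopy(a)
b[0][0] = 99
print(a[0][0])        # 1 -- the nested lists were copied, not shared

inner = [1, 2]
c = [inner, inner]
d = copy.deepcopy(c)
print(d[0] is d[1])   # True -- the memo dict preserves shared references
```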
18,107 | imodels.util.convert | dict_to_rule |
Function to accept rule dict and convert to Rule object
Parameters:
rule: list of dict of schema
[
{
'feature': int,
'operator': str,
'pivot': float
},
]
| def dict_to_rule(rule, clf_feature_dict):
"""
Function to accept rule dict and convert to Rule object
Parameters:
rule: list of dict of schema
[
{
'feature': int,
'operator': str,
'pivot': float
},
]
"""
output = ''
for condition in rule:
output += '{} {} {} and '.format(
clf_feature_dict[int(condition['feature'])],
condition['operator'],
condition['pivot']
)
return output[:-5]
| (rule, clf_feature_dict) |
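Hedged example (added; assumes imodels is installed): each condition dict carries 'feature', 'operator' and 'pivot', and the feature index is mapped to a name through clf_feature_dict.
```
from imodels.util.convert import dict_to_rule

rule = [
    {'feature': 0, 'operator': '<=', 'pivot': 3.5},
    {'feature': 2, 'operator': '>', 'pivot': 1.0},
]
feature_dict = {0: 'age', 1: 'height', 2: 'income'}
print(dict_to_rule(rule, feature_dict))  # age <= 3.5 and income > 1.0
```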
18,110 | imodels.util.explain_errors | explain_classification_errors | Explains the classification errors of a model by fitting an interpretable model to them.
Currently only supports binary classification.
Parameters
----------
X: array_like
(n, n_features)
predictions: array_like
(n, 1) predictions
y
(n, 1) targets with integer values representing class
feature_names
n_features
Returns
-------
model: BaseEstimator
| def explain_classification_errors(X, predictions, y,
feature_names: list = None,
target_name: str = None,
classifier: BaseEstimator = imodels.GreedyTreeClassifier(),
target_one_hot_encode: bool = False,
print_rules: bool = True):
"""Explains the classification errors of a model by fitting an interpretable model to them.
Currently only supports binary classification.
Parameters
----------
X: array_like
(n, n_features)
predictions: array_like
(n, 1) predictions
y
(n, 1) targets with integer values representing class
feature_names
n_features
Returns
-------
model: BaseEstimator
"""
# deal with names
if feature_names is None:
if isinstance(X, pd.DataFrame):
feature_names = X.columns.tolist()
else:
feature_names = [f'X{i + 1}' for i in range(X.shape[1])]
if target_name is None:
if isinstance(y, pd.DataFrame):
target_name = y.columns[0]
elif isinstance(y, pd.Series):
target_name = y.name
else:
target_name = 'target'
if isinstance(predictions, pd.Series) or isinstance(predictions, pd.DataFrame):
predictions = predictions.values
X, y = check_X_y(X, y) # converts to np
if len(y.shape) == 1:
y = y.reshape(-1, 1)
if len(predictions.shape) == 1:
predictions = predictions.reshape(-1, 1)
errors = np.array(predictions != y).astype(int)
features = pd.DataFrame(np.hstack((X, y)))
features.columns = [*feature_names, target_name]
classifier.fit(features, errors.flatten())
if print_rules:
print(classifier)
return classifier, features.columns
| (X, predictions, y, feature_names: Optional[list] = None, target_name: Optional[str] = None, classifier: sklearn.base.BaseEstimator = GreedyTreeClassifier(), target_one_hot_encode: bool = False, print_rules: bool = True) |
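Minimal usage sketch (added; assumes imodels is installed). The data and the deliberately bad predictions are made up for illustration; the returned tree describes where predictions and y disagree.
```
import numpy as np
from imodels.util.explain_errors import explain_classification_errors

rng = np.random.RandomState(0)
X = rng.rand(200, 3)
y = (X[:, 0] > 0.5).astype(int)
predictions = (X[:, 1] > 0.5).astype(int)   # ignores the true signal on purpose

clf, columns = explain_classification_errors(
    X, predictions, y, feature_names=['f1', 'f2', 'f3'])
# clf is a fitted GreedyTreeClassifier over [f1, f2, f3, target] that predicts
# the error indicator; print_rules=True already printed its rules.
```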
18,111 | imodels.util.data_util | get_clean_dataset | Fetch clean data (as numpy arrays) from various sources including imodels, pmlb, openml, and sklearn.
If data is not downloaded, will download and cache. Otherwise will load locally.
Cleans features so that they are type float and features names don't start with a digit.
Parameters
----------
dataset_name: str
Checks for unique identifier in imodels.util.data_util.DSET_KWARGS
Otherwise, unique dataset identifier (see https://github.com/csinva/imodels-data for unique identifiers)
data_source: str
options: 'imodels', 'pmlb', 'sklearn', 'openml', 'synthetic'
data_path: str
path to load/save data (default: 'data')
test_size: float, optional
if not None, will split data into train and test sets (with fraction test_size in test set)
& change the return signature to `X_train, X_test, y_train, y_test, feature_names`
random_state: int, optional
if test_size is not None, will use this random state to split data
Returns
-------
X: np.ndarray
features
y: np.ndarray
outcome
feature_names: list
Example
-------
```
# download compas dataset from imodels
X, y, feature_names = imodels.get_clean_dataset('compas_two_year_clean', data_source='imodels')
# download ionosphere dataset from pmlb
X, y, feature_names = imodels.get_clean_dataset('ionosphere', data_source='pmlb')
# download liver dataset from openml
X, y, feature_names = imodels.get_clean_dataset('8', data_source='openml')
# download ca housing from sklearn
X, y, feature_names = imodels.get_clean_dataset('california_housing', data_source='sklearn')
```
| def get_clean_dataset(
dataset_name: str,
data_source: str = "imodels",
data_path=os.path.expanduser("~/cache_imodels_data"),
convertna: bool = True,
test_size: float = None,
random_state: int = 42,
verbose=True,
) -> Tuple[np.ndarray, np.ndarray, list]:
"""Fetch clean data (as numpy arrays) from various sources including imodels, pmlb, openml, and sklearn.
If data is not downloaded, will download and cache. Otherwise will load locally.
Cleans features so that they are type float and features names don't start with a digit.
Parameters
----------
dataset_name: str
Checks for unique identifier in imodels.util.data_util.DSET_KWARGS
Otherwise, unique dataset identifier (see https://github.com/csinva/imodels-data for unique identifiers)
data_source: str
options: 'imodels', 'pmlb', 'sklearn', 'openml', 'synthetic'
data_path: str
path to load/save data (default: 'data')
test_size: float, optional
if not None, will split data into train and test sets (with fraction test_size in test set)
& change the return signature to `X_train, X_test, y_train, y_test, feature_names`
random_state: int, optional
if test_size is not None, will use this random state to split data
Returns
-------
X: np.ndarray
features
y: np.ndarray
outcome
feature_names: list
Example
-------
```
# download compas dataset from imodels
X, y, feature_names = imodels.get_clean_dataset('compas_two_year_clean', data_source='imodels')
# download ionosphere dataset from pmlb
X, y, feature_names = imodels.get_clean_dataset('ionosphere', data_source='pmlb')
# download liver dataset from openml
X, y, feature_names = imodels.get_clean_dataset('8', data_source='openml')
# download ca housing from sklearn
X, y, feature_names = imodels.get_clean_dataset('california_housing', data_source='sklearn')
```
"""
if dataset_name in DSET_KWARGS:
if verbose:
data_source = DSET_KWARGS[dataset_name]["data_source"]
dataset_name = DSET_KWARGS[dataset_name]["dataset_name"]
print(f"fetching {dataset_name} from {data_source}")
assert data_source in ["imodels", "pmlb", "imodels-multitask", "sklearn", "openml", "synthetic"], (
data_source + " not correct"
)
if test_size is not None:
def _split(X, y, feature_names):
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=test_size, random_state=random_state
)
return X_train, X_test, y_train, y_test, feature_names
else:
def _split(X, y, feature_names):
return X, y, feature_names
if data_source == "imodels":
if not dataset_name.endswith("csv"):
dataset_name = dataset_name + ".csv"
if not os.path.isfile(dataset_name):
_download_imodels_dataset(dataset_name, data_path)
df = pd.read_csv(oj(data_path, "imodels_data", dataset_name))
X, y = df.iloc[:, :-1].values, df.iloc[:, -1].values
feature_names = df.columns.values[:-1]
if convertna:
X = np.nan_to_num(X.astype("float32"))
return _split(X, y, _clean_feat_names(feature_names))
elif data_source == 'imodels-multitask':
if not dataset_name.endswith("csv"):
dataset_name = dataset_name + ".csv"
if not os.path.isfile(dataset_name):
_download_imodels_multitask_dataset(dataset_name, data_path)
df = pd.read_csv(oj(data_path, "imodels_multitask_data", dataset_name))
target_cols = [col for col in df.columns if col.endswith('__target')]
feature_names = [col for col in df.columns if col not in target_cols]
X, y = df[feature_names].values, df[target_cols].values
if convertna:
X = np.nan_to_num(X.astype("float32"))
return _split(X, y, _clean_feat_names(feature_names))
elif data_source == "pmlb":
from pmlb import fetch_data
feature_names = list(
fetch_data(
dataset_name,
return_X_y=False,
local_cache_dir=oj(data_path, "pmlb_data"),
).columns
)
feature_names.remove("target")
X, y = fetch_data(
dataset_name, return_X_y=True, local_cache_dir=oj(data_path, "pmlb_data")
)
if (
np.unique(y).size == 2
): # if binary classification, ensure that the classes are 0 and 1
y -= np.min(y)
return _split(_clean_features(X), y, _clean_feat_names(feature_names))
elif data_source == "sklearn":
if dataset_name == "diabetes":
data = sklearn.datasets.load_diabetes()
elif dataset_name == "california_housing":
data = sklearn.datasets.fetch_california_housing(
data_home=oj(data_path, "sklearn_data")
)
elif dataset_name == "breast_cancer":
data = sklearn.datasets.load_breast_cancer()
return data["data"], data["target"], _clean_feat_names(data["feature_names"])
elif (
data_source == "openml"
): # note this api might change in newer sklearn - should give dataset-id not name
data = sklearn.datasets.fetch_openml(
data_id=dataset_name, data_home=oj(data_path, "openml_data"), parser="auto"
)
X, y, feature_names = (
data["data"],
data["target"],
_clean_feat_names(data["feature_names"]),
)
if isinstance(X, pd.DataFrame):
X = X.values
if isinstance(y, pd.Series):
y = y.values
y = _define_openml_outcomes(y, dataset_name)
return _split(_clean_features(X), y, _clean_feat_names(feature_names))
elif data_source == "synthetic":
if dataset_name == "friedman1":
X, y = sklearn.datasets.make_friedman1(
n_samples=200, n_features=10)
elif dataset_name == "friedman2":
X, y = sklearn.datasets.make_friedman2(n_samples=200)
elif dataset_name == "friedman3":
X, y = sklearn.datasets.make_friedman3(n_samples=200)
elif dataset_name == "radchenko_james":
X, y = make_rj()
elif dataset_name == "vo_pati":
X, y = make_vp()
return _split(X, y, ["X_" + str(i + 1) for i in range(X.shape[1])])
| (dataset_name: str, data_source: str = 'imodels', data_path='/root/cache_imodels_data', convertna: bool = True, test_size: Optional[float] = None, random_state: int = 42, verbose=True) -> Tuple[numpy.ndarray, numpy.ndarray, list] |
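Added sketch of the test_size branch, which the docstring example does not cover (assumes imodels is installed and the dataset can be downloaded and cached locally).
```
import imodels

# With test_size set, the return signature becomes
# X_train, X_test, y_train, y_test, feature_names
X_train, X_test, y_train, y_test, feat_names = imodels.get_clean_dataset(
    'compas_two_year_clean', data_source='imodels',
    test_size=0.2, random_state=0)
print(X_train.shape[0] > X_test.shape[0])  # True for an 80/20 split
```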
18,112 | imodels.util.rule | get_feature_dict | null | def get_feature_dict(num_features: int, feature_names: Iterable[str] = None) -> Dict[str, str]:
feature_dict = OrderedDict()
if feature_names is not None:
for i in range(num_features):
feature_dict[f'X_{i}'] = feature_names[i]
else:
for i in range(num_features):
feature_dict[f'X_{i}'] = f'X_{i}'
return feature_dict
| (num_features: int, feature_names: Optional[Iterable[str]] = None) -> Dict[str, str] |
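Added example: placeholder names X_0, X_1, ... are mapped to the supplied feature names, or to themselves when none are given.
```
from imodels.util.rule import get_feature_dict

print(get_feature_dict(3, ['age', 'height', 'income']))
# OrderedDict([('X_0', 'age'), ('X_1', 'height'), ('X_2', 'income')])
print(get_feature_dict(2))
# OrderedDict([('X_0', 'X_0'), ('X_1', 'X_1')])
```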
18,113 | sklearn.preprocessing._data | normalize | Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : {'l1', 'l2', 'max'}, default='l2'
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : {0, 1}, default=1
Define axis used to normalize the data along. If 1, independently
normalize each sample, otherwise (if 0) normalize each feature.
copy : bool, default=True
If False, try to avoid a copy and normalize in place.
This is not guaranteed to always work in place; e.g. if the data is
a numpy array with an int dtype, a copy will be returned even with
copy=False.
return_norm : bool, default=False
Whether to return the computed norms.
Returns
-------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Normalized input X.
norms : ndarray of shape (n_samples, ) if axis=1 else (n_features, )
An array of norms along given axis for X.
When X is sparse, a NotImplementedError will be raised
for norm 'l1' or 'l2'.
See Also
--------
Normalizer : Performs normalization using the Transformer API
(e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`.
Examples
--------
>>> from sklearn.preprocessing import normalize
>>> X = [[-2, 1, 2], [-1, 0, 1]]
>>> normalize(X, norm="l1") # L1 normalization each row independently
array([[-0.4, 0.2, 0.4],
[-0.5, 0. , 0.5]])
>>> normalize(X, norm="l2") # L2 normalization each row independently
array([[-0.66..., 0.33..., 0.66...],
[-0.70..., 0. , 0.70...]])
| def scale(X, *, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis.
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to center and scale.
axis : {0, 1}, default=0
Axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : bool, default=True
If True, center the data before scaling.
with_std : bool, default=True
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : bool, default=True
If False, try to avoid a copy and scale in place.
This is not guaranteed to always work in place; e.g. if the data is
a numpy array with an int dtype, a copy will be returned even with
copy=False.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
The transformed data.
See Also
--------
StandardScaler : Performs scaling to unit variance using the Transformer
API (e.g. as part of a preprocessing
:class:`~sklearn.pipeline.Pipeline`).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
NaNs are treated as missing values: disregarded to compute the statistics,
and maintained during the data transformation.
We use a biased estimator for the standard deviation, equivalent to
`numpy.std(x, ddof=0)`. Note that the choice of `ddof` is unlikely to
affect model performance.
For a comparison of the different scalers, transformers, and normalizers,
see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`.
.. warning:: Risk of data leak
Do not use :func:`~sklearn.preprocessing.scale` unless you know
what you are doing. A common mistake is to apply it to the entire data
*before* splitting into training and test sets. This will bias the
model evaluation because information would have leaked from the test
set to the training set.
In general, we recommend using
:class:`~sklearn.preprocessing.StandardScaler` within a
:ref:`Pipeline <pipeline>` in order to prevent most risks of data
leaking: `pipe = make_pipeline(StandardScaler(), LogisticRegression())`.
Examples
--------
>>> from sklearn.preprocessing import scale
>>> X = [[-2, 1, 2], [-1, 0, 1]]
>>> scale(X, axis=0) # scaling each column independently
array([[-1., 1., 1.],
[ 1., -1., -1.]])
>>> scale(X, axis=1) # scaling each row independently
array([[-1.37..., 0.39..., 0.98...],
[-1.22..., 0. , 1.22...]])
"""
X = check_array(
X,
accept_sparse="csc",
copy=copy,
ensure_2d=False,
estimator="the scale function",
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives."
)
if axis != 0:
raise ValueError(
"Can only scale sparse matrix on axis=0, got axis=%d" % axis
)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.nanmean(X, axis)
if with_std:
scale_ = np.nanstd(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = np.nanmean(Xr, axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn(
"Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features."
)
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = np.nanmean(Xr, axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn(
"Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. "
)
Xr -= mean_2
return X
| (X, norm='l2', *, axis=1, copy=True, return_norm=False) |
18,116 | imodels.util.rule | replace_feature_name | null | def replace_feature_name(rule: Rule, replace_dict: Dict[str, str]) -> Rule:
def replace(match):
return replace_dict[match.group(0)]
rule_replaced = copy.copy(rule)
rule_replaced.rule = re.sub('|'.join(r'\b%s\b' % re.escape(s) for s in replace_dict), replace, rule.rule)
replaced_agg_dict = {}
for feature, symbol in rule_replaced.agg_dict:
replaced_agg_dict[(replace_dict[feature], symbol)] = rule_replaced.agg_dict[(feature, symbol)]
rule_replaced.agg_dict = replaced_agg_dict
return rule_replaced
| (rule: imodels.util.rule.Rule, replace_dict: Dict[str, str]) -> imodels.util.rule.Rule |
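Hedged sketch (added): renaming placeholder features inside a Rule. It assumes imodels is installed and that Rule can be constructed from a rule string, as in imodels.util.rule.
```
from imodels.util.rule import Rule, replace_feature_name

r = Rule('X_0 <= 3.5 and X_1 > 1.0')
renamed = replace_feature_name(r, {'X_0': 'age', 'X_1': 'income'})
print(renamed.rule)  # age <= 3.5 and income > 1.0
```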
18,120 | sklearn.model_selection._split | train_test_split | Split arrays or matrices into random train and test subsets.
Quick utility that wraps input validation,
``next(ShuffleSplit().split(X, y))``, and application to input data
into a single call for splitting (and optionally subsampling) data into a
one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. If ``train_size`` is also None, it will
be set to 0.25.
train_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, default=None
Controls the shuffling applied to the data before applying the split.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
shuffle : bool, default=True
Whether or not to shuffle the data before splitting. If shuffle=False
then stratify must be None.
stratify : array-like, default=None
If not None, data is split in a stratified fashion, using this as
the class labels.
Read more in the :ref:`User Guide <stratification>`.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
>>> train_test_split(y, shuffle=False)
[[0, 1, 2], [3, 4]]
| def __repr__(self):
return _build_repr(self)
| (*arrays, test_size=None, train_size=None, random_state=None, shuffle=True, stratify=None) |
18,122 | imodels.util.convert | tree_to_code | Prints a tree with a single split
| def tree_to_code(clf, feature_names):
'''Prints a tree with a single split
'''
n_nodes = clf.tree_.node_count
children_left = clf.tree_.children_left
children_right = clf.tree_.children_right
feature = clf.tree_.feature
threshold = clf.tree_.threshold
node_depth = np.zeros(shape=n_nodes, dtype=np.int64)
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, 0)] # start with the root node id (0) and its depth (0)
s = ''
while len(stack) > 0:
# `pop` ensures each node is only visited once
node_id, depth = stack.pop()
node_depth[node_id] = depth
# If the left and right child of a node is not the same we have a split
# node
is_split_node = children_left[node_id] != children_right[node_id]
# If a split node, append left and right children and depth to `stack`
# so we can loop through them
if is_split_node:
stack.append((children_left[node_id], depth + 1))
stack.append((children_right[node_id], depth + 1))
else:
is_leaves[node_id] = True
# print("The binary tree structure has {n} nodes and has "
# "the following tree structure:\n".format(n=n_nodes))
for i in range(n_nodes):
if is_leaves[i]:
pass
# print("{space}node={node} is a leaf node.".format(
# space=node_depth[i] * "\t", node=i))
else:
s += f"{feature_names[feature[i]]} <= {threshold[i]}"
return f"\033[96m{s}\033[00m\n"
| (clf, feature_names) |
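Added sketch (assumes imodels is installed): with a depth-1 tree the function returns the single split, wrapped in ANSI color codes.
```
from sklearn.tree import DecisionTreeClassifier
from imodels.util.convert import tree_to_code

X = [[0], [1], [2], [3]]
y = [0, 0, 1, 1]
stump = DecisionTreeClassifier(max_depth=1).fit(X, y)
print(tree_to_code(stump, ['x0']))  # roughly: x0 <= 1.5 (colored)
```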
18,123 | imodels.util.convert | tree_to_rules |
Return a list of rules from a tree
Parameters
----------
tree : Decision Tree Classifier/Regressor
feature_names: list of variable names
Returns
-------
rules : list of rules.
| def tree_to_rules(tree: Union[DecisionTreeClassifier, DecisionTreeRegressor],
feature_names: List[str],
prediction_values: bool = False, round_thresholds=True) -> List[str]:
"""
Return a list of rules from a tree
Parameters
----------
tree : Decision Tree Classifier/Regressor
feature_names: list of variable names
Returns
-------
rules : list of rules.
"""
# XXX todo: check the case where the tree is built on a subset of features,
# i.e. max_features != None
tree_ = tree.tree_
feature_name = [
feature_names[i] if i != _tree.TREE_UNDEFINED else "undefined!"
for i in tree_.feature
]
rules = []
def recurse(node, base_name):
if tree_.feature[node] != _tree.TREE_UNDEFINED:
name = feature_name[node]
symbol = '<='
symbol2 = '>'
threshold = tree_.threshold[node]
if round_thresholds:
threshold = np.round(threshold, decimals=5)
text = base_name + ["{} {} {}".format(name, symbol, threshold)]
recurse(tree_.children_left[node], text)
text = base_name + ["{} {} {}".format(name, symbol2,
threshold)]
recurse(tree_.children_right[node], text)
else:
rule = str.join(' and ', base_name)
rule = (rule if rule != ''
else ' == '.join([feature_names[0]] * 2))
# a rule selecting all is set to "c0==c0"
if prediction_values:
rules.append((rule, tree_.value[node][0].tolist()))
else:
rules.append(rule)
recurse(0, [])
return rules if len(rules) > 0 else 'True'
| (tree: Union[sklearn.tree._classes.DecisionTreeClassifier, sklearn.tree._classes.DecisionTreeRegressor], feature_names: List[str], prediction_values: bool = False, round_thresholds=True) -> List[str] |
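Added sketch (assumes imodels is installed): one rule string is produced per root-to-leaf path of a small tree.
```
from sklearn.tree import DecisionTreeClassifier
from imodels.util.convert import tree_to_rules

X = [[0, 0], [1, 0], [0, 1], [1, 1]]
y = [0, 0, 1, 1]                      # label equals the second feature
tree = DecisionTreeClassifier(max_depth=2).fit(X, y)
for rule in tree_to_rules(tree, ['f0', 'f1']):
    print(rule)
# expected, roughly:
# f1 <= 0.5
# f1 > 0.5
```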
18,124 | sklearn.utils.multiclass | unique_labels | Extract an ordered array of unique labels.
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes
Label values.
Returns
-------
out : ndarray of shape (n_unique_labels,)
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
| def unique_labels(*ys):
"""Extract an ordered array of unique labels.
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes
Label values.
Returns
-------
out : ndarray of shape (n_unique_labels,)
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
xp, is_array_api_compliant = get_namespace(*ys)
if not ys:
raise ValueError("No argument has been passed.")
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == {"binary", "multiclass"}:
ys_types = {"multiclass"}
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (
label_type == "multilabel-indicator"
and len(
set(
check_array(y, accept_sparse=["csr", "csc", "coo"]).shape[1] for y in ys
)
)
> 1
):
raise ValueError(
"Multi-label binary indicator input with different numbers of labels"
)
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
if is_array_api_compliant:
# array_api does not allow for mixed dtypes
unique_ys = xp.concat([_unique_labels(y) for y in ys])
return xp.unique_values(unique_ys)
ys_labels = set(chain.from_iterable((i for i in _unique_labels(y)) for y in ys))
# Check that we don't mix string type with number type
if len(set(isinstance(label, str) for label in ys_labels)) > 1:
raise ValueError("Mix of label input types (string and number)")
return xp.asarray(sorted(ys_labels))
| (*ys) |
18,126 | jsondiff | CompactJsonDiffSyntax | null | class CompactJsonDiffSyntax(object):
def emit_set_diff(self, a, b, s, added, removed):
if s == 0.0 or len(removed) == len(a):
return {replace: b} if isinstance(b, dict) else b
else:
d = {}
if removed:
d[discard] = removed
if added:
d[add] = added
return d
def emit_list_diff(self, a, b, s, inserted, changed, deleted):
if s == 0.0:
return {replace: b} if isinstance(b, dict) else b
elif s == 1.0:
return {}
else:
d = changed
if inserted:
d[insert] = inserted
if deleted:
d[delete] = [pos for pos, value in deleted]
return d
def emit_dict_diff(self, a, b, s, added, changed, removed):
if s == 0.0:
return {replace: b} if isinstance(b, dict) else b
elif s == 1.0:
return {}
else:
changed.update(added)
if removed:
changed[delete] = list(removed.keys())
return changed
def emit_value_diff(self, a, b, s):
if s == 1.0:
return {}
else:
return {replace: b} if isinstance(b, dict) else b
def patch(self, a, d):
if isinstance(d, dict):
if not d:
return a
if replace in d:
return d[replace]
if isinstance(a, dict):
a = dict(a)
for k, v in d.items():
if k is delete:
for kdel in v:
del a[kdel]
else:
av = a.get(k, missing)
if av is missing:
a[k] = v
else:
a[k] = self.patch(av, v)
return a
elif isinstance(a, (list, tuple)):
original_type = type(a)
a = list(a)
if delete in d:
for pos in d[delete]:
a.pop(pos)
if insert in d:
for pos, value in d[insert]:
a.insert(pos, value)
for k, v in d.items():
if k is not delete and k is not insert:
k = int(k)
a[k] = self.patch(a[k], v)
if original_type is not list:
a = original_type(a)
return a
elif isinstance(a, set):
a = set(a)
if discard in d:
for x in d[discard]:
a.discard(x)
if add in d:
for x in d[add]:
a.add(x)
return a
return d
| () |
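Added illustration of the compact syntax through the package-level diff helper (assumes the jsondiff package is installed; 'compact' is its default syntax).
```
from jsondiff import diff

print(diff({'a': 1, 'b': 2}, {'a': 1, 'b': 3}))   # {'b': 3}
print(diff({'a': 1, 'b': 2}, {'a': 1}))           # {delete: ['b']}    (delete is a Symbol)
print(diff([1, 2, 3], [1, 2, 3, 4]))              # {insert: [(3, 4)]} (insert is a Symbol)
```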
18,127 | jsondiff | emit_dict_diff | null | def emit_dict_diff(self, a, b, s, added, changed, removed):
if s == 0.0:
return {replace: b} if isinstance(b, dict) else b
elif s == 1.0:
return {}
else:
changed.update(added)
if removed:
changed[delete] = list(removed.keys())
return changed
| (self, a, b, s, added, changed, removed) |
18,128 | jsondiff | emit_list_diff | null | def emit_list_diff(self, a, b, s, inserted, changed, deleted):
if s == 0.0:
return {replace: b} if isinstance(b, dict) else b
elif s == 1.0:
return {}
else:
d = changed
if inserted:
d[insert] = inserted
if deleted:
d[delete] = [pos for pos, value in deleted]
return d
| (self, a, b, s, inserted, changed, deleted) |
18,129 | jsondiff | emit_set_diff | null | def emit_set_diff(self, a, b, s, added, removed):
if s == 0.0 or len(removed) == len(a):
return {replace: b} if isinstance(b, dict) else b
else:
d = {}
if removed:
d[discard] = removed
if added:
d[add] = added
return d
| (self, a, b, s, added, removed) |
18,130 | jsondiff | emit_value_diff | null | def emit_value_diff(self, a, b, s):
if s == 1.0:
return {}
else:
return {replace: b} if isinstance(b, dict) else b
| (self, a, b, s) |
18,131 | jsondiff | patch | null | def patch(self, a, d):
if isinstance(d, dict):
if not d:
return a
if replace in d:
return d[replace]
if isinstance(a, dict):
a = dict(a)
for k, v in d.items():
if k is delete:
for kdel in v:
del a[kdel]
else:
av = a.get(k, missing)
if av is missing:
a[k] = v
else:
a[k] = self.patch(av, v)
return a
elif isinstance(a, (list, tuple)):
original_type = type(a)
a = list(a)
if delete in d:
for pos in d[delete]:
a.pop(pos)
if insert in d:
for pos, value in d[insert]:
a.insert(pos, value)
for k, v in d.items():
if k is not delete and k is not insert:
k = int(k)
a[k] = self.patch(a[k], v)
if original_type is not list:
a = original_type(a)
return a
elif isinstance(a, set):
a = set(a)
if discard in d:
for x in d[discard]:
a.discard(x)
if add in d:
for x in d[add]:
a.add(x)
return a
return d
| (self, a, d) |
18,132 | jsondiff | ExplicitJsonDiffSyntax | null | class ExplicitJsonDiffSyntax(object):
def emit_set_diff(self, a, b, s, added, removed):
if s == 0.0 or len(removed) == len(a):
return b
else:
d = {}
if removed:
d[discard] = removed
if added:
d[add] = added
return d
def emit_list_diff(self, a, b, s, inserted, changed, deleted):
if s == 0.0:
return b
elif s == 1.0:
return {}
else:
d = changed
if inserted:
d[insert] = inserted
if deleted:
d[delete] = [pos for pos, value in deleted]
return d
def emit_dict_diff(self, a, b, s, added, changed, removed):
if s == 0.0:
return b
elif s == 1.0:
return {}
else:
d = {}
if added:
d[insert] = added
if changed:
d[update] = changed
if removed:
d[delete] = list(removed.keys())
return d
def emit_value_diff(self, a, b, s):
if s == 1.0:
return {}
else:
return b
| () |
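Added illustration: the explicit syntax keeps insertions, updates, and deletions separate instead of merging them into one dict (assumes the jsondiff package is installed).
```
from jsondiff import diff

print(diff({'a': 1, 'b': 2}, {'a': 5, 'c': 3}, syntax='explicit'))
# {insert: {'c': 3}, update: {'a': 5}, delete: ['b']}
```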
18,133 | jsondiff | emit_dict_diff | null | def emit_dict_diff(self, a, b, s, added, changed, removed):
if s == 0.0:
return b
elif s == 1.0:
return {}
else:
d = {}
if added:
d[insert] = added
if changed:
d[update] = changed
if removed:
d[delete] = list(removed.keys())
return d
| (self, a, b, s, added, changed, removed) |
18,134 | jsondiff | emit_list_diff | null | def emit_list_diff(self, a, b, s, inserted, changed, deleted):
if s == 0.0:
return b
elif s == 1.0:
return {}
else:
d = changed
if inserted:
d[insert] = inserted
if deleted:
d[delete] = [pos for pos, value in deleted]
return d
| (self, a, b, s, inserted, changed, deleted) |
18,135 | jsondiff | emit_set_diff | null | def emit_set_diff(self, a, b, s, added, removed):
if s == 0.0 or len(removed) == len(a):
return b
else:
d = {}
if removed:
d[discard] = removed
if added:
d[add] = added
return d
| (self, a, b, s, added, removed) |
18,136 | jsondiff | emit_value_diff | null | def emit_value_diff(self, a, b, s):
if s == 1.0:
return {}
else:
return b
| (self, a, b, s) |
18,137 | jsondiff | JsonDiffSyntax | null | class JsonDiffSyntax(object):
def emit_set_diff(self, a, b, s, added, removed):
raise NotImplementedError()
def emit_list_diff(self, a, b, s, inserted, changed, deleted):
raise NotImplementedError()
def emit_dict_diff(self, a, b, s, added, changed, removed):
raise NotImplementedError()
def emit_value_diff(self, a, b, s):
raise NotImplementedError()
def patch(self, a, d):
raise NotImplementedError()
def unpatch(self, a, d):
raise NotImplementedError()
| () |
18,138 | jsondiff | emit_dict_diff | null | def emit_dict_diff(self, a, b, s, added, changed, removed):
raise NotImplementedError()
| (self, a, b, s, added, changed, removed) |
18,139 | jsondiff | emit_list_diff | null | def emit_list_diff(self, a, b, s, inserted, changed, deleted):
raise NotImplementedError()
| (self, a, b, s, inserted, changed, deleted) |
18,140 | jsondiff | emit_set_diff | null | def emit_set_diff(self, a, b, s, added, removed):
raise NotImplementedError()
| (self, a, b, s, added, removed) |
18,141 | jsondiff | emit_value_diff | null | def emit_value_diff(self, a, b, s):
raise NotImplementedError()
| (self, a, b, s) |
18,142 | jsondiff | patch | null | def patch(self, a, d):
raise NotImplementedError()
| (self, a, d) |
18,143 | jsondiff | unpatch | null | def unpatch(self, a, d):
raise NotImplementedError()
| (self, a, d) |
18,144 | jsondiff | JsonDiffer | null | class JsonDiffer(object):
class Options(object):
pass
def __init__(self, syntax='compact', load=False, dump=False, marshal=False,
loader=default_loader, dumper=default_dumper, escape_str='$'):
self.options = JsonDiffer.Options()
self.options.syntax = builtin_syntaxes.get(syntax, syntax)
self.options.load = load
self.options.dump = dump
self.options.marshal = marshal
self.options.loader = loader
self.options.dumper = dumper
self.options.escape_str = escape_str
self._symbol_map = {
escape_str + symbol.label: symbol
for symbol in _all_symbols_
}
def _list_diff_0(self, C, X, Y):
i, j = len(X), len(Y)
r = []
while True:
if i > 0 and j > 0:
d, s = self._obj_diff(X[i-1], Y[j-1])
if s > 0 and C[i][j] == C[i-1][j-1] + s:
r.append((0, d, j-1, s))
i, j = i - 1, j - 1
continue
if j > 0 and (i == 0 or C[i][j-1] >= C[i-1][j]):
r.append((1, Y[j-1], j-1, 0.0))
j = j - 1
continue
if i > 0 and (j == 0 or C[i][j-1] < C[i-1][j]):
r.append((-1, X[i-1], i-1, 0.0))
i = i - 1
continue
return reversed(r)
def _list_diff(self, X, Y):
# LCS
m = len(X)
n = len(Y)
# An (m+1) times (n+1) matrix
C = [[0 for j in range(n+1)] for i in range(m+1)]
for i in range(1, m+1):
for j in range(1, n+1):
_, s = self._obj_diff(X[i-1], Y[j-1])
# Following lines are part of the original LCS algorithm
# left in the code in case modification turns out to be problematic
#if X[i-1] == Y[j-1]:
# C[i][j] = C[i-1][j-1] + 1
#else:
C[i][j] = max(C[i][j-1], C[i-1][j], C[i-1][j-1] + s)
inserted = []
deleted = []
changed = {}
tot_s = 0.0
for sign, value, pos, s in self._list_diff_0(C, X, Y):
if sign == 1:
inserted.append((pos, value))
elif sign == -1:
deleted.insert(0, (pos, value))
elif sign == 0 and s < 1:
changed[pos] = value
tot_s += s
tot_n = len(X) + len(inserted)
if tot_n == 0:
s = 1.0
else:
s = tot_s / tot_n
return self.options.syntax.emit_list_diff(X, Y, s, inserted, changed, deleted), s
def _set_diff(self, a, b):
removed = a.difference(b)
added = b.difference(a)
if not removed and not added:
return {}, 1.0
ranking = sorted(
(
(self._obj_diff(x, y)[1], x, y)
for x in removed
for y in added
),
reverse=True,
key=lambda x: x[0]
)
r2 = set(removed)
a2 = set(added)
n_common = len(a) - len(removed)
s_common = float(n_common)
for s, x, y in ranking:
if x in r2 and y in a2:
r2.discard(x)
a2.discard(y)
s_common += s
n_common += 1
if not r2 or not a2:
break
n_tot = len(a) + len(added)
s = s_common / n_tot if n_tot != 0 else 1.0
return self.options.syntax.emit_set_diff(a, b, s, added, removed), s
def _dict_diff(self, a, b):
removed = {}
nremoved = 0
nadded = 0
nmatched = 0
smatched = 0.0
added = {}
changed = {}
for k, v in a.items():
w = b.get(k, missing)
if w is missing:
nremoved += 1
removed[k] = v
else:
nmatched += 1
d, s = self._obj_diff(v, w)
if s < 1.0:
changed[k] = d
smatched += 0.5 + 0.5 * s
for k, v in b.items():
if k not in a:
nadded += 1
added[k] = v
n_tot = nremoved + nmatched + nadded
s = smatched / n_tot if n_tot != 0 else 1.0
return self.options.syntax.emit_dict_diff(a, b, s, added, changed, removed), s
def _obj_diff(self, a, b):
if a is b:
return self.options.syntax.emit_value_diff(a, b, 1.0), 1.0
if isinstance(a, dict) and isinstance(b, dict):
return self._dict_diff(a, b)
elif isinstance(a, tuple) and isinstance(b, tuple):
return self._list_diff(a, b)
elif isinstance(a, list) and isinstance(b, list):
return self._list_diff(a, b)
elif isinstance(a, set) and isinstance(b, set):
return self._set_diff(a, b)
elif a != b:
return self.options.syntax.emit_value_diff(a, b, 0.0), 0.0
else:
return self.options.syntax.emit_value_diff(a, b, 1.0), 1.0
def diff(self, a, b, fp=None):
if self.options.load:
a = self.options.loader(a)
b = self.options.loader(b)
d, s = self._obj_diff(a, b)
if self.options.marshal or self.options.dump:
d = self.marshal(d)
if self.options.dump:
return self.options.dumper(d, fp)
else:
return d
def similarity(self, a, b):
if self.options.load:
a = self.options.loader(a)
b = self.options.loader(b)
d, s = self._obj_diff(a, b)
return s
def patch(self, a, d, fp=None):
if self.options.load:
a = self.options.loader(a)
d = self.options.loader(d)
if self.options.marshal or self.options.load:
d = self.unmarshal(d)
b = self.options.syntax.patch(a, d)
if self.options.dump:
return self.options.dumper(b, fp)
else:
return b
def unpatch(self, b, d, fp=None):
if self.options.load:
b = self.options.loader(b)
d = self.options.loader(d)
if self.options.marshal or self.options.load:
d = self.unmarshal(d)
a = self.options.syntax.unpatch(b, d)
if self.options.dump:
return self.options.dumper(a, fp)
else:
return a
def _unescape(self, x):
if isinstance(x, string_types):
sym = self._symbol_map.get(x, None)
if sym is not None:
return sym
if x.startswith(self.options.escape_str):
return x[1:]
return x
def unmarshal(self, d):
if isinstance(d, dict):
return {
self._unescape(k): self.unmarshal(v)
for k, v in d.items()
}
elif isinstance(d, (list, tuple)):
return type(d)(
self.unmarshal(x)
for x in d
)
else:
return self._unescape(d)
def _escape(self, o):
if type(o) is Symbol:
return self.options.escape_str + o.label
if isinstance(o, string_types) and o.startswith(self.options.escape_str):
return self.options.escape_str + o
return o
def marshal(self, d):
if isinstance(d, dict):
return {
self._escape(k): self.marshal(v)
for k, v in d.items()
}
elif isinstance(d, (list, tuple)):
return type(d)(
self.marshal(x)
for x in d
)
else:
return self._escape(d)
| (syntax='compact', load=False, dump=False, marshal=False, loader=<jsondiff.JsonLoader object at 0x7f5da7f7c0a0>, dumper=<jsondiff.JsonDumper object at 0x7f5da7f7c040>, escape_str='$') |
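Added sketch of a diff/patch round trip with an explicitly constructed differ (assumes the jsondiff package is installed); the dictionaries are made-up data.
```
from jsondiff import JsonDiffer

differ = JsonDiffer(syntax='compact')
a = {'name': 'slim', 'alpha': 1}
b = {'name': 'slim', 'alpha': 2, 'fitted': True}

d = differ.diff(a, b)
print(d)                         # {'alpha': 2, 'fitted': True}
print(differ.patch(a, d) == b)   # True
```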
18,145 | jsondiff | __init__ | null | def __init__(self, syntax='compact', load=False, dump=False, marshal=False,
loader=default_loader, dumper=default_dumper, escape_str='$'):
self.options = JsonDiffer.Options()
self.options.syntax = builtin_syntaxes.get(syntax, syntax)
self.options.load = load
self.options.dump = dump
self.options.marshal = marshal
self.options.loader = loader
self.options.dumper = dumper
self.options.escape_str = escape_str
self._symbol_map = {
escape_str + symbol.label: symbol
for symbol in _all_symbols_
}
| (self, syntax='compact', load=False, dump=False, marshal=False, loader=<jsondiff.JsonLoader object at 0x7f5da7f7c0a0>, dumper=<jsondiff.JsonDumper object at 0x7f5da7f7c040>, escape_str='$') |
18,146 | jsondiff | _dict_diff | null | def _dict_diff(self, a, b):
removed = {}
nremoved = 0
nadded = 0
nmatched = 0
smatched = 0.0
added = {}
changed = {}
for k, v in a.items():
w = b.get(k, missing)
if w is missing:
nremoved += 1
removed[k] = v
else:
nmatched += 1
d, s = self._obj_diff(v, w)
if s < 1.0:
changed[k] = d
smatched += 0.5 + 0.5 * s
for k, v in b.items():
if k not in a:
nadded += 1
added[k] = v
n_tot = nremoved + nmatched + nadded
s = smatched / n_tot if n_tot != 0 else 1.0
return self.options.syntax.emit_dict_diff(a, b, s, added, changed, removed), s
| (self, a, b) |
18,147 | jsondiff | _escape | null | def _escape(self, o):
if type(o) is Symbol:
return self.options.escape_str + o.label
if isinstance(o, string_types) and o.startswith(self.options.escape_str):
return self.options.escape_str + o
return o
| (self, o) |
18,148 | jsondiff | _list_diff | null | def _list_diff(self, X, Y):
# LCS
m = len(X)
n = len(Y)
# An (m+1) times (n+1) matrix
C = [[0 for j in range(n+1)] for i in range(m+1)]
for i in range(1, m+1):
for j in range(1, n+1):
_, s = self._obj_diff(X[i-1], Y[j-1])
# Following lines are part of the original LCS algorithm
# left in the code in case modification turns out to be problematic
#if X[i-1] == Y[j-1]:
# C[i][j] = C[i-1][j-1] + 1
#else:
C[i][j] = max(C[i][j-1], C[i-1][j], C[i-1][j-1] + s)
inserted = []
deleted = []
changed = {}
tot_s = 0.0
for sign, value, pos, s in self._list_diff_0(C, X, Y):
if sign == 1:
inserted.append((pos, value))
elif sign == -1:
deleted.insert(0, (pos, value))
elif sign == 0 and s < 1:
changed[pos] = value
tot_s += s
tot_n = len(X) + len(inserted)
if tot_n == 0:
s = 1.0
else:
s = tot_s / tot_n
return self.options.syntax.emit_list_diff(X, Y, s, inserted, changed, deleted), s
| (self, X, Y) |
18,149 | jsondiff | _list_diff_0 | null | def _list_diff_0(self, C, X, Y):
i, j = len(X), len(Y)
r = []
while True:
if i > 0 and j > 0:
d, s = self._obj_diff(X[i-1], Y[j-1])
if s > 0 and C[i][j] == C[i-1][j-1] + s:
r.append((0, d, j-1, s))
i, j = i - 1, j - 1
continue
if j > 0 and (i == 0 or C[i][j-1] >= C[i-1][j]):
r.append((1, Y[j-1], j-1, 0.0))
j = j - 1
continue
if i > 0 and (j == 0 or C[i][j-1] < C[i-1][j]):
r.append((-1, X[i-1], i-1, 0.0))
i = i - 1
continue
return reversed(r)
| (self, C, X, Y) |
18,150 | jsondiff | _obj_diff | null | def _obj_diff(self, a, b):
if a is b:
return self.options.syntax.emit_value_diff(a, b, 1.0), 1.0
if isinstance(a, dict) and isinstance(b, dict):
return self._dict_diff(a, b)
elif isinstance(a, tuple) and isinstance(b, tuple):
return self._list_diff(a, b)
elif isinstance(a, list) and isinstance(b, list):
return self._list_diff(a, b)
elif isinstance(a, set) and isinstance(b, set):
return self._set_diff(a, b)
elif a != b:
return self.options.syntax.emit_value_diff(a, b, 0.0), 0.0
else:
return self.options.syntax.emit_value_diff(a, b, 1.0), 1.0
| (self, a, b) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.