Dataset columns:
    code        string, lengths 66 to 870k
    docstring   string, lengths 19 to 26.7k
    func_name   string, lengths 1 to 138
    language    string, 1 distinct value
    repo        string, lengths 7 to 68
    path        string, lengths 5 to 324
    url         string, lengths 46 to 389
    license     string, 7 distinct values
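A corpus with this schema can typically be consumed with the `datasets` library. Below is a minimal sketch; the hub id "org/code-docstring-corpus" is a placeholder, not the real path of this dataset.

from datasets import load_dataset

# Hypothetical hub id; replace with the actual dataset path.
ds = load_dataset("org/code-docstring-corpus", split="train")

# Keep only the scikit-learn rows, mirroring the records shown below.
sklearn_rows = ds.filter(lambda row: row["repo"] == "scikit-learn/scikit-learn")
print(sklearn_rows[0]["func_name"], sklearn_rows[0]["path"])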
def test_tuned_threshold_classifier_error_constant_predictor():
    """Check that we raise a ValueError if the underlying classifier returns constant
    probabilities such that we cannot find any threshold.
    """
    X, y = make_classification(random_state=0)
    estimator = DummyClassifier(strategy="constant", constant=1)
    tuned_model = TunedThresholdClassifierCV(
        estimator, response_method="predict_proba"
    )
    err_msg = "The provided estimator makes constant predictions"
    with pytest.raises(ValueError, match=err_msg):
        tuned_model.fit(X, y)
Check that we raise a ValueError if the underlying classifier returns constant probabilities such that we cannot find any threshold.
test_tuned_threshold_classifier_error_constant_predictor
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_classification_threshold.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
BSD-3-Clause
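For context, a minimal sketch of the API this test exercises, assuming scikit-learn >= 1.5 (where `TunedThresholdClassifierCV` was introduced); the scoring choice and class weights are illustrative:

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import TunedThresholdClassifierCV

X, y = make_classification(weights=[0.9, 0.1], random_state=0)

# Tune the decision threshold by cross-validation instead of using 0.5.
model = TunedThresholdClassifierCV(
    LogisticRegression(), scoring="balanced_accuracy"
).fit(X, y)
print(model.best_threshold_)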
def test_fixed_threshold_classifier_equivalence_default(response_method):
    """Check that `FixedThresholdClassifier` has the same behaviour as the vanilla
    classifier.
    """
    X, y = make_classification(random_state=0)
    classifier = LogisticRegression().fit(X, y)
    classifier_default_threshold = FixedThresholdClassifier(
        estimator=clone(classifier), response_method=response_method
    )
    classifier_default_threshold.fit(X, y)

    # emulate the response method that should take into account the `pos_label`
    if response_method in ("auto", "predict_proba"):
        y_score = classifier_default_threshold.predict_proba(X)[:, 1]
        threshold = 0.5
    else:  # response_method == "decision_function"
        y_score = classifier_default_threshold.decision_function(X)
        threshold = 0.0

    y_pred_lr = (y_score >= threshold).astype(int)
    assert_allclose(classifier_default_threshold.predict(X), y_pred_lr)
Check that `FixedThresholdClassifier` has the same behaviour as the vanilla classifier.
test_fixed_threshold_classifier_equivalence_default
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_classification_threshold.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
BSD-3-Clause
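Similarly, a minimal sketch of `FixedThresholdClassifier` with a non-default cut-off (the 0.3 threshold is an arbitrary illustrative value):

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import FixedThresholdClassifier

X, y = make_classification(random_state=0)

# Predict the positive class whenever P(y=1) >= 0.3 instead of the default 0.5.
model = FixedThresholdClassifier(
    LogisticRegression(), threshold=0.3, response_method="predict_proba"
).fit(X, y)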
def test_fixed_threshold_classifier(response_method, threshold, pos_label):
    """Check that applying `predict` leads to the same prediction as applying the
    threshold to the output of the response method.
    """
    X, y = make_classification(n_samples=50, random_state=0)
    logistic_regression = LogisticRegression().fit(X, y)
    model = FixedThresholdClassifier(
        estimator=clone(logistic_regression),
        threshold=threshold,
        response_method=response_method,
        pos_label=pos_label,
    ).fit(X, y)

    # check that the underlying estimator is the same
    assert_allclose(model.estimator_.coef_, logistic_regression.coef_)

    # emulate the response method that should take into account the `pos_label`
    if response_method == "predict_proba":
        y_score = model.predict_proba(X)[:, pos_label]
    else:  # response_method == "decision_function"
        y_score = model.decision_function(X)
        y_score = y_score if pos_label == 1 else -y_score

    # create a mapping from boolean values to class labels
    map_to_label = np.array([0, 1]) if pos_label == 1 else np.array([1, 0])
    y_pred_lr = map_to_label[(y_score >= threshold).astype(int)]

    assert_allclose(model.predict(X), y_pred_lr)

    for method in ("predict_proba", "predict_log_proba", "decision_function"):
        assert_allclose(
            getattr(model, method)(X), getattr(logistic_regression, method)(X)
        )
        assert_allclose(
            getattr(model.estimator_, method)(X),
            getattr(logistic_regression, method)(X),
        )
Check that applying `predict` leads to the same prediction as applying the threshold to the output of the response method.
test_fixed_threshold_classifier
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_classification_threshold.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
BSD-3-Clause
def test_fixed_threshold_classifier_metadata_routing():
    """Check that everything works with metadata routing."""
    X, y = make_classification(random_state=0)
    sample_weight = np.ones_like(y)
    sample_weight[::2] = 2
    classifier = LogisticRegression().set_fit_request(sample_weight=True)
    classifier.fit(X, y, sample_weight=sample_weight)
    classifier_default_threshold = FixedThresholdClassifier(
        estimator=clone(classifier)
    )
    classifier_default_threshold.fit(X, y, sample_weight=sample_weight)
    assert_allclose(classifier_default_threshold.estimator_.coef_, classifier.coef_)
Check that everything works with metadata routing.
test_fixed_threshold_classifier_metadata_routing
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_classification_threshold.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
BSD-3-Clause
def test_fixed_threshold_classifier_fitted_estimator(method):
    """Check that if the underlying estimator is already fitted, no fit is required."""
    X, y = make_classification(random_state=0)
    classifier = LogisticRegression().fit(X, y)
    fixed_threshold_classifier = FixedThresholdClassifier(estimator=classifier)
    # This should not raise an error
    getattr(fixed_threshold_classifier, method)(X)
Check that if the underlying estimator is already fitted, no fit is required.
test_fixed_threshold_classifier_fitted_estimator
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_classification_threshold.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
BSD-3-Clause
def test_fixed_threshold_classifier_classes_():
    """Check that the classes_ attribute is properly set."""
    X, y = make_classification(random_state=0)
    with pytest.raises(
        AttributeError, match="The underlying estimator is not fitted yet."
    ):
        FixedThresholdClassifier(estimator=LogisticRegression()).classes_

    classifier = LogisticRegression().fit(X, y)
    fixed_threshold_classifier = FixedThresholdClassifier(estimator=classifier)
    assert_array_equal(fixed_threshold_classifier.classes_, classifier.classes_)
Check that the classes_ attribute is properly set.
test_fixed_threshold_classifier_classes_
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_classification_threshold.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_classification_threshold.py
BSD-3-Clause
def test_curve_display_parameters_validation(
    pyplot, data, params, err_type, err_msg, CurveDisplay, specific_params
):
    """Check that we raise a proper error when passing invalid parameters."""
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)
    with pytest.raises(err_type, match=err_msg):
        CurveDisplay.from_estimator(estimator, X, y, **specific_params, **params)
Check that we raise a proper error when passing invalid parameters.
test_curve_display_parameters_validation
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_plot.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_plot.py
BSD-3-Clause
def test_learning_curve_display_default_usage(pyplot, data):
    """Check the default usage of the LearningCurveDisplay class."""
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)

    train_sizes = [0.3, 0.6, 0.9]
    display = LearningCurveDisplay.from_estimator(
        estimator, X, y, train_sizes=train_sizes
    )

    import matplotlib as mpl

    assert display.errorbar_ is None

    assert isinstance(display.lines_, list)
    for line in display.lines_:
        assert isinstance(line, mpl.lines.Line2D)

    assert isinstance(display.fill_between_, list)
    for fill in display.fill_between_:
        assert isinstance(fill, mpl.collections.PolyCollection)
        assert fill.get_alpha() == 0.5

    assert display.score_name == "Score"
    assert display.ax_.get_xlabel() == "Number of samples in the training set"
    assert display.ax_.get_ylabel() == "Score"

    _, legend_labels = display.ax_.get_legend_handles_labels()
    assert legend_labels == ["Train", "Test"]

    train_sizes_abs, train_scores, test_scores = learning_curve(
        estimator, X, y, train_sizes=train_sizes
    )

    assert_array_equal(display.train_sizes, train_sizes_abs)
    assert_allclose(display.train_scores, train_scores)
    assert_allclose(display.test_scores, test_scores)
Check the default usage of the LearningCurveDisplay class.
test_learning_curve_display_default_usage
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_plot.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_plot.py
BSD-3-Clause
def test_validation_curve_display_default_usage(pyplot, data):
    """Check the default usage of the ValidationCurveDisplay class."""
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)

    param_name, param_range = "max_depth", [1, 3, 5]
    display = ValidationCurveDisplay.from_estimator(
        estimator, X, y, param_name=param_name, param_range=param_range
    )

    import matplotlib as mpl

    assert display.errorbar_ is None

    assert isinstance(display.lines_, list)
    for line in display.lines_:
        assert isinstance(line, mpl.lines.Line2D)

    assert isinstance(display.fill_between_, list)
    for fill in display.fill_between_:
        assert isinstance(fill, mpl.collections.PolyCollection)
        assert fill.get_alpha() == 0.5

    assert display.score_name == "Score"
    assert display.ax_.get_xlabel() == f"{param_name}"
    assert display.ax_.get_ylabel() == "Score"

    _, legend_labels = display.ax_.get_legend_handles_labels()
    assert legend_labels == ["Train", "Test"]

    train_scores, test_scores = validation_curve(
        estimator, X, y, param_name=param_name, param_range=param_range
    )

    assert_array_equal(display.param_range, param_range)
    assert_allclose(display.train_scores, train_scores)
    assert_allclose(display.test_scores, test_scores)
Check the default usage of the ValidationCurveDisplay class.
test_validation_curve_display_default_usage
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_plot.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_plot.py
BSD-3-Clause
def test_curve_display_negate_score(pyplot, data, CurveDisplay, specific_params):
    """Check the behaviour of the `negate_score` parameter when calling
    `from_estimator` and `plot`.
    """
    X, y = data
    estimator = DecisionTreeClassifier(max_depth=1, random_state=0)

    negate_score = False
    display = CurveDisplay.from_estimator(
        estimator, X, y, **specific_params, negate_score=negate_score
    )

    positive_scores = display.lines_[0].get_data()[1]
    assert (positive_scores >= 0).all()
    assert display.ax_.get_ylabel() == "Score"

    negate_score = True
    display = CurveDisplay.from_estimator(
        estimator, X, y, **specific_params, negate_score=negate_score
    )
    negative_scores = display.lines_[0].get_data()[1]
    assert (negative_scores <= 0).all()
    assert_allclose(negative_scores, -positive_scores)
    assert display.ax_.get_ylabel() == "Negative score"

    negate_score = False
    display = CurveDisplay.from_estimator(
        estimator, X, y, **specific_params, negate_score=negate_score
    )
    assert display.ax_.get_ylabel() == "Score"
    display.plot(negate_score=not negate_score)
    assert display.ax_.get_ylabel() == "Score"
    assert (display.lines_[0].get_data()[1] < 0).all()
Check the behaviour of the `negate_score` parameter when calling `from_estimator` and `plot`.
test_curve_display_negate_score
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_plot.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_plot.py
BSD-3-Clause
def test_curve_display_score_name(
    pyplot, data, score_name, ylabel, CurveDisplay, specific_params
):
    """Check that we can overwrite the default score name shown on the y-axis."""
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)

    display = CurveDisplay.from_estimator(
        estimator, X, y, **specific_params, score_name=score_name
    )

    assert display.ax_.get_ylabel() == ylabel

    X, y = data
    estimator = DecisionTreeClassifier(max_depth=1, random_state=0)

    display = CurveDisplay.from_estimator(
        estimator, X, y, **specific_params, score_name=score_name
    )

    assert display.score_name == ylabel
Check that we can overwrite the default score name shown on the y-axis.
test_curve_display_score_name
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_plot.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_plot.py
BSD-3-Clause
def test_learning_curve_display_score_type(pyplot, data, std_display_style):
    """Check the behaviour of setting the `score_type` parameter."""
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)

    train_sizes = [0.3, 0.6, 0.9]
    train_sizes_abs, train_scores, test_scores = learning_curve(
        estimator, X, y, train_sizes=train_sizes
    )

    score_type = "train"
    display = LearningCurveDisplay.from_estimator(
        estimator,
        X,
        y,
        train_sizes=train_sizes,
        score_type=score_type,
        std_display_style=std_display_style,
    )

    _, legend_label = display.ax_.get_legend_handles_labels()
    assert legend_label == ["Train"]

    if std_display_style is None:
        assert len(display.lines_) == 1
        assert display.errorbar_ is None
        x_data, y_data = display.lines_[0].get_data()
    else:
        assert display.lines_ is None
        assert len(display.errorbar_) == 1
        x_data, y_data = display.errorbar_[0].lines[0].get_data()

    assert_array_equal(x_data, train_sizes_abs)
    assert_allclose(y_data, train_scores.mean(axis=1))

    score_type = "test"
    display = LearningCurveDisplay.from_estimator(
        estimator,
        X,
        y,
        train_sizes=train_sizes,
        score_type=score_type,
        std_display_style=std_display_style,
    )

    _, legend_label = display.ax_.get_legend_handles_labels()
    assert legend_label == ["Test"]

    if std_display_style is None:
        assert len(display.lines_) == 1
        assert display.errorbar_ is None
        x_data, y_data = display.lines_[0].get_data()
    else:
        assert display.lines_ is None
        assert len(display.errorbar_) == 1
        x_data, y_data = display.errorbar_[0].lines[0].get_data()

    assert_array_equal(x_data, train_sizes_abs)
    assert_allclose(y_data, test_scores.mean(axis=1))

    score_type = "both"
    display = LearningCurveDisplay.from_estimator(
        estimator,
        X,
        y,
        train_sizes=train_sizes,
        score_type=score_type,
        std_display_style=std_display_style,
    )

    _, legend_label = display.ax_.get_legend_handles_labels()
    assert legend_label == ["Train", "Test"]

    if std_display_style is None:
        assert len(display.lines_) == 2
        assert display.errorbar_ is None
        x_data_train, y_data_train = display.lines_[0].get_data()
        x_data_test, y_data_test = display.lines_[1].get_data()
    else:
        assert display.lines_ is None
        assert len(display.errorbar_) == 2
        x_data_train, y_data_train = display.errorbar_[0].lines[0].get_data()
        x_data_test, y_data_test = display.errorbar_[1].lines[0].get_data()

    assert_array_equal(x_data_train, train_sizes_abs)
    assert_allclose(y_data_train, train_scores.mean(axis=1))
    assert_array_equal(x_data_test, train_sizes_abs)
    assert_allclose(y_data_test, test_scores.mean(axis=1))
Check the behaviour of setting the `score_type` parameter.
test_learning_curve_display_score_type
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_plot.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_plot.py
BSD-3-Clause
def test_validation_curve_display_score_type(pyplot, data, std_display_style):
    """Check the behaviour of setting the `score_type` parameter."""
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)

    param_name, param_range = "max_depth", [1, 3, 5]
    train_scores, test_scores = validation_curve(
        estimator, X, y, param_name=param_name, param_range=param_range
    )

    score_type = "train"
    display = ValidationCurveDisplay.from_estimator(
        estimator,
        X,
        y,
        param_name=param_name,
        param_range=param_range,
        score_type=score_type,
        std_display_style=std_display_style,
    )

    _, legend_label = display.ax_.get_legend_handles_labels()
    assert legend_label == ["Train"]

    if std_display_style is None:
        assert len(display.lines_) == 1
        assert display.errorbar_ is None
        x_data, y_data = display.lines_[0].get_data()
    else:
        assert display.lines_ is None
        assert len(display.errorbar_) == 1
        x_data, y_data = display.errorbar_[0].lines[0].get_data()

    assert_array_equal(x_data, param_range)
    assert_allclose(y_data, train_scores.mean(axis=1))

    score_type = "test"
    display = ValidationCurveDisplay.from_estimator(
        estimator,
        X,
        y,
        param_name=param_name,
        param_range=param_range,
        score_type=score_type,
        std_display_style=std_display_style,
    )

    _, legend_label = display.ax_.get_legend_handles_labels()
    assert legend_label == ["Test"]

    if std_display_style is None:
        assert len(display.lines_) == 1
        assert display.errorbar_ is None
        x_data, y_data = display.lines_[0].get_data()
    else:
        assert display.lines_ is None
        assert len(display.errorbar_) == 1
        x_data, y_data = display.errorbar_[0].lines[0].get_data()

    assert_array_equal(x_data, param_range)
    assert_allclose(y_data, test_scores.mean(axis=1))

    score_type = "both"
    display = ValidationCurveDisplay.from_estimator(
        estimator,
        X,
        y,
        param_name=param_name,
        param_range=param_range,
        score_type=score_type,
        std_display_style=std_display_style,
    )

    _, legend_label = display.ax_.get_legend_handles_labels()
    assert legend_label == ["Train", "Test"]

    if std_display_style is None:
        assert len(display.lines_) == 2
        assert display.errorbar_ is None
        x_data_train, y_data_train = display.lines_[0].get_data()
        x_data_test, y_data_test = display.lines_[1].get_data()
    else:
        assert display.lines_ is None
        assert len(display.errorbar_) == 2
        x_data_train, y_data_train = display.errorbar_[0].lines[0].get_data()
        x_data_test, y_data_test = display.errorbar_[1].lines[0].get_data()

    assert_array_equal(x_data_train, param_range)
    assert_allclose(y_data_train, train_scores.mean(axis=1))
    assert_array_equal(x_data_test, param_range)
    assert_allclose(y_data_test, test_scores.mean(axis=1))
Check the behaviour of setting the `score_type` parameter.
test_validation_curve_display_score_type
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_plot.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_plot.py
BSD-3-Clause
def test_curve_display_xscale_auto(
    pyplot, data, CurveDisplay, specific_params, expected_xscale
):
    """Check the behaviour of the x-axis scaling depending on the data provided."""
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)

    display = CurveDisplay.from_estimator(estimator, X, y, **specific_params)
    assert display.ax_.get_xscale() == expected_xscale
Check the behaviour of the x-axis scaling depending on the data provided.
test_curve_display_xscale_auto
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_plot.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_plot.py
BSD-3-Clause
def test_curve_display_std_display_style(pyplot, data, CurveDisplay, specific_params):
    """Check the behaviour of the parameter `std_display_style`."""
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)

    import matplotlib as mpl

    std_display_style = None
    display = CurveDisplay.from_estimator(
        estimator,
        X,
        y,
        **specific_params,
        std_display_style=std_display_style,
    )
    assert len(display.lines_) == 2
    for line in display.lines_:
        assert isinstance(line, mpl.lines.Line2D)
    assert display.errorbar_ is None
    assert display.fill_between_ is None
    _, legend_label = display.ax_.get_legend_handles_labels()
    assert len(legend_label) == 2

    std_display_style = "fill_between"
    display = CurveDisplay.from_estimator(
        estimator,
        X,
        y,
        **specific_params,
        std_display_style=std_display_style,
    )
    assert len(display.lines_) == 2
    for line in display.lines_:
        assert isinstance(line, mpl.lines.Line2D)
    assert display.errorbar_ is None
    assert len(display.fill_between_) == 2
    for fill_between in display.fill_between_:
        assert isinstance(fill_between, mpl.collections.PolyCollection)
    _, legend_label = display.ax_.get_legend_handles_labels()
    assert len(legend_label) == 2

    std_display_style = "errorbar"
    display = CurveDisplay.from_estimator(
        estimator,
        X,
        y,
        **specific_params,
        std_display_style=std_display_style,
    )
    assert display.lines_ is None
    assert len(display.errorbar_) == 2
    for errorbar in display.errorbar_:
        assert isinstance(errorbar, mpl.container.ErrorbarContainer)
    assert display.fill_between_ is None
    _, legend_label = display.ax_.get_legend_handles_labels()
    assert len(legend_label) == 2
Check the behaviour of the parameter `std_display_style`.
test_curve_display_std_display_style
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_plot.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_plot.py
BSD-3-Clause
def test_curve_display_plot_kwargs(pyplot, data, CurveDisplay, specific_params):
    """Check the behaviour of the different plotting keyword arguments: `line_kw`,
    `fill_between_kw`, and `errorbar_kw`."""
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)

    std_display_style = "fill_between"
    line_kw = {"color": "red"}
    fill_between_kw = {"color": "red", "alpha": 1.0}
    display = CurveDisplay.from_estimator(
        estimator,
        X,
        y,
        **specific_params,
        std_display_style=std_display_style,
        line_kw=line_kw,
        fill_between_kw=fill_between_kw,
    )

    assert display.lines_[0].get_color() == "red"
    assert_allclose(
        display.fill_between_[0].get_facecolor(),
        [[1.0, 0.0, 0.0, 1.0]],  # trust me, it's red
    )

    std_display_style = "errorbar"
    errorbar_kw = {"color": "red"}
    display = CurveDisplay.from_estimator(
        estimator,
        X,
        y,
        **specific_params,
        std_display_style=std_display_style,
        errorbar_kw=errorbar_kw,
    )

    assert display.errorbar_[0].lines[0].get_color() == "red"
Check the behaviour of the different plotting keyword arguments: `line_kw`, `fill_between_kw`, and `errorbar_kw`.
test_curve_display_plot_kwargs
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_plot.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_plot.py
BSD-3-Clause
def test_validation_curve_xscale_from_param_range_provided_as_a_list(
    pyplot, data, param_range, xscale
):
    """Check the induced xscale from the provided param_range values."""
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)

    param_name = "max_depth"
    display = ValidationCurveDisplay.from_estimator(
        estimator,
        X,
        y,
        param_name=param_name,
        param_range=param_range,
    )

    assert display.ax_.get_xscale() == xscale
Check the induced xscale from the provided param_range values.
test_validation_curve_xscale_from_param_range_provided_as_a_list
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_plot.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_plot.py
BSD-3-Clause
def test_subclassing_displays(pyplot, data, Display, params):
    """Check that named constructors return the correct type when subclassed.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/pull/27675
    """
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)

    class SubclassOfDisplay(Display):
        pass

    display = SubclassOfDisplay.from_estimator(estimator, X, y, **params)
    assert isinstance(display, SubclassOfDisplay)
Check that named constructors return the correct type when subclassed. Non-regression test for: https://github.com/scikit-learn/scikit-learn/pull/27675
test_subclassing_displays
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_plot.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_plot.py
BSD-3-Clause
def test_refit_callable():
    """
    Test refit=callable, which adds flexibility in identifying the
    "best" estimator.
    """

    def refit_callable(cv_results):
        """
        A dummy function that tests the `refit=callable` interface.
        Return the index of a model that has the least `mean_test_score`.
        """
        # Fit a dummy clf with `refit=True` to get a list of keys in
        # clf.cv_results_.
        X, y = make_classification(n_samples=100, n_features=4, random_state=42)
        clf = GridSearchCV(
            LinearSVC(random_state=42),
            {"C": [0.01, 0.1, 1]},
            scoring="precision",
            refit=True,
        )
        clf.fit(X, y)
        # Ensure that `best_index_ != 0` for this dummy clf
        assert clf.best_index_ != 0
        # Assert every key matches those in `cv_results`
        for key in clf.cv_results_.keys():
            assert key in cv_results

        return cv_results["mean_test_score"].argmin()

    X, y = make_classification(n_samples=100, n_features=4, random_state=42)
    clf = GridSearchCV(
        LinearSVC(random_state=42),
        {"C": [0.01, 0.1, 1]},
        scoring="precision",
        refit=refit_callable,
    )
    clf.fit(X, y)

    assert clf.best_index_ == 0
    # Ensure `best_score_` is disabled when using `refit=callable`
    assert not hasattr(clf, "best_score_")
Test refit=callable, which adds flexibility in identifying the "best" estimator.
test_refit_callable
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_search.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_search.py
BSD-3-Clause
def refit_callable(cv_results):
    """
    A dummy function that tests the `refit=callable` interface.
    Return the index of a model that has the least `mean_test_score`.
    """
    # Fit a dummy clf with `refit=True` to get a list of keys in
    # clf.cv_results_.
    X, y = make_classification(n_samples=100, n_features=4, random_state=42)
    clf = GridSearchCV(
        LinearSVC(random_state=42),
        {"C": [0.01, 0.1, 1]},
        scoring="precision",
        refit=True,
    )
    clf.fit(X, y)
    # Ensure that `best_index_ != 0` for this dummy clf
    assert clf.best_index_ != 0
    # Assert every key matches those in `cv_results`
    for key in clf.cv_results_.keys():
        assert key in cv_results

    return cv_results["mean_test_score"].argmin()
A dummy function that tests the `refit=callable` interface. Return the index of a model that has the least `mean_test_score`.
refit_callable
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_search.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_search.py
BSD-3-Clause
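The tests above pin down the `refit=callable` contract: the callable receives `cv_results_` and must return an integer index. A practical variant, sketched under the assumption of a single numeric grid over "C" (so the column is `param_C`), is the one-standard-error rule:

import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC

def refit_one_standard_error(cv_results):
    """Pick the smallest C whose mean test score is within one standard
    deviation of the best mean test score."""
    mean = cv_results["mean_test_score"]
    std = cv_results["std_test_score"]
    cutoff = mean.max() - std[mean.argmax()]
    candidates = np.flatnonzero(mean >= cutoff)
    c_values = np.asarray(cv_results["param_C"], dtype=float)
    return int(candidates[np.argmin(c_values[candidates])])

X, y = make_classification(n_samples=100, random_state=42)
clf = GridSearchCV(
    LinearSVC(random_state=42), {"C": [0.01, 0.1, 1]}, refit=refit_one_standard_error
).fit(X, y)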
def test_refit_callable_invalid_type():
    """
    Test that the implementation catches the errors when 'best_index_' returns
    an invalid result.
    """

    def refit_callable_invalid_type(cv_results):
        """
        A dummy function that tests when the returned 'best_index_' is not an
        integer.
        """
        return None

    X, y = make_classification(n_samples=100, n_features=4, random_state=42)

    clf = GridSearchCV(
        LinearSVC(random_state=42),
        {"C": [0.1, 1]},
        scoring="precision",
        refit=refit_callable_invalid_type,
    )
    with pytest.raises(TypeError, match="best_index_ returned is not an integer"):
        clf.fit(X, y)
Test that the implementation catches the errors when 'best_index_' returns an invalid result.
test_refit_callable_invalid_type
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_search.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_search.py
BSD-3-Clause
def test_refit_callable_out_bound(out_bound_value, search_cv):
    """
    Test that the implementation catches the errors when 'best_index_' returns
    an out-of-bounds result.
    """

    def refit_callable_out_bound(cv_results):
        """
        A dummy function that tests when the returned 'best_index_' is out of
        bounds.
        """
        return out_bound_value

    X, y = make_classification(n_samples=100, n_features=4, random_state=42)

    clf = search_cv(
        LinearSVC(random_state=42),
        {"C": [0.1, 1]},
        scoring="precision",
        refit=refit_callable_out_bound,
    )
    with pytest.raises(IndexError, match="best_index_ index out of range"):
        clf.fit(X, y)
Test that the implementation catches the errors when 'best_index_' returns an out-of-bounds result.
test_refit_callable_out_bound
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_search.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_search.py
BSD-3-Clause
def test_refit_callable_multi_metric():
    """
    Test refit=callable in a multi-metric evaluation setting.
    """

    def refit_callable(cv_results):
        """
        A dummy function that tests the `refit=callable` interface.
        Return the index of a model that has the least `mean_test_prec`.
        """
        assert "mean_test_prec" in cv_results
        return cv_results["mean_test_prec"].argmin()

    X, y = make_classification(n_samples=100, n_features=4, random_state=42)
    scoring = {"Accuracy": make_scorer(accuracy_score), "prec": "precision"}
    clf = GridSearchCV(
        LinearSVC(random_state=42),
        {"C": [0.01, 0.1, 1]},
        scoring=scoring,
        refit=refit_callable,
    )
    clf.fit(X, y)

    assert clf.best_index_ == 0
    # Ensure `best_score_` is disabled when using `refit=callable`
    assert not hasattr(clf, "best_score_")
Test refit=callable in a multi-metric evaluation setting.
test_refit_callable_multi_metric
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_search.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_search.py
BSD-3-Clause
def compare_cv_results_multimetric_with_single(search_multi, search_acc, search_rec):
    """Compare multi-metric cv_results with the ensemble of multiple single metric
    cv_results from single metric grid/random search"""
    assert search_multi.multimetric_
    assert_array_equal(sorted(search_multi.scorer_), ("accuracy", "recall"))

    cv_results_multi = search_multi.cv_results_
    cv_results_acc_rec = {
        re.sub("_score$", "_accuracy", k): v for k, v in search_acc.cv_results_.items()
    }
    cv_results_acc_rec.update(
        {re.sub("_score$", "_recall", k): v for k, v in search_rec.cv_results_.items()}
    )

    # Check if score and timing are reasonable, also checks if the keys
    # are present
    assert all(
        (
            np.all(cv_results_multi[k] <= 1)
            for k in (
                "mean_score_time",
                "std_score_time",
                "mean_fit_time",
                "std_fit_time",
            )
        )
    )

    # Compare the keys, other than time keys, among multi-metric and
    # single metric grid search results. np.testing.assert_equal performs a
    # deep nested comparison of the two cv_results dicts
    np.testing.assert_equal(
        {k: v for k, v in cv_results_multi.items() if not k.endswith("_time")},
        {k: v for k, v in cv_results_acc_rec.items() if not k.endswith("_time")},
    )
Compare multi-metric cv_results with the ensemble of multiple single metric cv_results from single metric grid/random search
compare_cv_results_multimetric_with_single
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_search.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_search.py
BSD-3-Clause
def compare_refit_methods_when_refit_with_acc(search_multi, search_acc, refit):
    """Compare refit multi-metric search methods with single metric methods"""
    assert search_acc.refit == refit
    if refit:
        assert search_multi.refit == "accuracy"
    else:
        assert not search_multi.refit
        return  # search cannot predict/score without refit

    X, y = make_blobs(n_samples=100, n_features=4, random_state=42)
    for method in ("predict", "predict_proba", "predict_log_proba"):
        assert_almost_equal(
            getattr(search_multi, method)(X), getattr(search_acc, method)(X)
        )
    assert_almost_equal(search_multi.score(X, y), search_acc.score(X, y))
    for key in ("best_index_", "best_score_", "best_params_"):
        assert getattr(search_multi, key) == getattr(search_acc, key)
Compare refit multi-metric search methods with single metric methods
compare_refit_methods_when_refit_with_acc
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_search.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_search.py
BSD-3-Clause
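These two helpers compare against searches configured for multi-metric evaluation. A minimal sketch of that setup (the metric names and grid are illustrative):

from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(random_state=42)
search = GridSearchCV(
    DecisionTreeClassifier(random_state=42),
    {"max_depth": [2, 4]},
    scoring={"accuracy": "accuracy", "recall": "recall"},
    refit="accuracy",  # this metric drives best_estimator_ and best_index_
).fit(X, y)
# Per-metric columns appear in cv_results_, e.g. mean_test_accuracy / mean_test_recall.
print(search.cv_results_["mean_test_recall"])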
def test_unsupported_sample_weight_scorer():
    """Checks that fitting with sample_weight raises a warning if the scorer does not
    support sample_weight"""

    def fake_score_func(y_true, y_pred):
        "Fake scoring function that does not support sample_weight"
        return 0.5

    fake_scorer = make_scorer(fake_score_func)

    X, y = make_classification(n_samples=10, n_features=4, random_state=42)
    sw = np.ones_like(y)
    search_cv = GridSearchCV(estimator=LogisticRegression(), param_grid={"C": [1, 10]})
    # function
    search_cv.set_params(scoring=fake_score_func)
    with pytest.warns(UserWarning, match="does not support sample_weight"):
        search_cv.fit(X, y, sample_weight=sw)
    # scorer
    search_cv.set_params(scoring=fake_scorer)
    with pytest.warns(UserWarning, match="does not support sample_weight"):
        search_cv.fit(X, y, sample_weight=sw)
    # multi-metric evaluation
    search_cv.set_params(
        scoring=dict(fake=fake_scorer, accuracy="accuracy"), refit=False
    )
    # only fake scorer does not support sample_weight
    with pytest.warns(
        UserWarning, match=r"The scoring fake=.* does not support sample_weight"
    ):
        search_cv.fit(X, y, sample_weight=sw)
Checks that fitting with sample_weight raises a warning if the scorer does not support sample_weight
test_unsupported_sample_weight_scorer
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_search.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_search.py
BSD-3-Clause
def test_search_cv_pairwise_property_delegated_to_base_estimator(pairwise):
    """
    Test that the implementation of BaseSearchCV has the pairwise tag which
    matches the pairwise tag of its estimator. This test makes sure the
    pairwise tag is delegated to the base estimator.

    Non-regression test for issue #13920.
    """

    class TestEstimator(BaseEstimator):
        def __sklearn_tags__(self):
            tags = super().__sklearn_tags__()
            tags.input_tags.pairwise = pairwise
            return tags

    est = TestEstimator()
    attr_message = "BaseSearchCV pairwise tag must match estimator"
    cv = GridSearchCV(est, {"n_neighbors": [10]})
    assert pairwise == cv.__sklearn_tags__().input_tags.pairwise, attr_message
Test that the implementation of BaseSearchCV has the pairwise tag which matches the pairwise tag of its estimator. This test makes sure the pairwise tag is delegated to the base estimator. Non-regression test for issue #13920.
test_search_cv_pairwise_property_delegated_to_base_estimator
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_search.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_search.py
BSD-3-Clause
def test_search_cv__pairwise_property_delegated_to_base_estimator():
    """
    Test that the implementation of BaseSearchCV has the pairwise property which
    matches the pairwise tag of its estimator. This test makes sure the pairwise
    tag is delegated to the base estimator.

    Non-regression test for issue #13920.
    """

    class EstimatorPairwise(BaseEstimator):
        def __init__(self, pairwise=True):
            self.pairwise = pairwise

        def __sklearn_tags__(self):
            tags = super().__sklearn_tags__()
            tags.input_tags.pairwise = self.pairwise
            return tags

    est = EstimatorPairwise()
    attr_message = "BaseSearchCV _pairwise property must match estimator"

    for _pairwise_setting in [True, False]:
        est.set_params(pairwise=_pairwise_setting)
        cv = GridSearchCV(est, {"n_neighbors": [10]})
        assert _pairwise_setting == cv.__sklearn_tags__().input_tags.pairwise, (
            attr_message
        )
Test that the implementation of BaseSearchCV has the pairwise property which matches the pairwise tag of its estimator. This test makes sure the pairwise tag is delegated to the base estimator. Non-regression test for issue #13920.
test_search_cv__pairwise_property_delegated_to_base_estimator
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_search.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_search.py
BSD-3-Clause
def test_search_cv_pairwise_property_equivalence_of_precomputed():
    """
    Test that the implementation of BaseSearchCV has the pairwise tag which
    matches the pairwise tag of its estimator. This test ensures the equivalence
    of 'precomputed'.

    Non-regression test for issue #13920.
    """
    n_samples = 50
    n_splits = 2
    X, y = make_classification(n_samples=n_samples, random_state=0)
    grid_params = {"n_neighbors": [10]}

    # defaults to euclidean metric (minkowski p = 2)
    clf = KNeighborsClassifier()
    cv = GridSearchCV(clf, grid_params, cv=n_splits)
    cv.fit(X, y)
    preds_original = cv.predict(X)

    # precompute euclidean metric to validate pairwise is working
    X_precomputed = euclidean_distances(X)
    clf = KNeighborsClassifier(metric="precomputed")
    cv = GridSearchCV(clf, grid_params, cv=n_splits)
    cv.fit(X_precomputed, y)
    preds_precomputed = cv.predict(X_precomputed)

    attr_message = "GridSearchCV not identical with precomputed metric"
    assert (preds_original == preds_precomputed).all(), attr_message
Test that the implementation of BaseSearchCV has the pairwise tag which matches the pairwise tag of its estimator. This test ensures the equivalence of 'precomputed'. Non-regression test for issue #13920.
test_search_cv_pairwise_property_equivalence_of_precomputed
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_search.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_search.py
BSD-3-Clause
def test_search_cv_verbose_3(capsys, return_train_score):
    """Check that search cv with verbose>2 shows the score for single metrics.

    Non-regression test for #19658.
    """
    X, y = make_classification(n_samples=100, n_classes=2, flip_y=0.2, random_state=0)
    clf = LinearSVC(random_state=0)
    grid = {"C": [0.1]}

    GridSearchCV(
        clf,
        grid,
        scoring="accuracy",
        verbose=3,
        cv=3,
        return_train_score=return_train_score,
    ).fit(X, y)
    captured = capsys.readouterr().out
    if return_train_score:
        match = re.findall(r"score=\(train=[\d\.]+, test=[\d.]+\)", captured)
    else:
        match = re.findall(r"score=[\d\.]+", captured)
    assert len(match) == 3
Check that search cv with verbose>2 shows the score for single metrics. Non-regression test for #19658.
test_search_cv_verbose_3
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_search.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_search.py
BSD-3-Clause
def test_search_html_repr():
    """Test different HTML representations for GridSearchCV."""
    X, y = make_classification(random_state=42)

    pipeline = Pipeline([("scale", StandardScaler()), ("clf", DummyClassifier())])
    param_grid = {"clf": [DummyClassifier(), LogisticRegression()]}

    # Unfitted shows the original pipeline
    search_cv = GridSearchCV(pipeline, param_grid=param_grid, refit=False)
    with config_context(display="diagram"):
        repr_html = search_cv._repr_html_()
        assert "<div>DummyClassifier</div>" in repr_html

    # Fitted with `refit=False` shows the original pipeline
    search_cv.fit(X, y)
    with config_context(display="diagram"):
        repr_html = search_cv._repr_html_()
        assert "<div>DummyClassifier</div>" in repr_html

    # Fitted with `refit=True` shows the best estimator
    search_cv = GridSearchCV(pipeline, param_grid=param_grid, refit=True)
    search_cv.fit(X, y)
    with config_context(display="diagram"):
        repr_html = search_cv._repr_html_()
        assert "<div>DummyClassifier</div>" not in repr_html
        assert "<div>LogisticRegression</div>" in repr_html
Test different HTML representations for GridSearchCV.
test_search_html_repr
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_search.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_search.py
BSD-3-Clause
def test_multi_metric_search_forwards_metadata(SearchCV, param_search):
    """Test that *SearchCV forwards metadata correctly when passed multiple metrics."""
    X, y = make_classification(random_state=42)
    n_samples = _num_samples(X)
    rng = np.random.RandomState(0)
    score_weights = rng.rand(n_samples)
    score_metadata = rng.rand(n_samples)

    est = LinearSVC()
    param_grid_search = {param_search: {"C": [1]}}

    scorer_registry = _Registry()
    scorer = ConsumingScorer(registry=scorer_registry).set_score_request(
        sample_weight="score_weights", metadata="score_metadata"
    )
    scoring = dict(my_scorer=scorer, accuracy="accuracy")
    SearchCV(est, refit="accuracy", cv=2, scoring=scoring, **param_grid_search).fit(
        X, y, score_weights=score_weights, score_metadata=score_metadata
    )
    assert len(scorer_registry)
    for _scorer in scorer_registry:
        check_recorded_metadata(
            obj=_scorer,
            method="score",
            parent="_score",
            split_params=("sample_weight", "metadata"),
            sample_weight=score_weights,
            metadata=score_metadata,
        )
Test that *SearchCV forwards metadata correctly when passed multiple metrics.
test_multi_metric_search_forwards_metadata
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_search.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_search.py
BSD-3-Clause
def test_score_rejects_params_with_no_routing_enabled(SearchCV, param_search):
    """*SearchCV should reject **params when metadata routing is not enabled
    since this is added only when routing is enabled."""
    X, y = make_classification(random_state=42)
    est = LinearSVC()
    param_grid_search = {param_search: {"C": [1]}}

    gs = SearchCV(est, cv=2, **param_grid_search).fit(X, y)

    with pytest.raises(ValueError, match="is only supported if"):
        gs.score(X, y, metadata=1)
*SearchCV should reject **params when metadata routing is not enabled since this is added only when routing is enabled.
test_score_rejects_params_with_no_routing_enabled
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_search.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_search.py
BSD-3-Clause
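For context, the "routing enabled" state these metadata tests toggle around corresponds to a global scikit-learn flag; a minimal sketch, assuming a scikit-learn version with metadata routing support:

import sklearn

sklearn.set_config(enable_metadata_routing=True)
# With routing enabled, extra **params (e.g. sample_weight for a scorer that
# requested it via set_score_request) can be passed through SearchCV.fit/score.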
def test_cv_results_dtype_issue_29074():
    """Non-regression test for
    https://github.com/scikit-learn/scikit-learn/issues/29074"""

    class MetaEstimator(BaseEstimator, ClassifierMixin):
        def __init__(
            self,
            base_clf,
            parameter1=None,
            parameter2=None,
            parameter3=None,
            parameter4=None,
        ):
            self.base_clf = base_clf
            self.parameter1 = parameter1
            self.parameter2 = parameter2
            self.parameter3 = parameter3
            self.parameter4 = parameter4

        def fit(self, X, y=None):
            self.base_clf.fit(X, y)
            return self

        def score(self, X, y):
            return self.base_clf.score(X, y)

    # Values of param_grid are such that np.result_type gives slightly
    # different errors, in particular ValueError and TypeError
    param_grid = {
        "parameter1": [None, {"option": "A"}, {"option": "B"}],
        "parameter2": [None, [1, 2]],
        "parameter3": [{"a": 1}],
        "parameter4": ["str1", "str2"],
    }
    grid_search = GridSearchCV(
        estimator=MetaEstimator(LogisticRegression()),
        param_grid=param_grid,
        cv=3,
    )
    X, y = make_blobs(random_state=0)
    grid_search.fit(X, y)

    for param in param_grid:
        assert grid_search.cv_results_[f"param_{param}"].dtype == object
Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/29074
test_cv_results_dtype_issue_29074
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_search.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_search.py
BSD-3-Clause
def test_search_with_estimators_issue_29157():
    """Check cv_results_ for estimators with a `dtype` parameter, e.g. OneHotEncoder."""
    pd = pytest.importorskip("pandas")
    df = pd.DataFrame(
        {
            "numeric_1": [1, 2, 3, 4, 5],
            "object_1": ["a", "a", "a", "a", "a"],
            "target": [1.0, 4.1, 2.0, 3.0, 1.0],
        }
    )
    X = df.drop("target", axis=1)
    y = df["target"]
    enc = ColumnTransformer(
        [("enc", OneHotEncoder(sparse_output=False), ["object_1"])],
        remainder="passthrough",
    )
    pipe = Pipeline(
        [
            ("enc", enc),
            ("regressor", LinearRegression()),
        ]
    )
    grid_params = {
        "enc__enc": [
            OneHotEncoder(sparse_output=False),
            OrdinalEncoder(),
        ]
    }
    grid_search = GridSearchCV(pipe, grid_params, cv=2)
    grid_search.fit(X, y)
    assert grid_search.cv_results_["param_enc__enc"].dtype == object
Check cv_results_ for estimators with a `dtype` parameter, e.g. OneHotEncoder.
test_search_with_estimators_issue_29157
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_search.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_search.py
BSD-3-Clause
def test_cv_results_multi_size_array():
    """Check that GridSearchCV works with params that are arrays of different sizes.

    Non-regression test for #29277.
    """
    n_features = 10
    X, y = make_classification(n_features=10)

    spline_reg_pipe = make_pipeline(
        SplineTransformer(extrapolation="periodic"),
        LogisticRegression(),
    )

    n_knots_list = [n_features * i for i in [10, 11, 12]]
    knots_list = [
        np.linspace(0, np.pi * 2, n_knots).reshape((-1, n_features))
        for n_knots in n_knots_list
    ]
    spline_reg_pipe_cv = GridSearchCV(
        estimator=spline_reg_pipe,
        param_grid={
            "splinetransformer__knots": knots_list,
        },
    )

    spline_reg_pipe_cv.fit(X, y)
    assert (
        spline_reg_pipe_cv.cv_results_["param_splinetransformer__knots"].dtype
        == object
    )
Check that GridSearchCV works with params that are arrays of different sizes. Non-regression test for #29277.
test_cv_results_multi_size_array
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_search.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_search.py
BSD-3-Clause
def test_train_test_split_32bit_overflow():
    """Check for integer overflow on 32-bit platforms.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/20774
    """

    # A number 'n' big enough for expression 'n * n * train_size' to cause
    # an overflow for signed 32-bit integer
    big_number = 100000

    # Definition of 'y' is a part of reproduction - population for at least
    # one class should be in the same order of magnitude as size of X
    X = np.arange(big_number)
    y = X > (0.99 * big_number)

    split = train_test_split(X, y, stratify=y, train_size=0.25)
    X_train, X_test, y_train, y_test = split

    assert X_train.size + X_test.size == big_number
    assert y_train.size + y_test.size == big_number
Check for integer overflow on 32-bit platforms. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/20774
test_train_test_split_32bit_overflow
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_split.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_split.py
BSD-3-Clause
def test_splitter_set_split_request(cv):
    """Check set_split_request is defined for group splitters and not for others."""
    if cv in GROUP_SPLITTERS:
        assert hasattr(cv, "set_split_request")
    elif cv in NO_GROUP_SPLITTERS:
        assert not hasattr(cv, "set_split_request")
Check set_split_request is defined for group splitters and not for others.
test_splitter_set_split_request
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_split.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_split.py
BSD-3-Clause
def test_nan_handling(HalvingSearch, fail_at):
    """Check the selection of the best scores in presence of failure represented by
    NaN values."""
    n_samples = 1_000
    X, y = make_classification(n_samples=n_samples, random_state=0)

    search = HalvingSearch(
        SometimesFailClassifier(),
        {f"fail_{fail_at}": [False, True], "a": range(3)},
        resource="n_estimators",
        max_resources=6,
        min_resources=1,
        factor=2,
    )
    search.fit(X, y)

    # estimators that failed during fit/predict should always rank lower
    # than ones where the fit/predict succeeded
    assert not search.best_params_[f"fail_{fail_at}"]
    scores = search.cv_results_["mean_test_score"]
    ranks = search.cv_results_["rank_test_score"]

    # some scores should be NaN
    assert np.isnan(scores).any()

    unique_nan_ranks = np.unique(ranks[np.isnan(scores)])
    # all NaN scores should have the same rank
    assert unique_nan_ranks.shape[0] == 1
    # NaNs should have the lowest rank
    assert (unique_nan_ranks[0] >= ranks).all()
Check the selection of the best scores in presence of failure represented by NaN values.
test_nan_handling
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_successive_halving.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_successive_halving.py
BSD-3-Clause
def test_min_resources_null(SearchCV):
    """Check that we raise an error if the minimum resources is set to 0."""
    base_estimator = FastClassifier()
    param_grid = {"a": [1]}
    X = np.empty(0).reshape(0, 3)

    search = SearchCV(base_estimator, param_grid, min_resources="smallest")

    err_msg = "min_resources_=0: you might have passed an empty dataset X."
    with pytest.raises(ValueError, match=err_msg):
        search.fit(X, [])
Check that we raise an error if the minimum resources is set to 0.
test_min_resources_null
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_successive_halving.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_successive_halving.py
BSD-3-Clause
def test_select_best_index(SearchCV):
    """Check the selection strategy of the halving search."""
    results = {  # this isn't a 'real world' result dict
        "iter": np.array([0, 0, 0, 0, 1, 1, 2, 2, 2]),
        "mean_test_score": np.array([4, 3, 5, 1, 11, 10, 5, 6, 9]),
        "params": np.array(["a", "b", "c", "d", "e", "f", "g", "h", "i"]),
    }

    # we expect the index of 'i'
    best_index = SearchCV._select_best_index(None, None, results)
    assert best_index == 8
Check the selection strategy of the halving search.
test_select_best_index
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_successive_halving.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_successive_halving.py
BSD-3-Clause
def test_halving_random_search_list_of_dicts():
    """Check the behaviour of `HalvingRandomSearchCV` with `param_distributions`
    being a list of dictionaries.
    """
    X, y = make_classification(n_samples=150, n_features=4, random_state=42)

    params = [
        {"kernel": ["rbf"], "C": expon(scale=10), "gamma": expon(scale=0.1)},
        {"kernel": ["poly"], "degree": [2, 3]},
    ]
    param_keys = (
        "param_C",
        "param_degree",
        "param_gamma",
        "param_kernel",
    )
    score_keys = (
        "mean_test_score",
        "mean_train_score",
        "rank_test_score",
        "split0_test_score",
        "split1_test_score",
        "split2_test_score",
        "split0_train_score",
        "split1_train_score",
        "split2_train_score",
        "std_test_score",
        "std_train_score",
        "mean_fit_time",
        "std_fit_time",
        "mean_score_time",
        "std_score_time",
    )
    extra_keys = ("n_resources", "iter")

    search = HalvingRandomSearchCV(
        SVC(), cv=3, param_distributions=params, return_train_score=True, random_state=0
    )
    search.fit(X, y)
    n_candidates = sum(search.n_candidates_)
    cv_results = search.cv_results_
    # Check results structure
    check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates, extra_keys)

    expected_cv_results_kinds = {
        "param_C": "f",
        "param_degree": "i",
        "param_gamma": "f",
        "param_kernel": "O",
    }
    check_cv_results_array_types(
        search, param_keys, score_keys, expected_cv_results_kinds
    )

    assert all(
        (
            cv_results["param_C"].mask[i]
            and cv_results["param_gamma"].mask[i]
            and not cv_results["param_degree"].mask[i]
        )
        for i in range(n_candidates)
        if cv_results["param_kernel"][i] == "poly"
    )
    assert all(
        (
            not cv_results["param_C"].mask[i]
            and not cv_results["param_gamma"].mask[i]
            and cv_results["param_degree"].mask[i]
        )
        for i in range(n_candidates)
        if cv_results["param_kernel"][i] == "rbf"
    )
Check the behaviour of `HalvingRandomSearchCV` with `param_distributions` being a list of dictionaries.
test_halving_random_search_list_of_dicts
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_successive_halving.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_successive_halving.py
BSD-3-Clause
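As a reminder, the successive-halving estimators are exported behind an experimental flag at the time of writing; a minimal sketch of the setup these tests rely on (grid and distributions are illustrative):

from scipy.stats import expon
from sklearn.datasets import make_classification
from sklearn.experimental import enable_halving_search_cv  # noqa: F401
from sklearn.model_selection import HalvingRandomSearchCV
from sklearn.svm import SVC

X, y = make_classification(n_samples=150, random_state=42)
search = HalvingRandomSearchCV(
    SVC(), {"C": expon(scale=10), "gamma": expon(scale=0.1)}, random_state=0
).fit(X, y)
print(search.best_params_)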
def fit(
    self,
    X,
    Y=None,
    sample_weight=None,
    class_prior=None,
    sparse_sample_weight=None,
    sparse_param=None,
    dummy_int=None,
    dummy_str=None,
    dummy_obj=None,
    callback=None,
):
    """The dummy arguments are to test that this fit function can accept
    non-array arguments through cross-validation, such as:
        - int
        - str (this is actually array-like)
        - object
        - function
    """
    self.dummy_int = dummy_int
    self.dummy_str = dummy_str
    self.dummy_obj = dummy_obj
    if callback is not None:
        callback(self)

    if self.allow_nd:
        X = X.reshape(len(X), -1)
    if X.ndim >= 3 and not self.allow_nd:
        raise ValueError("X cannot be d")
    if sample_weight is not None:
        assert sample_weight.shape[0] == X.shape[0], (
            "MockClassifier extra fit_param "
            "sample_weight.shape[0] is {0}, should be {1}".format(
                sample_weight.shape[0], X.shape[0]
            )
        )
    if class_prior is not None:
        # `y` here refers to a target array defined elsewhere in the test file,
        # not the `Y` parameter of this method
        assert class_prior.shape[0] == len(np.unique(y)), (
            "MockClassifier extra fit_param class_prior.shape[0]"
            " is {0}, should be {1}".format(class_prior.shape[0], len(np.unique(y)))
        )
    if sparse_sample_weight is not None:
        fmt = (
            "MockClassifier extra fit_param sparse_sample_weight"
            ".shape[0] is {0}, should be {1}"
        )
        assert sparse_sample_weight.shape[0] == X.shape[0], fmt.format(
            sparse_sample_weight.shape[0], X.shape[0]
        )
    if sparse_param is not None:
        # `P` refers to a sparse matrix fixture defined elsewhere in the test file
        fmt = (
            "MockClassifier extra fit_param sparse_param.shape "
            "is ({0}, {1}), should be ({2}, {3})"
        )
        assert sparse_param.shape == P.shape, fmt.format(
            sparse_param.shape[0],
            sparse_param.shape[1],
            P.shape[0],
            P.shape[1],
        )
    self.classes_ = np.unique(y)
    return self
The dummy arguments are to test that this fit function can accept non-array arguments through cross-validation, such as: - int - str (this is actually array-like) - object - function
fit
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_validation.py
BSD-3-Clause
def check_cross_val_predict_binary(est, X, y, method):
    """Helper for tests of cross_val_predict with binary classification"""
    cv = KFold(n_splits=3, shuffle=False)

    # Generate expected outputs
    if y.ndim == 1:
        exp_shape = (len(X),) if method == "decision_function" else (len(X), 2)
    else:
        exp_shape = y.shape
    expected_predictions = np.zeros(exp_shape)
    for train, test in cv.split(X, y):
        est = clone(est).fit(X[train], y[train])
        expected_predictions[test] = getattr(est, method)(X[test])

    # Check actual outputs for several representations of y
    for tg in [y, y + 1, y - 2, y.astype("str")]:
        assert_allclose(
            cross_val_predict(est, X, tg, method=method, cv=cv), expected_predictions
        )
Helper for tests of cross_val_predict with binary classification
check_cross_val_predict_binary
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_validation.py
BSD-3-Clause
def check_cross_val_predict_multiclass(est, X, y, method):
    """Helper for tests of cross_val_predict with multiclass classification"""
    cv = KFold(n_splits=3, shuffle=False)

    # Generate expected outputs
    float_min = np.finfo(np.float64).min
    default_values = {
        "decision_function": float_min,
        "predict_log_proba": float_min,
        "predict_proba": 0,
    }
    expected_predictions = np.full(
        (len(X), len(set(y))), default_values[method], dtype=np.float64
    )
    _, y_enc = np.unique(y, return_inverse=True)
    for train, test in cv.split(X, y_enc):
        est = clone(est).fit(X[train], y_enc[train])
        fold_preds = getattr(est, method)(X[test])
        i_cols_fit = np.unique(y_enc[train])
        expected_predictions[np.ix_(test, i_cols_fit)] = fold_preds

    # Check actual outputs for several representations of y
    for tg in [y, y + 1, y - 2, y.astype("str")]:
        assert_allclose(
            cross_val_predict(est, X, tg, method=method, cv=cv), expected_predictions
        )
Helper for tests of cross_val_predict with multiclass classification
check_cross_val_predict_multiclass
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_validation.py
BSD-3-Clause
def check_cross_val_predict_multilabel(est, X, y, method):
    """Check the output of cross_val_predict for 2D targets using
    Estimators which provide predictions as a list with one
    element per class.
    """
    cv = KFold(n_splits=3, shuffle=False)

    # Create empty arrays of the correct size to hold outputs
    float_min = np.finfo(np.float64).min
    default_values = {
        "decision_function": float_min,
        "predict_log_proba": float_min,
        "predict_proba": 0,
    }
    n_targets = y.shape[1]
    expected_preds = []
    for i_col in range(n_targets):
        n_classes_in_label = len(set(y[:, i_col]))
        if n_classes_in_label == 2 and method == "decision_function":
            exp_shape = (len(X),)
        else:
            exp_shape = (len(X), n_classes_in_label)
        expected_preds.append(
            np.full(exp_shape, default_values[method], dtype=np.float64)
        )

    # Generate expected outputs
    y_enc_cols = [
        np.unique(y[:, i], return_inverse=True)[1][:, np.newaxis]
        for i in range(y.shape[1])
    ]
    y_enc = np.concatenate(y_enc_cols, axis=1)
    for train, test in cv.split(X, y_enc):
        est = clone(est).fit(X[train], y_enc[train])
        fold_preds = getattr(est, method)(X[test])
        for i_col in range(n_targets):
            fold_cols = np.unique(y_enc[train][:, i_col])
            if expected_preds[i_col].ndim == 1:
                # Decision function with <=2 classes
                expected_preds[i_col][test] = fold_preds[i_col]
            else:
                idx = np.ix_(test, fold_cols)
                expected_preds[i_col][idx] = fold_preds[i_col]

    # Check actual outputs for several representations of y
    for tg in [y, y + 1, y - 2, y.astype("str")]:
        cv_predict_output = cross_val_predict(est, X, tg, method=method, cv=cv)
        assert len(cv_predict_output) == len(expected_preds)
        for i in range(len(cv_predict_output)):
            assert_allclose(cv_predict_output[i], expected_preds[i])
Check the output of cross_val_predict for 2D targets using Estimators which provide predictions as a list with one element per class.
check_cross_val_predict_multilabel
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_validation.py
BSD-3-Clause
def test_learning_curve_partial_fit_regressors():
    """Check that regressors with partial_fit are supported.

    Non-regression test for #22981.
    """
    X, y = make_regression(random_state=42)

    # Does not error
    learning_curve(MLPRegressor(), X, y, exploit_incremental_learning=True, cv=2)
Check that regressors with partial_fit are supported. Non-regression test for #22981.
test_learning_curve_partial_fit_regressors
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_validation.py
BSD-3-Clause
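A runnable sketch of the guarded behaviour with a different incremental estimator (SGDRegressor stands in for MLPRegressor here; any regressor exposing partial_fit should do):

from sklearn.datasets import make_regression
from sklearn.linear_model import SGDRegressor
from sklearn.model_selection import learning_curve

X, y = make_regression(random_state=42)
# With exploit_incremental_learning=True, the curve is built by feeding
# growing prefixes of each training split to partial_fit.
train_sizes, train_scores, test_scores = learning_curve(
    SGDRegressor(), X, y, exploit_incremental_learning=True, cv=2
)
print(train_sizes)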
def test_learning_curve_some_failing_fits_warning(global_random_seed):
    """Check that fit failures in `learning_curve` raise the required warning."""
    X, y = make_classification(
        n_samples=30,
        n_classes=3,
        n_informative=6,
        shuffle=False,
        random_state=global_random_seed,
    )
    # sorting the target to trigger SVC error on the first 2 splits because a
    # single class is present
    sorted_idx = np.argsort(y)
    X, y = X[sorted_idx], y[sorted_idx]

    svc = SVC()
    warning_message = "10 fits failed out of a total of 25"

    with pytest.warns(FitFailedWarning, match=warning_message):
        _, train_score, test_score, *_ = learning_curve(
            svc, X, y, cv=5, error_score=np.nan
        )

    # the first 2 splits should lead to warnings and thus np.nan scores
    for idx in range(2):
        assert np.isnan(train_score[idx]).all()
        assert np.isnan(test_score[idx]).all()

    for idx in range(2, train_score.shape[0]):
        assert not np.isnan(train_score[idx]).any()
        assert not np.isnan(test_score[idx]).any()
Check that fit failures in `learning_curve` raise the required warning.
test_learning_curve_some_failing_fits_warning
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_validation.py
BSD-3-Clause
def test_fit_param_deprecation(func, extra_args): """Check that we warn about deprecating `fit_params`.""" with pytest.warns(FutureWarning, match="`fit_params` is deprecated"): func( estimator=ConsumingClassifier(), X=X, y=y, cv=2, fit_params={}, **extra_args ) with pytest.raises( ValueError, match="`params` and `fit_params` cannot both be provided" ): func( estimator=ConsumingClassifier(), X=X, y=y, fit_params={}, params={}, **extra_args, )
Check that we warn about deprecating `fit_params`.
test_fit_param_deprecation
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_validation.py
BSD-3-Clause
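For context, a hedged sketch of the supported spelling: fit-time arguments travel through `params` rather than the deprecated `fit_params`. Under the default configuration (metadata routing disabled), `params` is forwarded to the estimator's `fit` directly; `sample_weight` below is just one example of such an argument.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

X, y = make_classification(random_state=0)
sw = np.ones(len(y))
# params={...} replaces the deprecated fit_params={...}
scores = cross_val_score(
    LogisticRegression(), X, y, cv=2, params={"sample_weight": sw}
)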
def test_groups_with_routing_validation(func, extra_args): """Check that we raise an error if `groups` are passed to the cv method instead of `params` when metadata routing is enabled. """ with pytest.raises(ValueError, match="`groups` can only be passed if"): func( estimator=ConsumingClassifier(), X=X, y=y, groups=[], **extra_args, )
Check that we raise an error if `groups` are passed to the cv method instead of `params` when metadata routing is enabled.
test_groups_with_routing_validation
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_validation.py
BSD-3-Clause
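A hedged sketch of the accepted spelling under routing, assuming that group-aware splitters such as GroupKFold request `groups` by default: enable routing, then pass the group labels through `params` so the splitter receives them; the bare `groups=` keyword is what the test above expects to raise.

import numpy as np
from sklearn import set_config
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GroupKFold, cross_val_score

set_config(enable_metadata_routing=True)
try:
    X, y = make_classification(n_samples=30, random_state=0)
    groups = np.repeat(np.arange(3), 10)
    scores = cross_val_score(
        LogisticRegression(),
        X,
        y,
        cv=GroupKFold(n_splits=3),
        params={"groups": groups},  # groups=groups would raise here
    )
finally:
    set_config(enable_metadata_routing=False)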
def test_cross_validate_params_none(func, extra_args): """Test that no errors are raised when passing `params=None`, which is the default value. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/30447 """ X, y = make_classification(n_samples=100, n_classes=2, random_state=0) func(estimator=ConsumingClassifier(), X=X, y=y, **extra_args)
Test that no errors are raised when passing `params=None`, which is the default value. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/30447
test_cross_validate_params_none
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_validation.py
BSD-3-Clause
def test_passed_unrequested_metadata(func, extra_args): """Check that we raise an error when passing metadata that is not requested.""" err_msg = re.escape( "[metadata] are passed but are not explicitly set as requested or not " "requested for ConsumingClassifier.fit, which is used within" ) with pytest.raises(UnsetMetadataPassedError, match=err_msg): func( estimator=ConsumingClassifier(), X=X, y=y2, params=dict(metadata=[]), **extra_args, ) # cross_val_predict doesn't use scoring if func == cross_val_predict: return err_msg = re.escape( "[metadata] are passed but are not explicitly set as requested or not " "requested for ConsumingClassifier.score, which is used within" ) with pytest.raises(UnsetMetadataPassedError, match=err_msg): func( estimator=ConsumingClassifier() .set_fit_request(metadata=True) .set_partial_fit_request(metadata=True), X=X, y=y2, params=dict(metadata=[]), **extra_args, )
Check that we raise an error when passing metadata that is not requested.
test_passed_unrequested_metadata
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_validation.py
BSD-3-Clause
def test_validation_functions_routing(func, extra_args): """Check that the respective cv method is properly dispatching the metadata to the consumer.""" scorer_registry = _Registry() scorer = ConsumingScorer(registry=scorer_registry).set_score_request( sample_weight="score_weights", metadata="score_metadata" ) splitter_registry = _Registry() splitter = ConsumingSplitter(registry=splitter_registry).set_split_request( groups="split_groups", metadata="split_metadata" ) estimator_registry = _Registry() estimator = ConsumingClassifier(registry=estimator_registry).set_fit_request( sample_weight="fit_sample_weight", metadata="fit_metadata" ) n_samples = _num_samples(X) rng = np.random.RandomState(0) score_weights = rng.rand(n_samples) score_metadata = rng.rand(n_samples) split_groups = rng.randint(0, 3, n_samples) split_metadata = rng.rand(n_samples) fit_sample_weight = rng.rand(n_samples) fit_metadata = rng.rand(n_samples) scoring_args = { cross_validate: dict(scoring=dict(my_scorer=scorer, accuracy="accuracy")), cross_val_score: dict(scoring=scorer), learning_curve: dict(scoring=scorer), validation_curve: dict(scoring=scorer), permutation_test_score: dict(scoring=scorer), cross_val_predict: dict(), } params = dict( split_groups=split_groups, split_metadata=split_metadata, fit_sample_weight=fit_sample_weight, fit_metadata=fit_metadata, ) if func is not cross_val_predict: params.update( score_weights=score_weights, score_metadata=score_metadata, ) func( estimator, X=X, y=y, cv=splitter, **scoring_args[func], **extra_args, params=params, ) if func is not cross_val_predict: # cross_val_predict doesn't need a scorer assert len(scorer_registry) for _scorer in scorer_registry: check_recorded_metadata( obj=_scorer, method="score", parent=func.__name__, split_params=("sample_weight", "metadata"), sample_weight=score_weights, metadata=score_metadata, ) assert len(splitter_registry) for _splitter in splitter_registry: check_recorded_metadata( obj=_splitter, method="split", parent=func.__name__, groups=split_groups, metadata=split_metadata, ) assert len(estimator_registry) for _estimator in estimator_registry: check_recorded_metadata( obj=_estimator, method="fit", parent=func.__name__, split_params=("sample_weight", "metadata"), sample_weight=fit_sample_weight, metadata=fit_metadata, )
Check that the respective cv method is properly dispatching the metadata to the consumer.
test_validation_functions_routing
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_validation.py
BSD-3-Clause
def test_learning_curve_exploit_incremental_learning_routing(): """Test that learning_curve routes metadata to the estimator correctly while partial_fitting it with `exploit_incremental_learning=True`.""" n_samples = _num_samples(X) rng = np.random.RandomState(0) fit_sample_weight = rng.rand(n_samples) fit_metadata = rng.rand(n_samples) estimator_registry = _Registry() estimator = ConsumingClassifier( registry=estimator_registry ).set_partial_fit_request( sample_weight="fit_sample_weight", metadata="fit_metadata" ) learning_curve( estimator, X=X, y=y, cv=ConsumingSplitter(), exploit_incremental_learning=True, params=dict(fit_sample_weight=fit_sample_weight, fit_metadata=fit_metadata), ) assert len(estimator_registry) for _estimator in estimator_registry: check_recorded_metadata( obj=_estimator, method="partial_fit", parent="learning_curve", split_params=("sample_weight", "metadata"), sample_weight=fit_sample_weight, metadata=fit_metadata, )
Test that learning_curve routes metadata to the estimator correctly while partial_fitting it with `exploit_incremental_learning=True`.
test_learning_curve_exploit_incremental_learning_routing
python
scikit-learn/scikit-learn
sklearn/model_selection/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/model_selection/tests/test_validation.py
BSD-3-Clause
def _get_weights(dist, weights):
    """Get the weights from an array of distances and a parameter ``weights``.

    Assume weights have already been validated.

    Parameters
    ----------
    dist : ndarray
        The input distances.

    weights : {'uniform', 'distance'}, callable or None
        The kind of weighting used.

    Returns
    -------
    weights_arr : array of the same shape as ``dist``, or None
        None if ``weights == 'uniform'``; otherwise the array of
        weights to apply to the distances.
    """
    if weights in (None, "uniform"):
        return None

    if weights == "distance":
        # if user attempts to classify a point that was zero distance from one
        # or more training points, those training points are weighted as 1.0
        # and the other points as 0.0
        if dist.dtype is np.dtype(object):
            for point_dist_i, point_dist in enumerate(dist):
                # check if point_dist is iterable
                # (ex: RadiusNeighborsClassifier.predict may set an element of
                # dist to 1e-6 to represent an 'outlier')
                if hasattr(point_dist, "__contains__") and 0.0 in point_dist:
                    dist[point_dist_i] = point_dist == 0.0
                else:
                    dist[point_dist_i] = 1.0 / point_dist
        else:
            with np.errstate(divide="ignore"):
                dist = 1.0 / dist
            inf_mask = np.isinf(dist)
            inf_row = np.any(inf_mask, axis=1)
            dist[inf_row] = inf_mask[inf_row]
        return dist

    if callable(weights):
        return weights(dist)
Get the weights from an array of distances and a parameter ``weights``. Assume weights have already been validated. Parameters ---------- dist : ndarray The input distances. weights : {'uniform', 'distance'}, callable or None The kind of weighting used. Returns ------- weights_arr : array of the same shape as ``dist``, or None None if ``weights == 'uniform'``; otherwise the array of weights to apply to the distances.
_get_weights
python
scikit-learn/scikit-learn
sklearn/neighbors/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_base.py
BSD-3-Clause
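A stand-alone sketch of the inverse-distance branch above (plain NumPy, no private API): rows containing a zero distance collapse to indicator weights, while other rows get 1 / dist.

import numpy as np

dist = np.array([[0.0, 2.0], [1.0, 4.0]])
with np.errstate(divide="ignore"):
    w = 1.0 / dist
# A row with a zero distance produces inf; replace that whole row with an
# indicator of the exact matches (weight 1.0) and zeros elsewhere.
inf_mask = np.isinf(w)
inf_row = np.any(inf_mask, axis=1)
w[inf_row] = inf_mask[inf_row]
print(w)  # [[1.   0.  ], [1.   0.25]]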
def _is_sorted_by_data(graph):
    """Return whether the graph's non-zero entries are sorted by data.

    The non-zero entries are stored in graph.data and graph.indices.
    For each row (or sample), the non-zero entries can be either:
        - sorted by indices, as after graph.sort_indices();
        - sorted by data, as after _check_precomputed(graph);
        - not sorted.

    Parameters
    ----------
    graph : sparse matrix of shape (n_samples, n_samples)
        Neighbors graph as given by `kneighbors_graph` or
        `radius_neighbors_graph`. Matrix should be in CSR format.

    Returns
    -------
    res : bool
        Whether input graph is sorted by data.
    """
    assert graph.format == "csr"
    out_of_order = graph.data[:-1] > graph.data[1:]
    line_change = np.unique(graph.indptr[1:-1] - 1)
    line_change = line_change[line_change < out_of_order.shape[0]]
    return out_of_order.sum() == out_of_order[line_change].sum()
Return whether the graph's non-zero entries are sorted by data. The non-zero entries are stored in graph.data and graph.indices. For each row (or sample), the non-zero entries can be either: - sorted by indices, as after graph.sort_indices(); - sorted by data, as after _check_precomputed(graph); - not sorted. Parameters ---------- graph : sparse matrix of shape (n_samples, n_samples) Neighbors graph as given by `kneighbors_graph` or `radius_neighbors_graph`. Matrix should be in CSR format. Returns ------- res : bool Whether input graph is sorted by data.
_is_sorted_by_data
python
scikit-learn/scikit-learn
sklearn/neighbors/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_base.py
BSD-3-Clause
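The sorted-by-data property can be checked by replicating the comparison inline (a sketch on a small CSR matrix; _is_sorted_by_data itself is private). Row 0 stores data [3., 1.], so this graph is not sorted by data and the check prints False.

import numpy as np
from scipy.sparse import csr_matrix

graph = csr_matrix(np.array([[0., 3., 1.],
                             [3., 0., 2.],
                             [1., 2., 0.]]))
# An entry is "out of order" if it is larger than its successor; drops at
# row boundaries (indptr positions) are allowed and subtracted back out.
out_of_order = graph.data[:-1] > graph.data[1:]
line_change = np.unique(graph.indptr[1:-1] - 1)
line_change = line_change[line_change < out_of_order.shape[0]]
print(out_of_order.sum() == out_of_order[line_change].sum())  # False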
def _check_precomputed(X): """Check precomputed distance matrix. If the precomputed distance matrix is sparse, it checks that the non-zero entries are sorted by distances. If not, the matrix is copied and sorted. Parameters ---------- X : {sparse matrix, array-like}, (n_samples, n_samples) Distance matrix to other samples. X may be a sparse matrix, in which case only non-zero elements may be considered neighbors. Returns ------- X : {sparse matrix, array-like}, (n_samples, n_samples) Distance matrix to other samples. X may be a sparse matrix, in which case only non-zero elements may be considered neighbors. """ if not issparse(X): X = check_array(X, ensure_non_negative=True, input_name="X") return X else: graph = X if graph.format not in ("csr", "csc", "coo", "lil"): raise TypeError( "Sparse matrix in {!r} format is not supported due to " "its handling of explicit zeros".format(graph.format) ) copied = graph.format != "csr" graph = check_array( graph, accept_sparse="csr", ensure_non_negative=True, input_name="precomputed distance matrix", ) graph = sort_graph_by_row_values(graph, copy=not copied, warn_when_not_sorted=True) return graph
Check precomputed distance matrix. If the precomputed distance matrix is sparse, it checks that the non-zero entries are sorted by distances. If not, the matrix is copied and sorted. Parameters ---------- X : {sparse matrix, array-like}, (n_samples, n_samples) Distance matrix to other samples. X may be a sparse matrix, in which case only non-zero elements may be considered neighbors. Returns ------- X : {sparse matrix, array-like}, (n_samples, n_samples) Distance matrix to other samples. X may be a sparse matrix, in which case only non-zero elements may be considered neighbors.
_check_precomputed
python
scikit-learn/scikit-learn
sklearn/neighbors/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_base.py
BSD-3-Clause
def sort_graph_by_row_values(graph, copy=False, warn_when_not_sorted=True): """Sort a sparse graph such that each row is stored with increasing values. .. versionadded:: 1.2 Parameters ---------- graph : sparse matrix of shape (n_samples, n_samples) Distance matrix to other samples, where only non-zero elements are considered neighbors. Matrix is converted to CSR format if not already. copy : bool, default=False If True, the graph is copied before sorting. If False, the sorting is performed inplace. If the graph is not of CSR format, `copy` must be True to allow the conversion to CSR format, otherwise an error is raised. warn_when_not_sorted : bool, default=True If True, a :class:`~sklearn.exceptions.EfficiencyWarning` is raised when the input graph is not sorted by row values. Returns ------- graph : sparse matrix of shape (n_samples, n_samples) Distance matrix to other samples, where only non-zero elements are considered neighbors. Matrix is in CSR format. Examples -------- >>> from scipy.sparse import csr_matrix >>> from sklearn.neighbors import sort_graph_by_row_values >>> X = csr_matrix( ... [[0., 3., 1.], ... [3., 0., 2.], ... [1., 2., 0.]]) >>> X.data array([3., 1., 3., 2., 1., 2.]) >>> X_ = sort_graph_by_row_values(X) >>> X_.data array([1., 3., 2., 3., 1., 2.]) """ if graph.format == "csr" and _is_sorted_by_data(graph): return graph if warn_when_not_sorted: warnings.warn( ( "Precomputed sparse input was not sorted by row values. Use the" " function sklearn.neighbors.sort_graph_by_row_values to sort the input" " by row values, with warn_when_not_sorted=False to remove this" " warning." ), EfficiencyWarning, ) if graph.format not in ("csr", "csc", "coo", "lil"): raise TypeError( f"Sparse matrix in {graph.format!r} format is not supported due to " "its handling of explicit zeros" ) elif graph.format != "csr": if not copy: raise ValueError( "The input graph is not in CSR format. Use copy=True to allow " "the conversion to CSR format." ) graph = graph.asformat("csr") elif copy: # csr format with copy=True graph = graph.copy() row_nnz = np.diff(graph.indptr) if row_nnz.max() == row_nnz.min(): # if each sample has the same number of provided neighbors n_samples = graph.shape[0] distances = graph.data.reshape(n_samples, -1) order = np.argsort(distances, kind="mergesort") order += np.arange(n_samples)[:, None] * row_nnz[0] order = order.ravel() graph.data = graph.data[order] graph.indices = graph.indices[order] else: for start, stop in zip(graph.indptr, graph.indptr[1:]): order = np.argsort(graph.data[start:stop], kind="mergesort") graph.data[start:stop] = graph.data[start:stop][order] graph.indices[start:stop] = graph.indices[start:stop][order] return graph
Sort a sparse graph such that each row is stored with increasing values. .. versionadded:: 1.2 Parameters ---------- graph : sparse matrix of shape (n_samples, n_samples) Distance matrix to other samples, where only non-zero elements are considered neighbors. Matrix is converted to CSR format if not already. copy : bool, default=False If True, the graph is copied before sorting. If False, the sorting is performed inplace. If the graph is not of CSR format, `copy` must be True to allow the conversion to CSR format, otherwise an error is raised. warn_when_not_sorted : bool, default=True If True, a :class:`~sklearn.exceptions.EfficiencyWarning` is raised when the input graph is not sorted by row values. Returns ------- graph : sparse matrix of shape (n_samples, n_samples) Distance matrix to other samples, where only non-zero elements are considered neighbors. Matrix is in CSR format. Examples -------- >>> from scipy.sparse import csr_matrix >>> from sklearn.neighbors import sort_graph_by_row_values >>> X = csr_matrix( ... [[0., 3., 1.], ... [3., 0., 2.], ... [1., 2., 0.]]) >>> X.data array([3., 1., 3., 2., 1., 2.]) >>> X_ = sort_graph_by_row_values(X) >>> X_.data array([1., 3., 2., 3., 1., 2.])
sort_graph_by_row_values
python
scikit-learn/scikit-learn
sklearn/neighbors/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_base.py
BSD-3-Clause
def _kneighbors_from_graph(graph, n_neighbors, return_distance):
    """Decompose a nearest neighbors sparse graph into distances and indices.

    Parameters
    ----------
    graph : sparse matrix of shape (n_samples, n_samples)
        Neighbors graph as given by `kneighbors_graph` or
        `radius_neighbors_graph`. Matrix should be in CSR format.

    n_neighbors : int
        Number of neighbors required for each sample.

    return_distance : bool
        Whether or not to return the distances.

    Returns
    -------
    neigh_dist : ndarray of shape (n_samples, n_neighbors)
        Distances to nearest neighbors. Only present if
        `return_distance=True`.

    neigh_ind : ndarray of shape (n_samples, n_neighbors)
        Indices of nearest neighbors.
    """
    n_samples = graph.shape[0]
    assert graph.format == "csr"

    # number of neighbors by samples
    row_nnz = np.diff(graph.indptr)
    row_nnz_min = row_nnz.min()
    if n_neighbors is not None and row_nnz_min < n_neighbors:
        raise ValueError(
            "%d neighbors per sample are required, but some samples have only"
            " %d neighbors in precomputed graph matrix. Decrease number of "
            "neighbors used or recompute the graph with more neighbors."
            % (n_neighbors, row_nnz_min)
        )

    def extract(a):
        # if each sample has the same number of provided neighbors
        if row_nnz.max() == row_nnz_min:
            return a.reshape(n_samples, -1)[:, :n_neighbors]
        else:
            idx = np.tile(np.arange(n_neighbors), (n_samples, 1))
            idx += graph.indptr[:-1, None]
            return a.take(idx, mode="clip").reshape(n_samples, n_neighbors)

    if return_distance:
        return extract(graph.data), extract(graph.indices)
    else:
        return extract(graph.indices)
Decompose a nearest neighbors sparse graph into distances and indices. Parameters ---------- graph : sparse matrix of shape (n_samples, n_samples) Neighbors graph as given by `kneighbors_graph` or `radius_neighbors_graph`. Matrix should be in CSR format. n_neighbors : int Number of neighbors required for each sample. return_distance : bool Whether or not to return the distances. Returns ------- neigh_dist : ndarray of shape (n_samples, n_neighbors) Distances to nearest neighbors. Only present if `return_distance=True`. neigh_ind : ndarray of shape (n_samples, n_neighbors) Indices of nearest neighbors.
_kneighbors_from_graph
python
scikit-learn/scikit-learn
sklearn/neighbors/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_base.py
BSD-3-Clause
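A sketch of this decomposition through the public API (the toy points are illustrative): a precomputed sparse distance graph passed to kneighbors is handled by the _kneighbors_from_graph path.

import numpy as np
from sklearn.neighbors import NearestNeighbors, kneighbors_graph

X = np.array([[0.0], [1.0], [3.0], [6.0]])
graph = kneighbors_graph(X, n_neighbors=2, mode="distance")  # CSR, rows sorted

nn = NearestNeighbors(n_neighbors=2, metric="precomputed").fit(graph)
dist, ind = nn.kneighbors(graph, n_neighbors=2)
print(ind[0], dist[0])  # [1 2] [1. 3.]: point 0's neighbors are points 1 and 2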
def _radius_neighbors_from_graph(graph, radius, return_distance):
    """Decompose a nearest neighbors sparse graph into distances and indices.

    Parameters
    ----------
    graph : sparse matrix of shape (n_samples, n_samples)
        Neighbors graph as given by `kneighbors_graph` or
        `radius_neighbors_graph`. Matrix should be in CSR format.

    radius : float
        Radius of neighborhoods which should be strictly positive.

    return_distance : bool
        Whether or not to return the distances.

    Returns
    -------
    neigh_dist : ndarray of shape (n_samples,) of arrays
        Distances to nearest neighbors. Only present if
        `return_distance=True`.

    neigh_ind : ndarray of shape (n_samples,) of arrays
        Indices of nearest neighbors.
    """
    assert graph.format == "csr"

    no_filter_needed = bool(graph.data.max() <= radius)

    if no_filter_needed:
        data, indices, indptr = graph.data, graph.indices, graph.indptr
    else:
        mask = graph.data <= radius
        if return_distance:
            data = np.compress(mask, graph.data)
        indices = np.compress(mask, graph.indices)
        indptr = np.concatenate(([0], np.cumsum(mask)))[graph.indptr]

    indices = indices.astype(np.intp, copy=no_filter_needed)

    if return_distance:
        neigh_dist = _to_object_array(np.split(data, indptr[1:-1]))
    neigh_ind = _to_object_array(np.split(indices, indptr[1:-1]))

    if return_distance:
        return neigh_dist, neigh_ind
    else:
        return neigh_ind
Decompose a nearest neighbors sparse graph into distances and indices. Parameters ---------- graph : sparse matrix of shape (n_samples, n_samples) Neighbors graph as given by `kneighbors_graph` or `radius_neighbors_graph`. Matrix should be in CSR format. radius : float Radius of neighborhoods which should be strictly positive. return_distance : bool Whether or not to return the distances. Returns ------- neigh_dist : ndarray of shape (n_samples,) of arrays Distances to nearest neighbors. Only present if `return_distance=True`. neigh_ind : ndarray of shape (n_samples,) of arrays Indices of nearest neighbors.
_radius_neighbors_from_graph
python
scikit-learn/scikit-learn
sklearn/neighbors/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_base.py
BSD-3-Clause
def _kneighbors_reduce_func(self, dist, start, n_neighbors, return_distance): """Reduce a chunk of distances to the nearest neighbors. Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked` Parameters ---------- dist : ndarray of shape (n_samples_chunk, n_samples) The distance matrix. start : int The index in X which the first row of dist corresponds to. n_neighbors : int Number of neighbors required for each sample. return_distance : bool Whether or not to return the distances. Returns ------- dist : array of shape (n_samples_chunk, n_neighbors) Returned only if `return_distance=True`. neigh : array of shape (n_samples_chunk, n_neighbors) The neighbors indices. """ sample_range = np.arange(dist.shape[0])[:, None] neigh_ind = np.argpartition(dist, n_neighbors - 1, axis=1) neigh_ind = neigh_ind[:, :n_neighbors] # argpartition doesn't guarantee sorted order, so we sort again neigh_ind = neigh_ind[sample_range, np.argsort(dist[sample_range, neigh_ind])] if return_distance: if self.effective_metric_ == "euclidean": result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind else: result = dist[sample_range, neigh_ind], neigh_ind else: result = neigh_ind return result
Reduce a chunk of distances to the nearest neighbors. Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked` Parameters ---------- dist : ndarray of shape (n_samples_chunk, n_samples) The distance matrix. start : int The index in X which the first row of dist corresponds to. n_neighbors : int Number of neighbors required for each sample. return_distance : bool Whether or not to return the distances. Returns ------- dist : array of shape (n_samples_chunk, n_neighbors) Returned only if `return_distance=True`. neigh : array of shape (n_samples_chunk, n_neighbors) The neighbors indices.
_kneighbors_reduce_func
python
scikit-learn/scikit-learn
sklearn/neighbors/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_base.py
BSD-3-Clause
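A self-contained sketch of the argpartition-then-argsort trick used above (toy distances; no estimator state involved): argpartition grabs the k smallest distances per row in O(n), then argsort orders only those k columns.

import numpy as np

dist = np.array([[4.0, 1.0, 3.0, 2.0],
                 [0.5, 9.0, 0.1, 7.0]])
k = 2
rows = np.arange(dist.shape[0])[:, None]
# Partial sort: the k smallest entries land in the first k columns,
# in arbitrary order.
ind = np.argpartition(dist, k - 1, axis=1)[:, :k]
# Full sort restricted to those k candidates.
ind = ind[rows, np.argsort(dist[rows, ind])]
print(ind)              # [[1 3], [2 0]]
print(dist[rows, ind])  # [[1.  2. ], [0.1 0.5]]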
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
    """Find the K-neighbors of a point.

    Returns indices of and distances to the neighbors of each point.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_queries, n_features), \
        or (n_queries, n_indexed) if metric == 'precomputed', default=None
        The query point or points.
        If not provided, neighbors of each indexed point are returned.
        In this case, the query point is not considered its own neighbor.

    n_neighbors : int, default=None
        Number of neighbors required for each sample. The default is the
        value passed to the constructor.

    return_distance : bool, default=True
        Whether or not to return the distances.

    Returns
    -------
    neigh_dist : ndarray of shape (n_queries, n_neighbors)
        Array representing the lengths to points, only present if
        return_distance=True.

    neigh_ind : ndarray of shape (n_queries, n_neighbors)
        Indices of the nearest points in the population matrix.

    Examples
    --------
    In the following example, we construct a NearestNeighbors
    class from an array representing our data set and ask who's
    the closest point to [1,1,1]

    >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
    >>> from sklearn.neighbors import NearestNeighbors
    >>> neigh = NearestNeighbors(n_neighbors=1)
    >>> neigh.fit(samples)
    NearestNeighbors(n_neighbors=1)
    >>> print(neigh.kneighbors([[1., 1., 1.]]))
    (array([[0.5]]), array([[2]]))

    As you can see, it returns [[0.5]], and [[2]], which means that the
    element is at distance 0.5 and is the third element of samples
    (indexes start at 0). You can also query for multiple points:

    >>> X = [[0., 1., 0.], [1., 0., 1.]]
    >>> neigh.kneighbors(X, return_distance=False)
    array([[1],
           [2]]...)
    """
    check_is_fitted(self)

    if n_neighbors is None:
        n_neighbors = self.n_neighbors
    elif n_neighbors <= 0:
        raise ValueError("Expected n_neighbors > 0. Got %d" % n_neighbors)
    elif not isinstance(n_neighbors, numbers.Integral):
        raise TypeError(
            "n_neighbors does not take %s value, enter integer value"
            % type(n_neighbors)
        )

    ensure_all_finite = "allow-nan" if get_tags(self).input_tags.allow_nan else True
    query_is_train = X is None
    if query_is_train:
        X = self._fit_X
        # Include an extra neighbor to account for the sample itself being
        # returned, which is removed later
        n_neighbors += 1
    else:
        if self.metric == "precomputed":
            X = _check_precomputed(X)
        else:
            X = validate_data(
                self,
                X,
                ensure_all_finite=ensure_all_finite,
                accept_sparse="csr",
                reset=False,
                order="C",
            )

    n_samples_fit = self.n_samples_fit_
    if n_neighbors > n_samples_fit:
        if query_is_train:
            n_neighbors -= 1  # ok to modify inplace because an error is raised
            inequality_str = "n_neighbors < n_samples_fit"
        else:
            inequality_str = "n_neighbors <= n_samples_fit"
        raise ValueError(
            f"Expected {inequality_str}, but "
            f"n_neighbors = {n_neighbors}, n_samples_fit = {n_samples_fit}, "
            f"n_samples = {X.shape[0]}"  # include n_samples for common tests
        )

    n_jobs = effective_n_jobs(self.n_jobs)
    chunked_results = None
    use_pairwise_distances_reductions = (
        self._fit_method == "brute"
        and ArgKmin.is_usable_for(
            X if X is not None else self._fit_X, self._fit_X, self.effective_metric_
        )
    )
    if use_pairwise_distances_reductions:
        results = ArgKmin.compute(
            X=X,
            Y=self._fit_X,
            k=n_neighbors,
            metric=self.effective_metric_,
            metric_kwargs=self.effective_metric_params_,
            strategy="auto",
            return_distance=return_distance,
        )

    elif (
        self._fit_method == "brute" and self.metric == "precomputed" and issparse(X)
    ):
        results = _kneighbors_from_graph(
            X, n_neighbors=n_neighbors, return_distance=return_distance
        )

    elif self._fit_method == "brute":
        # Joblib-based backend, which is used when a user-defined callable
        # is passed for metric.

        # This won't be used in the future once PairwiseDistancesReductions
        # support:
        #   - DistanceMetrics which work on supposedly binary data
        #   - CSR-dense and dense-CSR case if 'euclidean' in metric.
        reduce_func = partial(
            self._kneighbors_reduce_func,
            n_neighbors=n_neighbors,
            return_distance=return_distance,
        )

        # for efficiency, use squared euclidean distances
        if self.effective_metric_ == "euclidean":
            kwds = {"squared": True}
        else:
            kwds = self.effective_metric_params_

        chunked_results = list(
            pairwise_distances_chunked(
                X,
                self._fit_X,
                reduce_func=reduce_func,
                metric=self.effective_metric_,
                n_jobs=n_jobs,
                **kwds,
            )
        )

    elif self._fit_method in ["ball_tree", "kd_tree"]:
        if issparse(X):
            raise ValueError(
                "%s does not work with sparse matrices. Densify the data, "
                "or set algorithm='brute'" % self._fit_method
            )
        chunked_results = Parallel(n_jobs, prefer="threads")(
            delayed(self._tree.query)(X[s], n_neighbors, return_distance)
            for s in gen_even_slices(X.shape[0], n_jobs)
        )
    else:
        raise ValueError("internal: _fit_method not recognized")

    if chunked_results is not None:
        if return_distance:
            neigh_dist, neigh_ind = zip(*chunked_results)
            results = np.vstack(neigh_dist), np.vstack(neigh_ind)
        else:
            results = np.vstack(chunked_results)

    if not query_is_train:
        return results
    else:
        # If the query data is the same as the indexed data, we would like
        # to ignore the first nearest neighbor of every sample, i.e.
        # the sample itself.
        if return_distance:
            neigh_dist, neigh_ind = results
        else:
            neigh_ind = results

        n_queries, _ = X.shape
        sample_range = np.arange(n_queries)[:, None]
        sample_mask = neigh_ind != sample_range

        # Corner case: When the number of duplicates is more
        # than the number of neighbors, the first NN will not
        # be the sample, but a duplicate.
        # In that case mask the first duplicate.
        dup_gr_nbrs = np.all(sample_mask, axis=1)
        sample_mask[:, 0][dup_gr_nbrs] = False
        neigh_ind = np.reshape(neigh_ind[sample_mask], (n_queries, n_neighbors - 1))

        if return_distance:
            neigh_dist = np.reshape(
                neigh_dist[sample_mask], (n_queries, n_neighbors - 1)
            )
            return neigh_dist, neigh_ind
        return neigh_ind
Find the K-neighbors of a point. Returns indices of and distances to the neighbors of each point. Parameters ---------- X : {array-like, sparse matrix}, shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed', default=None The query point or points. If not provided, neighbors of each indexed point are returned. In this case, the query point is not considered its own neighbor. n_neighbors : int, default=None Number of neighbors required for each sample. The default is the value passed to the constructor. return_distance : bool, default=True Whether or not to return the distances. Returns ------- neigh_dist : ndarray of shape (n_queries, n_neighbors) Array representing the lengths to points, only present if return_distance=True. neigh_ind : ndarray of shape (n_queries, n_neighbors) Indices of the nearest points in the population matrix. Examples -------- In the following example, we construct a NearestNeighbors class from an array representing our data set and ask who's the closest point to [1,1,1] >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]] >>> from sklearn.neighbors import NearestNeighbors >>> neigh = NearestNeighbors(n_neighbors=1) >>> neigh.fit(samples) NearestNeighbors(n_neighbors=1) >>> print(neigh.kneighbors([[1., 1., 1.]])) (array([[0.5]]), array([[2]])) As you can see, it returns [[0.5]], and [[2]], which means that the element is at distance 0.5 and is the third element of samples (indexes start at 0). You can also query for multiple points: >>> X = [[0., 1., 0.], [1., 0., 1.]] >>> neigh.kneighbors(X, return_distance=False) array([[1], [2]]...)
kneighbors
python
scikit-learn/scikit-learn
sklearn/neighbors/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_base.py
BSD-3-Clause
def kneighbors_graph(self, X=None, n_neighbors=None, mode="connectivity"): """Compute the (weighted) graph of k-Neighbors for points in X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), \ or (n_queries, n_indexed) if metric == 'precomputed', default=None The query point or points. If not provided, neighbors of each indexed point are returned. In this case, the query point is not considered its own neighbor. For ``metric='precomputed'`` the shape should be (n_queries, n_indexed). Otherwise the shape should be (n_queries, n_features). n_neighbors : int, default=None Number of neighbors for each sample. The default is the value passed to the constructor. mode : {'connectivity', 'distance'}, default='connectivity' Type of returned matrix: 'connectivity' will return the connectivity matrix with ones and zeros, in 'distance' the edges are distances between points, type of distance depends on the selected metric parameter in NearestNeighbors class. Returns ------- A : sparse-matrix of shape (n_queries, n_samples_fit) `n_samples_fit` is the number of samples in the fitted data. `A[i, j]` gives the weight of the edge connecting `i` to `j`. The matrix is of CSR format. See Also -------- NearestNeighbors.radius_neighbors_graph : Compute the (weighted) graph of Neighbors for points in X. Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import NearestNeighbors >>> neigh = NearestNeighbors(n_neighbors=2) >>> neigh.fit(X) NearestNeighbors(n_neighbors=2) >>> A = neigh.kneighbors_graph(X) >>> A.toarray() array([[1., 0., 1.], [0., 1., 1.], [1., 0., 1.]]) """ check_is_fitted(self) if n_neighbors is None: n_neighbors = self.n_neighbors # check the input only in self.kneighbors # construct CSR matrix representation of the k-NN graph if mode == "connectivity": A_ind = self.kneighbors(X, n_neighbors, return_distance=False) n_queries = A_ind.shape[0] A_data = np.ones(n_queries * n_neighbors) elif mode == "distance": A_data, A_ind = self.kneighbors(X, n_neighbors, return_distance=True) A_data = np.ravel(A_data) else: raise ValueError( 'Unsupported mode, must be one of "connectivity", ' f'or "distance" but got "{mode}" instead' ) n_queries = A_ind.shape[0] n_samples_fit = self.n_samples_fit_ n_nonzero = n_queries * n_neighbors A_indptr = np.arange(0, n_nonzero + 1, n_neighbors) kneighbors_graph = csr_matrix( (A_data, A_ind.ravel(), A_indptr), shape=(n_queries, n_samples_fit) ) return kneighbors_graph
Compute the (weighted) graph of k-Neighbors for points in X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed', default=None The query point or points. If not provided, neighbors of each indexed point are returned. In this case, the query point is not considered its own neighbor. For ``metric='precomputed'`` the shape should be (n_queries, n_indexed). Otherwise the shape should be (n_queries, n_features). n_neighbors : int, default=None Number of neighbors for each sample. The default is the value passed to the constructor. mode : {'connectivity', 'distance'}, default='connectivity' Type of returned matrix: 'connectivity' will return the connectivity matrix with ones and zeros, in 'distance' the edges are distances between points, type of distance depends on the selected metric parameter in NearestNeighbors class. Returns ------- A : sparse-matrix of shape (n_queries, n_samples_fit) `n_samples_fit` is the number of samples in the fitted data. `A[i, j]` gives the weight of the edge connecting `i` to `j`. The matrix is of CSR format. See Also -------- NearestNeighbors.radius_neighbors_graph : Compute the (weighted) graph of Neighbors for points in X. Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import NearestNeighbors >>> neigh = NearestNeighbors(n_neighbors=2) >>> neigh.fit(X) NearestNeighbors(n_neighbors=2) >>> A = neigh.kneighbors_graph(X) >>> A.toarray() array([[1., 0., 1.], [0., 1., 1.], [1., 0., 1.]])
kneighbors_graph
python
scikit-learn/scikit-learn
sklearn/neighbors/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_base.py
BSD-3-Clause
def _radius_neighbors_reduce_func(self, dist, start, radius, return_distance): """Reduce a chunk of distances to the nearest neighbors. Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked` Parameters ---------- dist : ndarray of shape (n_samples_chunk, n_samples) The distance matrix. start : int The index in X which the first row of dist corresponds to. radius : float The radius considered when making the nearest neighbors search. return_distance : bool Whether or not to return the distances. Returns ------- dist : list of ndarray of shape (n_samples_chunk,) Returned only if `return_distance=True`. neigh : list of ndarray of shape (n_samples_chunk,) The neighbors indices. """ neigh_ind = [np.where(d <= radius)[0] for d in dist] if return_distance: if self.effective_metric_ == "euclidean": dist = [np.sqrt(d[neigh_ind[i]]) for i, d in enumerate(dist)] else: dist = [d[neigh_ind[i]] for i, d in enumerate(dist)] results = dist, neigh_ind else: results = neigh_ind return results
Reduce a chunk of distances to the nearest neighbors. Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked` Parameters ---------- dist : ndarray of shape (n_samples_chunk, n_samples) The distance matrix. start : int The index in X which the first row of dist corresponds to. radius : float The radius considered when making the nearest neighbors search. return_distance : bool Whether or not to return the distances. Returns ------- dist : list of ndarray of shape (n_samples_chunk,) Returned only if `return_distance=True`. neigh : list of ndarray of shape (n_samples_chunk,) The neighbors indices.
_radius_neighbors_reduce_func
python
scikit-learn/scikit-learn
sklearn/neighbors/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_base.py
BSD-3-Clause
def radius_neighbors_graph(
    self, X=None, radius=None, mode="connectivity", sort_results=False
):
    """Compute the (weighted) graph of Neighbors for points in X.

    Neighborhoods are restricted to the points at a distance lower than
    radius.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None
        The query point or points.
        If not provided, neighbors of each indexed point are returned.
        In this case, the query point is not considered its own neighbor.

    radius : float, default=None
        Radius of neighborhoods. The default is the value passed to the
        constructor.

    mode : {'connectivity', 'distance'}, default='connectivity'
        Type of returned matrix: 'connectivity' will return the
        connectivity matrix with ones and zeros, in 'distance' the
        edges are distances between points, type of distance
        depends on the selected metric parameter in
        NearestNeighbors class.

    sort_results : bool, default=False
        If True, in each row of the result, the non-zero entries will be
        sorted by increasing distances. If False, the non-zero entries may
        not be sorted. Only used with mode='distance'.

        .. versionadded:: 0.22

    Returns
    -------
    A : sparse-matrix of shape (n_queries, n_samples_fit)
        `n_samples_fit` is the number of samples in the fitted data.
        `A[i, j]` gives the weight of the edge connecting `i` to `j`.
        The matrix is of CSR format.

    See Also
    --------
    kneighbors_graph : Compute the (weighted) graph of k-Neighbors for
        points in X.

    Examples
    --------
    >>> X = [[0], [3], [1]]
    >>> from sklearn.neighbors import NearestNeighbors
    >>> neigh = NearestNeighbors(radius=1.5)
    >>> neigh.fit(X)
    NearestNeighbors(radius=1.5)
    >>> A = neigh.radius_neighbors_graph(X)
    >>> A.toarray()
    array([[1., 0., 1.],
           [0., 1., 0.],
           [1., 0., 1.]])
    """
    check_is_fitted(self)

    # check the input only in self.radius_neighbors
    if radius is None:
        radius = self.radius

    # construct CSR matrix representation of the NN graph
    if mode == "connectivity":
        A_ind = self.radius_neighbors(X, radius, return_distance=False)
        A_data = None
    elif mode == "distance":
        dist, A_ind = self.radius_neighbors(
            X, radius, return_distance=True, sort_results=sort_results
        )
        A_data = np.concatenate(list(dist))
    else:
        raise ValueError(
            'Unsupported mode, must be one of "connectivity", '
            f'or "distance" but got "{mode}" instead'
        )

    n_queries = A_ind.shape[0]
    n_samples_fit = self.n_samples_fit_
    n_neighbors = np.array([len(a) for a in A_ind])
    A_ind = np.concatenate(list(A_ind))
    if A_data is None:
        A_data = np.ones(len(A_ind))
    A_indptr = np.concatenate((np.zeros(1, dtype=int), np.cumsum(n_neighbors)))

    return csr_matrix((A_data, A_ind, A_indptr), shape=(n_queries, n_samples_fit))
Compute the (weighted) graph of Neighbors for points in X. Neighborhoods are restricted to the points at a distance lower than radius. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None The query point or points. If not provided, neighbors of each indexed point are returned. In this case, the query point is not considered its own neighbor. radius : float, default=None Radius of neighborhoods. The default is the value passed to the constructor. mode : {'connectivity', 'distance'}, default='connectivity' Type of returned matrix: 'connectivity' will return the connectivity matrix with ones and zeros, in 'distance' the edges are distances between points, type of distance depends on the selected metric parameter in NearestNeighbors class. sort_results : bool, default=False If True, in each row of the result, the non-zero entries will be sorted by increasing distances. If False, the non-zero entries may not be sorted. Only used with mode='distance'. .. versionadded:: 0.22 Returns ------- A : sparse-matrix of shape (n_queries, n_samples_fit) `n_samples_fit` is the number of samples in the fitted data. `A[i, j]` gives the weight of the edge connecting `i` to `j`. The matrix is of CSR format. See Also -------- kneighbors_graph : Compute the (weighted) graph of k-Neighbors for points in X. Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import NearestNeighbors >>> neigh = NearestNeighbors(radius=1.5) >>> neigh.fit(X) NearestNeighbors(radius=1.5) >>> A = neigh.radius_neighbors_graph(X) >>> A.toarray() array([[1., 0., 1.], [0., 1., 0.], [1., 0., 1.]])
radius_neighbors_graph
python
scikit-learn/scikit-learn
sklearn/neighbors/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_base.py
BSD-3-Clause
def predict(self, X): """Predict the class labels for the provided data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), \ or (n_queries, n_indexed) if metric == 'precomputed', or None Test samples. If `None`, predictions for all indexed points are returned; in this case, points are not considered their own neighbors. Returns ------- y : ndarray of shape (n_queries,) or (n_queries, n_outputs) Class labels for each data sample. """ check_is_fitted(self, "_fit_method") if self.weights == "uniform": if self._fit_method == "brute" and ArgKminClassMode.is_usable_for( X, self._fit_X, self.metric ): probabilities = self.predict_proba(X) if self.outputs_2d_: return np.stack( [ self.classes_[idx][np.argmax(probas, axis=1)] for idx, probas in enumerate(probabilities) ], axis=1, ) return self.classes_[np.argmax(probabilities, axis=1)] # In that case, we do not need the distances to perform # the weighting so we do not compute them. neigh_ind = self.kneighbors(X, return_distance=False) neigh_dist = None else: neigh_dist, neigh_ind = self.kneighbors(X) classes_ = self.classes_ _y = self._y if not self.outputs_2d_: _y = self._y.reshape((-1, 1)) classes_ = [self.classes_] n_outputs = len(classes_) n_queries = _num_samples(self._fit_X if X is None else X) weights = _get_weights(neigh_dist, self.weights) if weights is not None and _all_with_any_reduction_axis_1(weights, value=0): raise ValueError( "All neighbors of some sample is getting zero weights. " "Please modify 'weights' to avoid this case if you are " "using a user-defined function." ) y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype) for k, classes_k in enumerate(classes_): if weights is None: mode, _ = _mode(_y[neigh_ind, k], axis=1) else: mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1) mode = np.asarray(mode.ravel(), dtype=np.intp) y_pred[:, k] = classes_k.take(mode) if not self.outputs_2d_: y_pred = y_pred.ravel() return y_pred
Predict the class labels for the provided data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed', or None Test samples. If `None`, predictions for all indexed points are returned; in this case, points are not considered their own neighbors. Returns ------- y : ndarray of shape (n_queries,) or (n_queries, n_outputs) Class labels for each data sample.
predict
python
scikit-learn/scikit-learn
sklearn/neighbors/_classification.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_classification.py
BSD-3-Clause
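A sketch of the weighted voting step in isolation, assuming toy neighbor labels and weights (weighted_mode is the helper the method calls):

import numpy as np
from sklearn.utils.extmath import weighted_mode

neigh_labels = np.array([[0, 1, 1],
                         [2, 2, 0]])
weights = np.array([[3.0, 1.0, 1.0],
                    [1.0, 1.0, 5.0]])
# Per row, pick the label whose weights sum highest, not the most frequent.
mode, _ = weighted_mode(neigh_labels, weights, axis=1)
print(mode.ravel().astype(int))  # [0 0]: weights outvote raw counts in both rows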
def predict_proba(self, X): """Return probability estimates for the test data X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), \ or (n_queries, n_indexed) if metric == 'precomputed', or None Test samples. If `None`, predictions for all indexed points are returned; in this case, points are not considered their own neighbors. Returns ------- p : ndarray of shape (n_queries, n_classes), or a list of n_outputs \ of such arrays if n_outputs > 1. The class probabilities of the input samples. Classes are ordered by lexicographic order. """ check_is_fitted(self, "_fit_method") if self.weights == "uniform": # TODO: systematize this mapping of metric for # PairwiseDistancesReductions. metric, metric_kwargs = _adjusted_metric( metric=self.metric, metric_kwargs=self.metric_params, p=self.p ) if ( self._fit_method == "brute" and ArgKminClassMode.is_usable_for(X, self._fit_X, metric) # TODO: Implement efficient multi-output solution and not self.outputs_2d_ ): if self.metric == "precomputed": X = _check_precomputed(X) else: X = validate_data( self, X, accept_sparse="csr", reset=False, order="C" ) probabilities = ArgKminClassMode.compute( X, self._fit_X, k=self.n_neighbors, weights=self.weights, Y_labels=self._y, unique_Y_labels=self.classes_, metric=metric, metric_kwargs=metric_kwargs, # `strategy="parallel_on_X"` has in practice be shown # to be more efficient than `strategy="parallel_on_Y`` # on many combination of datasets. # Hence, we choose to enforce it here. # For more information, see: # https://github.com/scikit-learn/scikit-learn/pull/24076#issuecomment-1445258342 # TODO: adapt the heuristic for `strategy="auto"` for # `ArgKminClassMode` and use `strategy="auto"`. strategy="parallel_on_X", ) return probabilities # In that case, we do not need the distances to perform # the weighting so we do not compute them. neigh_ind = self.kneighbors(X, return_distance=False) neigh_dist = None else: neigh_dist, neigh_ind = self.kneighbors(X) classes_ = self.classes_ _y = self._y if not self.outputs_2d_: _y = self._y.reshape((-1, 1)) classes_ = [self.classes_] n_queries = _num_samples(self._fit_X if X is None else X) weights = _get_weights(neigh_dist, self.weights) if weights is None: weights = np.ones_like(neigh_ind) elif _all_with_any_reduction_axis_1(weights, value=0): raise ValueError( "All neighbors of some sample is getting zero weights. " "Please modify 'weights' to avoid this case if you are " "using a user-defined function." ) all_rows = np.arange(n_queries) probabilities = [] for k, classes_k in enumerate(classes_): pred_labels = _y[:, k][neigh_ind] proba_k = np.zeros((n_queries, classes_k.size)) # a simple ':' index doesn't work right for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors) proba_k[all_rows, idx] += weights[:, i] # normalize 'votes' into real [0,1] probabilities normalizer = proba_k.sum(axis=1)[:, np.newaxis] proba_k /= normalizer probabilities.append(proba_k) if not self.outputs_2d_: probabilities = probabilities[0] return probabilities
Return probability estimates for the test data X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed', or None Test samples. If `None`, predictions for all indexed points are returned; in this case, points are not considered their own neighbors. Returns ------- p : ndarray of shape (n_queries, n_classes), or a list of n_outputs of such arrays if n_outputs > 1. The class probabilities of the input samples. Classes are ordered by lexicographic order.
predict_proba
python
scikit-learn/scikit-learn
sklearn/neighbors/_classification.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_classification.py
BSD-3-Clause
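A stand-alone sketch of the probability accumulation loop above (toy labels; names are illustrative): per-neighbor weights are scattered into class columns, then each row is normalized to sum to one.

import numpy as np

neigh_labels = np.array([[0, 1, 1],
                         [2, 2, 0]])  # labels of the k neighbors per query
weights = np.ones_like(neigh_labels, dtype=float)  # uniform weights here
n_queries, n_classes = 2, 3

proba = np.zeros((n_queries, n_classes))
all_rows = np.arange(n_queries)
for i, idx in enumerate(neigh_labels.T):  # loop is O(n_neighbors)
    proba[all_rows, idx] += weights[:, i]
proba /= proba.sum(axis=1, keepdims=True)
print(proba)  # [[0.33 0.67 0.  ], [0.33 0.   0.67]] up to rounding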
def fit(self, X, y): """Fit the radius neighbors classifier from the training dataset. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ (n_samples, n_samples) if metric='precomputed' Training data. y : {array-like, sparse matrix} of shape (n_samples,) or \ (n_samples, n_outputs) Target values. Returns ------- self : RadiusNeighborsClassifier The fitted radius neighbors classifier. """ self._fit(X, y) classes_ = self.classes_ _y = self._y if not self.outputs_2d_: _y = self._y.reshape((-1, 1)) classes_ = [self.classes_] if self.outlier_label is None: outlier_label_ = None elif self.outlier_label == "most_frequent": outlier_label_ = [] # iterate over multi-output, get the most frequent label for each # output. for k, classes_k in enumerate(classes_): label_count = np.bincount(_y[:, k]) outlier_label_.append(classes_k[label_count.argmax()]) else: if _is_arraylike(self.outlier_label) and not isinstance( self.outlier_label, str ): if len(self.outlier_label) != len(classes_): raise ValueError( "The length of outlier_label: {} is " "inconsistent with the output " "length: {}".format(self.outlier_label, len(classes_)) ) outlier_label_ = self.outlier_label else: outlier_label_ = [self.outlier_label] * len(classes_) for classes, label in zip(classes_, outlier_label_): if _is_arraylike(label) and not isinstance(label, str): # ensure the outlier label for each output is a scalar. raise TypeError( "The outlier_label of classes {} is " "supposed to be a scalar, got " "{}.".format(classes, label) ) if np.append(classes, label).dtype != classes.dtype: # ensure the dtype of outlier label is consistent with y. raise TypeError( "The dtype of outlier_label {} is " "inconsistent with classes {} in " "y.".format(label, classes) ) self.outlier_label_ = outlier_label_ return self
Fit the radius neighbors classifier from the training dataset. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed' Training data. y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs) Target values. Returns ------- self : RadiusNeighborsClassifier The fitted radius neighbors classifier.
fit
python
scikit-learn/scikit-learn
sklearn/neighbors/_classification.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_classification.py
BSD-3-Clause
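A small end-to-end sketch of what outlier_label buys (toy 1-D data; -1 is an arbitrary sentinel not present in y): a query with an empty neighborhood gets the sentinel instead of raising a ValueError. Because -1 is not a training class, predict_proba emits a warning and leaves the outlier's probabilities at zero, which predict then maps to the sentinel.

import numpy as np
from sklearn.neighbors import RadiusNeighborsClassifier

X = np.array([[0.0], [0.2], [5.0], [5.1]])
y = np.array([0, 0, 1, 1])
clf = RadiusNeighborsClassifier(radius=0.5, outlier_label=-1).fit(X, y)
# The second query point (2.5) has no neighbor within radius 0.5.
print(clf.predict([[0.1], [2.5]]))  # [ 0 -1]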
def predict(self, X): """Predict the class labels for the provided data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), \ or (n_queries, n_indexed) if metric == 'precomputed', or None Test samples. If `None`, predictions for all indexed points are returned; in this case, points are not considered their own neighbors. Returns ------- y : ndarray of shape (n_queries,) or (n_queries, n_outputs) Class labels for each data sample. """ probs = self.predict_proba(X) classes_ = self.classes_ if not self.outputs_2d_: probs = [probs] classes_ = [self.classes_] n_outputs = len(classes_) n_queries = probs[0].shape[0] y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype) for k, prob in enumerate(probs): # iterate over multi-output, assign labels based on probabilities # of each output. max_prob_index = prob.argmax(axis=1) y_pred[:, k] = classes_[k].take(max_prob_index) outlier_zero_probs = (prob == 0).all(axis=1) if outlier_zero_probs.any(): zero_prob_index = np.flatnonzero(outlier_zero_probs) y_pred[zero_prob_index, k] = self.outlier_label_[k] if not self.outputs_2d_: y_pred = y_pred.ravel() return y_pred
Predict the class labels for the provided data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed', or None Test samples. If `None`, predictions for all indexed points are returned; in this case, points are not considered their own neighbors. Returns ------- y : ndarray of shape (n_queries,) or (n_queries, n_outputs) Class labels for each data sample.
predict
python
scikit-learn/scikit-learn
sklearn/neighbors/_classification.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_classification.py
BSD-3-Clause
def predict_proba(self, X): """Return probability estimates for the test data X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), \ or (n_queries, n_indexed) if metric == 'precomputed', or None Test samples. If `None`, predictions for all indexed points are returned; in this case, points are not considered their own neighbors. Returns ------- p : ndarray of shape (n_queries, n_classes), or a list of \ n_outputs of such arrays if n_outputs > 1. The class probabilities of the input samples. Classes are ordered by lexicographic order. """ check_is_fitted(self, "_fit_method") n_queries = _num_samples(self._fit_X if X is None else X) metric, metric_kwargs = _adjusted_metric( metric=self.metric, metric_kwargs=self.metric_params, p=self.p ) if ( self.weights == "uniform" and self._fit_method == "brute" and not self.outputs_2d_ and RadiusNeighborsClassMode.is_usable_for(X, self._fit_X, metric) ): probabilities = RadiusNeighborsClassMode.compute( X=X, Y=self._fit_X, radius=self.radius, weights=self.weights, Y_labels=self._y, unique_Y_labels=self.classes_, outlier_label=self.outlier_label, metric=metric, metric_kwargs=metric_kwargs, strategy="parallel_on_X", # `strategy="parallel_on_X"` has in practice be shown # to be more efficient than `strategy="parallel_on_Y`` # on many combination of datasets. # Hence, we choose to enforce it here. # For more information, see: # https://github.com/scikit-learn/scikit-learn/pull/26828/files#r1282398471 ) return probabilities neigh_dist, neigh_ind = self.radius_neighbors(X) outlier_mask = np.zeros(n_queries, dtype=bool) outlier_mask[:] = [len(nind) == 0 for nind in neigh_ind] outliers = np.flatnonzero(outlier_mask) inliers = np.flatnonzero(~outlier_mask) classes_ = self.classes_ _y = self._y if not self.outputs_2d_: _y = self._y.reshape((-1, 1)) classes_ = [self.classes_] if self.outlier_label_ is None and outliers.size > 0: raise ValueError( "No neighbors found for test samples %r, " "you can try using larger radius, " "giving a label for outliers, " "or considering removing them from your dataset." % outliers ) weights = _get_weights(neigh_dist, self.weights) if weights is not None: weights = weights[inliers] probabilities = [] # iterate over multi-output, measure probabilities of the k-th output. for k, classes_k in enumerate(classes_): pred_labels = np.zeros(len(neigh_ind), dtype=object) pred_labels[:] = [_y[ind, k] for ind in neigh_ind] proba_k = np.zeros((n_queries, classes_k.size)) proba_inl = np.zeros((len(inliers), classes_k.size)) # samples have different size of neighbors within the same radius if weights is None: for i, idx in enumerate(pred_labels[inliers]): proba_inl[i, :] = np.bincount(idx, minlength=classes_k.size) else: for i, idx in enumerate(pred_labels[inliers]): proba_inl[i, :] = np.bincount( idx, weights[i], minlength=classes_k.size ) proba_k[inliers, :] = proba_inl if outliers.size > 0: _outlier_label = self.outlier_label_[k] label_index = np.flatnonzero(classes_k == _outlier_label) if label_index.size == 1: proba_k[outliers, label_index[0]] = 1.0 else: warnings.warn( "Outlier label {} is not in training " "classes. All class probabilities of " "outliers will be assigned with 0." "".format(self.outlier_label_[k]) ) # normalize 'votes' into real [0,1] probabilities normalizer = proba_k.sum(axis=1)[:, np.newaxis] normalizer[normalizer == 0.0] = 1.0 proba_k /= normalizer probabilities.append(proba_k) if not self.outputs_2d_: probabilities = probabilities[0] return probabilities
Return probability estimates for the test data X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed', or None Test samples. If `None`, predictions for all indexed points are returned; in this case, points are not considered their own neighbors. Returns ------- p : ndarray of shape (n_queries, n_classes), or a list of n_outputs of such arrays if n_outputs > 1. The class probabilities of the input samples. Classes are ordered by lexicographic order.
predict_proba
python
scikit-learn/scikit-learn
sklearn/neighbors/_classification.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_classification.py
BSD-3-Clause
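A minimal usage sketch (not part of the record above): it shows how `outlier_label` interacts with `predict_proba` when a query point has an empty neighborhood. The toy arrays and the radius value are illustrative assumptions.

import numpy as np
from sklearn.neighbors import RadiusNeighborsClassifier

X_train = np.array([[0.0], [0.5], [1.0], [5.0]])
y_train = np.array([0, 0, 1, 1])

# With outlier_label set, a query with no neighbors inside the radius puts
# all of its probability mass on the outlier label instead of raising a
# ValueError.
clf = RadiusNeighborsClassifier(radius=1.0, outlier_label=0)
clf.fit(X_train, y_train)
print(clf.predict_proba(np.array([[0.2], [10.0]])))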
def _check_params(X, metric, p, metric_params): """Check the validity of the input parameters""" params = zip(["metric", "p", "metric_params"], [metric, p, metric_params]) est_params = X.get_params() for param_name, func_param in params: if func_param != est_params[param_name]: raise ValueError( "Got %s for %s, while the estimator has %s for the same parameter." % (func_param, param_name, est_params[param_name]) )
Check the validity of the input parameters
_check_params
python
scikit-learn/scikit-learn
sklearn/neighbors/_graph.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_graph.py
BSD-3-Clause
def _query_include_self(X, include_self, mode):
    """Return the query based on include_self param"""
    if include_self == "auto":
        include_self = mode == "connectivity"

    # passing X=None to the neighbors search excludes each sample from its
    # own neighborhood
    if not include_self:
        X = None

    return X
Return the query based on include_self param
_query_include_self
python
scikit-learn/scikit-learn
sklearn/neighbors/_graph.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_graph.py
BSD-3-Clause
def kneighbors_graph( X, n_neighbors, *, mode="connectivity", metric="minkowski", p=2, metric_params=None, include_self=False, n_jobs=None, ): """Compute the (weighted) graph of k-Neighbors for points in X. Read more in the :ref:`User Guide <unsupervised_neighbors>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Sample data. n_neighbors : int Number of neighbors for each sample. mode : {'connectivity', 'distance'}, default='connectivity' Type of returned matrix: 'connectivity' will return the connectivity matrix with ones and zeros, and 'distance' will return the distances between neighbors according to the given metric. metric : str, default='minkowski' Metric to use for distance computation. Default is "minkowski", which results in the standard Euclidean distance when p = 2. See the documentation of `scipy.spatial.distance <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and the metrics listed in :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric values. p : float, default=2 Power parameter for the Minkowski metric. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. This parameter is expected to be positive. metric_params : dict, default=None Additional keyword arguments for the metric function. include_self : bool or 'auto', default=False Whether or not to mark each sample as the first nearest neighbor to itself. If 'auto', then True is used for mode='connectivity' and False for mode='distance'. n_jobs : int, default=None The number of parallel jobs to run for neighbors search. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. Returns ------- A : sparse matrix of shape (n_samples, n_samples) Graph where A[i, j] is assigned the weight of edge that connects i to j. The matrix is of CSR format. See Also -------- radius_neighbors_graph: Compute the (weighted) graph of Neighbors for points in X. Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import kneighbors_graph >>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True) >>> A.toarray() array([[1., 0., 1.], [0., 1., 1.], [1., 0., 1.]]) """ if not isinstance(X, KNeighborsMixin): X = NearestNeighbors( n_neighbors=n_neighbors, metric=metric, p=p, metric_params=metric_params, n_jobs=n_jobs, ).fit(X) else: _check_params(X, metric, p, metric_params) query = _query_include_self(X._fit_X, include_self, mode) return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
Compute the (weighted) graph of k-Neighbors for points in X. Read more in the :ref:`User Guide <unsupervised_neighbors>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Sample data. n_neighbors : int Number of neighbors for each sample. mode : {'connectivity', 'distance'}, default='connectivity' Type of returned matrix: 'connectivity' will return the connectivity matrix with ones and zeros, and 'distance' will return the distances between neighbors according to the given metric. metric : str, default='minkowski' Metric to use for distance computation. Default is "minkowski", which results in the standard Euclidean distance when p = 2. See the documentation of `scipy.spatial.distance <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and the metrics listed in :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric values. p : float, default=2 Power parameter for the Minkowski metric. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. This parameter is expected to be positive. metric_params : dict, default=None Additional keyword arguments for the metric function. include_self : bool or 'auto', default=False Whether or not to mark each sample as the first nearest neighbor to itself. If 'auto', then True is used for mode='connectivity' and False for mode='distance'. n_jobs : int, default=None The number of parallel jobs to run for neighbors search. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. Returns ------- A : sparse matrix of shape (n_samples, n_samples) Graph where A[i, j] is assigned the weight of edge that connects i to j. The matrix is of CSR format. See Also -------- radius_neighbors_graph: Compute the (weighted) graph of Neighbors for points in X. Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import kneighbors_graph >>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True) >>> A.toarray() array([[1., 0., 1.], [0., 1., 1.], [1., 0., 1.]])
kneighbors_graph
python
scikit-learn/scikit-learn
sklearn/neighbors/_graph.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_graph.py
BSD-3-Clause
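As a complement to the docstring example, a short sketch (assumed, not from the record) using mode='distance', where the edge weights carry metric distances instead of ones:

from sklearn.neighbors import kneighbors_graph

X = [[0], [3], [1]]
# Same data as the docstring example; entries absent from the graph are
# stored as implicit zeros in the CSR matrix.
A = kneighbors_graph(X, 2, mode="distance", include_self=False)
print(A.toarray())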
def radius_neighbors_graph(
    X,
    radius,
    *,
    mode="connectivity",
    metric="minkowski",
    p=2,
    metric_params=None,
    include_self=False,
    n_jobs=None,
):
    """Compute the (weighted) graph of Neighbors for points in X.

    Neighborhoods are restricted to points at a distance lower than
    radius.

    Read more in the :ref:`User Guide <unsupervised_neighbors>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Sample data.

    radius : float
        Radius of neighborhoods.

    mode : {'connectivity', 'distance'}, default='connectivity'
        Type of returned matrix: 'connectivity' will return the
        connectivity matrix with ones and zeros, and 'distance' will return
        the distances between neighbors according to the given metric.

    metric : str, default='minkowski'
        Metric to use for distance computation. Default is "minkowski", which
        results in the standard Euclidean distance when p = 2. See the
        documentation of `scipy.spatial.distance
        <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
        the metrics listed in
        :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
        values.

    p : float, default=2
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    metric_params : dict, default=None
        Additional keyword arguments for the metric function.

    include_self : bool or 'auto', default=False
        Whether or not to mark each sample as the first nearest neighbor to
        itself. If 'auto', then True is used for mode='connectivity' and False
        for mode='distance'.

    n_jobs : int, default=None
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Returns
    -------
    A : sparse matrix of shape (n_samples, n_samples)
        Graph where A[i, j] is assigned the weight of edge that connects
        i to j. The matrix is of CSR format.

    See Also
    --------
    kneighbors_graph: Compute the weighted graph of k-neighbors for
        points in X.

    Examples
    --------
    >>> X = [[0], [3], [1]]
    >>> from sklearn.neighbors import radius_neighbors_graph
    >>> A = radius_neighbors_graph(X, 1.5, mode='connectivity',
    ...                            include_self=True)
    >>> A.toarray()
    array([[1., 0., 1.],
           [0., 1., 0.],
           [1., 0., 1.]])
    """
    if not isinstance(X, RadiusNeighborsMixin):
        X = NearestNeighbors(
            radius=radius,
            metric=metric,
            p=p,
            metric_params=metric_params,
            n_jobs=n_jobs,
        ).fit(X)
    else:
        _check_params(X, metric, p, metric_params)

    query = _query_include_self(X._fit_X, include_self, mode)
    return X.radius_neighbors_graph(query, radius, mode)
Compute the (weighted) graph of Neighbors for points in X.

Neighborhoods are restricted to points at a distance lower than radius.

Read more in the :ref:`User Guide <unsupervised_neighbors>`.

Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
    Sample data.

radius : float
    Radius of neighborhoods.

mode : {'connectivity', 'distance'}, default='connectivity'
    Type of returned matrix: 'connectivity' will return the connectivity
    matrix with ones and zeros, and 'distance' will return the distances
    between neighbors according to the given metric.

metric : str, default='minkowski'
    Metric to use for distance computation. Default is "minkowski", which
    results in the standard Euclidean distance when p = 2. See the
    documentation of `scipy.spatial.distance
    <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
    the metrics listed in
    :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
    values.

p : float, default=2
    Power parameter for the Minkowski metric. When p = 1, this is
    equivalent to using manhattan_distance (l1), and euclidean_distance
    (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

metric_params : dict, default=None
    Additional keyword arguments for the metric function.

include_self : bool or 'auto', default=False
    Whether or not to mark each sample as the first nearest neighbor to
    itself. If 'auto', then True is used for mode='connectivity' and False
    for mode='distance'.

n_jobs : int, default=None
    The number of parallel jobs to run for neighbors search. ``None`` means
    1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means
    using all processors. See :term:`Glossary <n_jobs>` for more details.

Returns
-------
A : sparse matrix of shape (n_samples, n_samples)
    Graph where A[i, j] is assigned the weight of edge that connects i to
    j. The matrix is of CSR format.

See Also
--------
kneighbors_graph: Compute the weighted graph of k-neighbors for points in X.

Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5, mode='connectivity',
...                            include_self=True)
>>> A.toarray()
array([[1., 0., 1.],
       [0., 1., 0.],
       [1., 0., 1.]])
radius_neighbors_graph
python
scikit-learn/scikit-learn
sklearn/neighbors/_graph.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_graph.py
BSD-3-Clause
def fit(self, X, y=None): """Fit the k-nearest neighbors transformer from the training dataset. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ (n_samples, n_samples) if metric='precomputed' Training data. y : Ignored Not used, present for API consistency by convention. Returns ------- self : KNeighborsTransformer The fitted k-nearest neighbors transformer. """ self._fit(X) self._n_features_out = self.n_samples_fit_ return self
Fit the k-nearest neighbors transformer from the training dataset. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed' Training data. y : Ignored Not used, present for API consistency by convention. Returns ------- self : KNeighborsTransformer The fitted k-nearest neighbors transformer.
fit
python
scikit-learn/scikit-learn
sklearn/neighbors/_graph.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_graph.py
BSD-3-Clause
def transform(self, X): """Compute the (weighted) graph of Neighbors for points in X. Parameters ---------- X : array-like of shape (n_samples_transform, n_features) Sample data. Returns ------- Xt : sparse matrix of shape (n_samples_transform, n_samples_fit) Xt[i, j] is assigned the weight of edge that connects i to j. Only the neighbors have an explicit value. The diagonal is always explicit. The matrix is of CSR format. """ check_is_fitted(self) add_one = self.mode == "distance" return self.kneighbors_graph( X, mode=self.mode, n_neighbors=self.n_neighbors + add_one )
Compute the (weighted) graph of Neighbors for points in X. Parameters ---------- X : array-like of shape (n_samples_transform, n_features) Sample data. Returns ------- Xt : sparse matrix of shape (n_samples_transform, n_samples_fit) Xt[i, j] is assigned the weight of edge that connects i to j. Only the neighbors have an explicit value. The diagonal is always explicit. The matrix is of CSR format.
transform
python
scikit-learn/scikit-learn
sklearn/neighbors/_graph.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_graph.py
BSD-3-Clause
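A hedged sketch of the intended caching pattern: precompute the neighbors graph once so a downstream estimator with metric='precomputed' can reuse it. The choice of Isomap and the parameter values here are illustrative assumptions, not part of the record.

from sklearn.datasets import make_blobs
from sklearn.manifold import Isomap
from sklearn.neighbors import KNeighborsTransformer
from sklearn.pipeline import make_pipeline

X, _ = make_blobs(n_samples=50, random_state=0)

# mode="distance" makes transform() request one extra neighbor (the
# `add_one` in the code above) so the explicit self-edge on the diagonal
# does not consume one of the n_neighbors slots.
pipe = make_pipeline(
    KNeighborsTransformer(n_neighbors=10, mode="distance"),
    Isomap(n_neighbors=10, metric="precomputed"),
)
X_embedded = pipe.fit_transform(X)
print(X_embedded.shape)  # (50, 2)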
def fit(self, X, y=None): """Fit the radius neighbors transformer from the training dataset. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ (n_samples, n_samples) if metric='precomputed' Training data. y : Ignored Not used, present for API consistency by convention. Returns ------- self : RadiusNeighborsTransformer The fitted radius neighbors transformer. """ self._fit(X) self._n_features_out = self.n_samples_fit_ return self
Fit the radius neighbors transformer from the training dataset. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed' Training data. y : Ignored Not used, present for API consistency by convention. Returns ------- self : RadiusNeighborsTransformer The fitted radius neighbors transformer.
fit
python
scikit-learn/scikit-learn
sklearn/neighbors/_graph.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_graph.py
BSD-3-Clause
def transform(self, X): """Compute the (weighted) graph of Neighbors for points in X. Parameters ---------- X : array-like of shape (n_samples_transform, n_features) Sample data. Returns ------- Xt : sparse matrix of shape (n_samples_transform, n_samples_fit) Xt[i, j] is assigned the weight of edge that connects i to j. Only the neighbors have an explicit value. The diagonal is always explicit. The matrix is of CSR format. """ check_is_fitted(self) return self.radius_neighbors_graph(X, mode=self.mode, sort_results=True)
Compute the (weighted) graph of Neighbors for points in X. Parameters ---------- X : array-like of shape (n_samples_transform, n_features) Sample data. Returns ------- Xt : sparse matrix of shape (n_samples_transform, n_samples_fit) Xt[i, j] is assigned the weight of edge that connects i to j. Only the neighbors have an explicit value. The diagonal is always explicit. The matrix is of CSR format.
transform
python
scikit-learn/scikit-learn
sklearn/neighbors/_graph.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_graph.py
BSD-3-Clause
def fit(self, X, y=None, sample_weight=None): """Fit the Kernel Density model on the data. Parameters ---------- X : array-like of shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. y : None Ignored. This parameter exists only for compatibility with :class:`~sklearn.pipeline.Pipeline`. sample_weight : array-like of shape (n_samples,), default=None List of sample weights attached to the data X. .. versionadded:: 0.20 Returns ------- self : object Returns the instance itself. """ algorithm = self._choose_algorithm(self.algorithm, self.metric) if isinstance(self.bandwidth, str): if self.bandwidth == "scott": self.bandwidth_ = X.shape[0] ** (-1 / (X.shape[1] + 4)) elif self.bandwidth == "silverman": self.bandwidth_ = (X.shape[0] * (X.shape[1] + 2) / 4) ** ( -1 / (X.shape[1] + 4) ) else: self.bandwidth_ = self.bandwidth X = validate_data(self, X, order="C", dtype=np.float64) if sample_weight is not None: sample_weight = _check_sample_weight( sample_weight, X, dtype=np.float64, ensure_non_negative=True ) kwargs = self.metric_params if kwargs is None: kwargs = {} self.tree_ = TREE_DICT[algorithm]( X, metric=self.metric, leaf_size=self.leaf_size, sample_weight=sample_weight, **kwargs, ) return self
Fit the Kernel Density model on the data. Parameters ---------- X : array-like of shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. y : None Ignored. This parameter exists only for compatibility with :class:`~sklearn.pipeline.Pipeline`. sample_weight : array-like of shape (n_samples,), default=None List of sample weights attached to the data X. .. versionadded:: 0.20 Returns ------- self : object Returns the instance itself.
fit
python
scikit-learn/scikit-learn
sklearn/neighbors/_kde.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_kde.py
BSD-3-Clause
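A quick sketch (data assumed) checking the Scott bandwidth rule exactly as implemented in fit above: n_samples ** (-1 / (n_features + 4)).

import numpy as np
from sklearn.neighbors import KernelDensity

rng = np.random.RandomState(0)
X = rng.normal(size=(100, 2))

kde = KernelDensity(kernel="gaussian", bandwidth="scott").fit(X)
# Scott's rule from the code above: n ** (-1 / (d + 4))
assert np.isclose(kde.bandwidth_, 100 ** (-1 / (2 + 4)))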
def score_samples(self, X): """Compute the log-likelihood of each sample under the model. Parameters ---------- X : array-like of shape (n_samples, n_features) An array of points to query. Last dimension should match dimension of training data (n_features). Returns ------- density : ndarray of shape (n_samples,) Log-likelihood of each sample in `X`. These are normalized to be probability densities, so values will be low for high-dimensional data. """ check_is_fitted(self) # The returned density is normalized to the number of points. # For it to be a probability, we must scale it. For this reason # we'll also scale atol. X = validate_data(self, X, order="C", dtype=np.float64, reset=False) if self.tree_.sample_weight is None: N = self.tree_.data.shape[0] else: N = self.tree_.sum_weight atol_N = self.atol * N log_density = self.tree_.kernel_density( X, h=self.bandwidth_, kernel=self.kernel, atol=atol_N, rtol=self.rtol, breadth_first=self.breadth_first, return_log=True, ) log_density -= np.log(N) return log_density
Compute the log-likelihood of each sample under the model. Parameters ---------- X : array-like of shape (n_samples, n_features) An array of points to query. Last dimension should match dimension of training data (n_features). Returns ------- density : ndarray of shape (n_samples,) Log-likelihood of each sample in `X`. These are normalized to be probability densities, so values will be low for high-dimensional data.
score_samples
python
scikit-learn/scikit-learn
sklearn/neighbors/_kde.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_kde.py
BSD-3-Clause
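A small sketch (toy data assumed) showing that score_samples returns log-densities, which can be exponentiated for comparison:

import numpy as np
from sklearn.neighbors import KernelDensity

X = np.array([[-1.0], [0.0], [1.0]])
kde = KernelDensity(kernel="gaussian", bandwidth=0.5).fit(X)

log_density = kde.score_samples(np.array([[0.0], [5.0]]))
# The point inside the data cloud receives a much higher density than the
# point far away from it.
print(np.exp(log_density))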
def sample(self, n_samples=1, random_state=None): """Generate random samples from the model. Currently, this is implemented only for gaussian and tophat kernels. Parameters ---------- n_samples : int, default=1 Number of samples to generate. random_state : int, RandomState instance or None, default=None Determines random number generation used to generate random samples. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : array-like of shape (n_samples, n_features) List of samples. """ check_is_fitted(self) # TODO: implement sampling for other valid kernel shapes if self.kernel not in ["gaussian", "tophat"]: raise NotImplementedError() data = np.asarray(self.tree_.data) rng = check_random_state(random_state) u = rng.uniform(0, 1, size=n_samples) if self.tree_.sample_weight is None: i = (u * data.shape[0]).astype(np.int64) else: cumsum_weight = np.cumsum(np.asarray(self.tree_.sample_weight)) sum_weight = cumsum_weight[-1] i = np.searchsorted(cumsum_weight, u * sum_weight) if self.kernel == "gaussian": return np.atleast_2d(rng.normal(data[i], self.bandwidth_)) elif self.kernel == "tophat": # we first draw points from a d-dimensional normal distribution, # then use an incomplete gamma function to map them to a uniform # d-dimensional tophat distribution. dim = data.shape[1] X = rng.normal(size=(n_samples, dim)) s_sq = row_norms(X, squared=True) correction = ( gammainc(0.5 * dim, 0.5 * s_sq) ** (1.0 / dim) * self.bandwidth_ / np.sqrt(s_sq) ) return data[i] + X * correction[:, np.newaxis]
Generate random samples from the model. Currently, this is implemented only for gaussian and tophat kernels. Parameters ---------- n_samples : int, default=1 Number of samples to generate. random_state : int, RandomState instance or None, default=None Determines random number generation used to generate random samples. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : array-like of shape (n_samples, n_features) List of samples.
sample
python
scikit-learn/scikit-learn
sklearn/neighbors/_kde.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_kde.py
BSD-3-Clause
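A minimal sketch of drawing from a fitted model; as the code above enforces, only the gaussian and tophat kernels support sampling. The data are assumed.

import numpy as np
from sklearn.neighbors import KernelDensity

X = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 0.0]])
kde = KernelDensity(kernel="gaussian", bandwidth=0.2).fit(X)

# Draw five new points; random_state makes the draw reproducible.
new_points = kde.sample(n_samples=5, random_state=0)
print(new_points.shape)  # (5, 2)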
def fit_predict(self, X, y=None):
    """Fit the model to the training set X and return the labels.

    **Not available for novelty detection (when novelty is set to True).**
    Label is 1 for an inlier and -1 for an outlier according to the LOF
    score and the contamination parameter.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The query sample or samples to compute the Local Outlier Factor
        w.r.t. the training samples.

    y : Ignored
        Not used, present for API consistency by convention.

    Returns
    -------
    is_inlier : ndarray of shape (n_samples,)
        Returns -1 for anomalies/outliers and 1 for inliers.
    """

    # As fit_predict would be different from fit.predict, fit_predict is
    # only available for outlier detection (novelty=False)

    return self.fit(X)._predict()
Fit the model to the training set X and return the labels.

**Not available for novelty detection (when novelty is set to True).**
Label is 1 for an inlier and -1 for an outlier according to the LOF score
and the contamination parameter.

Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
    The query sample or samples to compute the Local Outlier Factor
    w.r.t. the training samples.

y : Ignored
    Not used, present for API consistency by convention.

Returns
-------
is_inlier : ndarray of shape (n_samples,)
    Returns -1 for anomalies/outliers and 1 for inliers.
fit_predict
python
scikit-learn/scikit-learn
sklearn/neighbors/_lof.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_lof.py
BSD-3-Clause
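A usage sketch matching the outlier-detection mode (novelty=False, the default); the toy data places one obvious outlier.

import numpy as np
from sklearn.neighbors import LocalOutlierFactor

X = np.array([[-1.1], [0.2], [101.1], [0.3]])
lof = LocalOutlierFactor(n_neighbors=2)
# The isolated point 101.1 is labeled -1; the rest are inliers (+1).
print(lof.fit_predict(X))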
def fit(self, X, y=None): """Fit the local outlier factor detector from the training dataset. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ (n_samples, n_samples) if metric='precomputed' Training data. y : Ignored Not used, present for API consistency by convention. Returns ------- self : LocalOutlierFactor The fitted local outlier factor detector. """ self._fit(X) n_samples = self.n_samples_fit_ if self.n_neighbors > n_samples: warnings.warn( "n_neighbors (%s) is greater than the " "total number of samples (%s). n_neighbors " "will be set to (n_samples - 1) for estimation." % (self.n_neighbors, n_samples) ) self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1)) self._distances_fit_X_, _neighbors_indices_fit_X_ = self.kneighbors( n_neighbors=self.n_neighbors_ ) if self._fit_X.dtype == np.float32: self._distances_fit_X_ = self._distances_fit_X_.astype( self._fit_X.dtype, copy=False, ) self._lrd = self._local_reachability_density( self._distances_fit_X_, _neighbors_indices_fit_X_ ) # Compute lof score over training samples to define offset_: lrd_ratios_array = ( self._lrd[_neighbors_indices_fit_X_] / self._lrd[:, np.newaxis] ) self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1) if self.contamination == "auto": # inliers score around -1 (the higher, the less abnormal). self.offset_ = -1.5 else: self.offset_ = np.percentile( self.negative_outlier_factor_, 100.0 * self.contamination ) # Verify if negative_outlier_factor_ values are within acceptable range. # Novelty must also be false to detect outliers if np.min(self.negative_outlier_factor_) < -1e7 and not self.novelty: warnings.warn( "Duplicate values are leading to incorrect results. " "Increase the number of neighbors for more accurate results." ) return self
Fit the local outlier factor detector from the training dataset. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed' Training data. y : Ignored Not used, present for API consistency by convention. Returns ------- self : LocalOutlierFactor The fitted local outlier factor detector.
fit
python
scikit-learn/scikit-learn
sklearn/neighbors/_lof.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_lof.py
BSD-3-Clause
def _predict(self, X=None): """Predict the labels (1 inlier, -1 outlier) of X according to LOF. If X is None, returns the same as fit_predict(X_train). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None The query sample or samples to compute the Local Outlier Factor w.r.t. the training samples. If None, makes prediction on the training data without considering them as their own neighbors. Returns ------- is_inlier : ndarray of shape (n_samples,) Returns -1 for anomalies/outliers and +1 for inliers. """ check_is_fitted(self) if X is not None: shifted_opposite_lof_scores = self.decision_function(X) is_inlier = np.ones(shifted_opposite_lof_scores.shape[0], dtype=int) is_inlier[shifted_opposite_lof_scores < 0] = -1 else: is_inlier = np.ones(self.n_samples_fit_, dtype=int) is_inlier[self.negative_outlier_factor_ < self.offset_] = -1 return is_inlier
Predict the labels (1 inlier, -1 outlier) of X according to LOF. If X is None, returns the same as fit_predict(X_train). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None The query sample or samples to compute the Local Outlier Factor w.r.t. the training samples. If None, makes prediction on the training data without considering them as their own neighbors. Returns ------- is_inlier : ndarray of shape (n_samples,) Returns -1 for anomalies/outliers and +1 for inliers.
_predict
python
scikit-learn/scikit-learn
sklearn/neighbors/_lof.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_lof.py
BSD-3-Clause
def score_samples(self, X):
    """Opposite of the Local Outlier Factor of X.

    It is the opposite as bigger is better, i.e. large values correspond
    to inliers.

    **Only available for novelty detection (when novelty is set to True).**
    The argument X is supposed to contain *new data*: if X contains a
    point from training, it considers the latter in its own neighborhood.
    Also, the samples in X are not considered in the neighborhood of any
    point. Because of this, the scores obtained via ``score_samples`` may
    differ from the standard LOF scores.
    The standard LOF scores for the training data are available via the
    ``negative_outlier_factor_`` attribute.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The query sample or samples to compute the Local Outlier Factor
        w.r.t. the training samples.

    Returns
    -------
    opposite_lof_scores : ndarray of shape (n_samples,)
        The opposite of the Local Outlier Factor of each input sample.
        The lower, the more abnormal.
    """
    check_is_fitted(self)
    X = check_array(X, accept_sparse="csr")

    distances_X, neighbors_indices_X = self.kneighbors(
        X, n_neighbors=self.n_neighbors_
    )

    if X.dtype == np.float32:
        distances_X = distances_X.astype(X.dtype, copy=False)

    X_lrd = self._local_reachability_density(
        distances_X,
        neighbors_indices_X,
    )

    lrd_ratios_array = self._lrd[neighbors_indices_X] / X_lrd[:, np.newaxis]

    # as bigger is better:
    return -np.mean(lrd_ratios_array, axis=1)
Opposite of the Local Outlier Factor of X.

It is the opposite as bigger is better, i.e. large values correspond to
inliers.

**Only available for novelty detection (when novelty is set to True).**
The argument X is supposed to contain *new data*: if X contains a point
from training, it considers the latter in its own neighborhood. Also, the
samples in X are not considered in the neighborhood of any point. Because
of this, the scores obtained via ``score_samples`` may differ from the
standard LOF scores. The standard LOF scores for the training data are
available via the ``negative_outlier_factor_`` attribute.

Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
    The query sample or samples to compute the Local Outlier Factor
    w.r.t. the training samples.

Returns
-------
opposite_lof_scores : ndarray of shape (n_samples,)
    The opposite of the Local Outlier Factor of each input sample.
    The lower, the more abnormal.
score_samples
python
scikit-learn/scikit-learn
sklearn/neighbors/_lof.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_lof.py
BSD-3-Clause
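A hedged sketch of the novelty-detection path documented by score_samples; the data and n_neighbors value are assumptions.

import numpy as np
from sklearn.neighbors import LocalOutlierFactor

rng = np.random.RandomState(0)
X_train = rng.normal(size=(100, 2))

lof = LocalOutlierFactor(n_neighbors=20, novelty=True).fit(X_train)
# score_samples is only exposed when novelty=True; lower scores are more
# abnormal, so the far-away point should score well below the first one.
scores = lof.score_samples(np.array([[0.0, 0.0], [6.0, 6.0]]))
print(scores)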
def _local_reachability_density(self, distances_X, neighbors_indices):
    """The local reachability density (LRD)

    The LRD of a sample is the inverse of the average reachability
    distance of its k-nearest neighbors.

    Parameters
    ----------
    distances_X : ndarray of shape (n_queries, self.n_neighbors)
        Distances to the neighbors (in the training samples `self._fit_X`)
        of each query point to compute the LRD.

    neighbors_indices : ndarray of shape (n_queries, self.n_neighbors)
        Neighbors indices (of each query point) among training samples
        self._fit_X.

    Returns
    -------
    local_reachability_density : ndarray of shape (n_queries,)
        The local reachability density of each sample.
    """
    dist_k = self._distances_fit_X_[neighbors_indices, self.n_neighbors_ - 1]
    reach_dist_array = np.maximum(distances_X, dist_k)

    # 1e-10 to avoid `nan` when the number of duplicates exceeds
    # n_neighbors_:
    return 1.0 / (np.mean(reach_dist_array, axis=1) + 1e-10)
The local reachability density (LRD) The LRD of a sample is the inverse of the average reachability distance of its k-nearest neighbors. Parameters ---------- distances_X : ndarray of shape (n_queries, self.n_neighbors) Distances to the neighbors (in the training samples `self._fit_X`) of each query point to compute the LRD. neighbors_indices : ndarray of shape (n_queries, self.n_neighbors) Neighbors indices (of each query point) among training samples self._fit_X. Returns ------- local_reachability_density : ndarray of shape (n_queries,) The local reachability density of each sample.
_local_reachability_density
python
scikit-learn/scikit-learn
sklearn/neighbors/_lof.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_lof.py
BSD-3-Clause
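For reference, the quantities the code above computes, written out following the usual LOF formulation (Breunig et al., 2000); the 1e-10 regularizer in the code is left implicit here:

\[
\text{reach-dist}_k(A, B) = \max\bigl(\text{k-distance}(B),\; d(A, B)\bigr)
\]
\[
\mathrm{lrd}_k(A) = \Biggl(\frac{1}{|N_k(A)|}\sum_{B \in N_k(A)} \text{reach-dist}_k(A, B)\Biggr)^{-1}
\]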
def fit(self, X, y): """Fit the model according to the given training data. Parameters ---------- X : array-like of shape (n_samples, n_features) The training samples. y : array-like of shape (n_samples,) The corresponding training labels. Returns ------- self : object Fitted estimator. """ # Validate the inputs X and y, and converts y to numerical classes. X, y = validate_data(self, X, y, ensure_min_samples=2) check_classification_targets(y) y = LabelEncoder().fit_transform(y) # Check the preferred dimensionality of the projected space if self.n_components is not None and self.n_components > X.shape[1]: raise ValueError( "The preferred dimensionality of the " f"projected space `n_components` ({self.n_components}) cannot " "be greater than the given data " f"dimensionality ({X.shape[1]})!" ) # If warm_start is enabled, check that the inputs are consistent if ( self.warm_start and hasattr(self, "components_") and self.components_.shape[1] != X.shape[1] ): raise ValueError( f"The new inputs dimensionality ({X.shape[1]}) does not " "match the input dimensionality of the " f"previously learned transformation ({self.components_.shape[1]})." ) # Check how the linear transformation should be initialized init = self.init if isinstance(init, np.ndarray): init = check_array(init) # Assert that init.shape[1] = X.shape[1] if init.shape[1] != X.shape[1]: raise ValueError( f"The input dimensionality ({init.shape[1]}) of the given " "linear transformation `init` must match the " f"dimensionality of the given inputs `X` ({X.shape[1]})." ) # Assert that init.shape[0] <= init.shape[1] if init.shape[0] > init.shape[1]: raise ValueError( f"The output dimensionality ({init.shape[0]}) of the given " "linear transformation `init` cannot be " f"greater than its input dimensionality ({init.shape[1]})." ) # Assert that self.n_components = init.shape[0] if self.n_components is not None and self.n_components != init.shape[0]: raise ValueError( "The preferred dimensionality of the " f"projected space `n_components` ({self.n_components}) does" " not match the output dimensionality of " "the given linear transformation " f"`init` ({init.shape[0]})!" ) # Initialize the random generator self.random_state_ = check_random_state(self.random_state) # Measure the total training time t_train = time.time() # Compute a mask that stays fixed during optimization: same_class_mask = y[:, np.newaxis] == y[np.newaxis, :] # (n_samples, n_samples) # Initialize the transformation transformation = np.ravel(self._initialize(X, y, init)) # Create a dictionary of parameters to be passed to the optimizer disp = self.verbose - 2 if self.verbose > 1 else -1 optimizer_params = { "method": "L-BFGS-B", "fun": self._loss_grad_lbfgs, "args": (X, same_class_mask, -1.0), "jac": True, "x0": transformation, "tol": self.tol, "options": dict(maxiter=self.max_iter, disp=disp), "callback": self._callback, } # Call the optimizer self.n_iter_ = 0 opt_result = minimize(**optimizer_params) # Reshape the solution found by the optimizer self.components_ = opt_result.x.reshape(-1, X.shape[1]) # Stop timer t_train = time.time() - t_train if self.verbose: cls_name = self.__class__.__name__ # Warn the user if the algorithm did not converge if not opt_result.success: warn( "[{}] NCA did not converge: {}".format( cls_name, opt_result.message ), ConvergenceWarning, ) print("[{}] Training took {:8.2f}s.".format(cls_name, t_train)) return self
Fit the model according to the given training data. Parameters ---------- X : array-like of shape (n_samples, n_features) The training samples. y : array-like of shape (n_samples,) The corresponding training labels. Returns ------- self : object Fitted estimator.
fit
python
scikit-learn/scikit-learn
sklearn/neighbors/_nca.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_nca.py
BSD-3-Clause
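A short usage sketch (the dataset and split are illustrative): learning a metric with NCA and feeding the transformed space to a k-NN classifier.

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier, NeighborhoodComponentsAnalysis
from sklearn.pipeline import make_pipeline

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

# NCA learns a linear map that clusters same-class points; k-NN then
# operates in the 2-dimensional embedded space.
nca_knn = make_pipeline(
    NeighborhoodComponentsAnalysis(n_components=2, random_state=42),
    KNeighborsClassifier(n_neighbors=3),
)
nca_knn.fit(X_train, y_train)
print(nca_knn.score(X_test, y_test))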
def transform(self, X): """Apply the learned transformation to the given data. Parameters ---------- X : array-like of shape (n_samples, n_features) Data samples. Returns ------- X_embedded: ndarray of shape (n_samples, n_components) The data samples transformed. Raises ------ NotFittedError If :meth:`fit` has not been called before. """ check_is_fitted(self) X = validate_data(self, X, reset=False) return np.dot(X, self.components_.T)
Apply the learned transformation to the given data. Parameters ---------- X : array-like of shape (n_samples, n_features) Data samples. Returns ------- X_embedded: ndarray of shape (n_samples, n_components) The data samples transformed. Raises ------ NotFittedError If :meth:`fit` has not been called before.
transform
python
scikit-learn/scikit-learn
sklearn/neighbors/_nca.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_nca.py
BSD-3-Clause
def _initialize(self, X, y, init): """Initialize the transformation. Parameters ---------- X : array-like of shape (n_samples, n_features) The training samples. y : array-like of shape (n_samples,) The training labels. init : str or ndarray of shape (n_features_a, n_features_b) The validated initialization of the linear transformation. Returns ------- transformation : ndarray of shape (n_components, n_features) The initialized linear transformation. """ transformation = init if self.warm_start and hasattr(self, "components_"): transformation = self.components_ elif isinstance(init, np.ndarray): pass else: n_samples, n_features = X.shape n_components = self.n_components or n_features if init == "auto": n_classes = len(np.unique(y)) if n_components <= min(n_features, n_classes - 1): init = "lda" elif n_components < min(n_features, n_samples): init = "pca" else: init = "identity" if init == "identity": transformation = np.eye(n_components, X.shape[1]) elif init == "random": transformation = self.random_state_.standard_normal( size=(n_components, X.shape[1]) ) elif init in {"pca", "lda"}: init_time = time.time() if init == "pca": pca = PCA( n_components=n_components, random_state=self.random_state_ ) if self.verbose: print("Finding principal components... ", end="") sys.stdout.flush() pca.fit(X) transformation = pca.components_ elif init == "lda": from ..discriminant_analysis import LinearDiscriminantAnalysis lda = LinearDiscriminantAnalysis(n_components=n_components) if self.verbose: print("Finding most discriminative components... ", end="") sys.stdout.flush() lda.fit(X, y) transformation = lda.scalings_.T[:n_components] if self.verbose: print("done in {:5.2f}s".format(time.time() - init_time)) return transformation
Initialize the transformation. Parameters ---------- X : array-like of shape (n_samples, n_features) The training samples. y : array-like of shape (n_samples,) The training labels. init : str or ndarray of shape (n_features_a, n_features_b) The validated initialization of the linear transformation. Returns ------- transformation : ndarray of shape (n_components, n_features) The initialized linear transformation.
_initialize
python
scikit-learn/scikit-learn
sklearn/neighbors/_nca.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_nca.py
BSD-3-Clause
def _callback(self, transformation): """Called after each iteration of the optimizer. Parameters ---------- transformation : ndarray of shape (n_components * n_features,) The solution computed by the optimizer in this iteration. """ if self.callback is not None: self.callback(transformation, self.n_iter_) self.n_iter_ += 1
Called after each iteration of the optimizer. Parameters ---------- transformation : ndarray of shape (n_components * n_features,) The solution computed by the optimizer in this iteration.
_callback
python
scikit-learn/scikit-learn
sklearn/neighbors/_nca.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_nca.py
BSD-3-Clause
def _loss_grad_lbfgs(self, transformation, X, same_class_mask, sign=1.0): """Compute the loss and the loss gradient w.r.t. `transformation`. Parameters ---------- transformation : ndarray of shape (n_components * n_features,) The raveled linear transformation on which to compute loss and evaluate gradient. X : ndarray of shape (n_samples, n_features) The training samples. same_class_mask : ndarray of shape (n_samples, n_samples) A mask where `mask[i, j] == 1` if `X[i]` and `X[j]` belong to the same class, and `0` otherwise. Returns ------- loss : float The loss computed for the given transformation. gradient : ndarray of shape (n_components * n_features,) The new (flattened) gradient of the loss. """ if self.n_iter_ == 0: self.n_iter_ += 1 if self.verbose: header_fields = ["Iteration", "Objective Value", "Time(s)"] header_fmt = "{:>10} {:>20} {:>10}" header = header_fmt.format(*header_fields) cls_name = self.__class__.__name__ print("[{}]".format(cls_name)) print( "[{}] {}\n[{}] {}".format( cls_name, header, cls_name, "-" * len(header) ) ) t_funcall = time.time() transformation = transformation.reshape(-1, X.shape[1]) X_embedded = np.dot(X, transformation.T) # (n_samples, n_components) # Compute softmax distances p_ij = pairwise_distances(X_embedded, squared=True) np.fill_diagonal(p_ij, np.inf) p_ij = softmax(-p_ij) # (n_samples, n_samples) # Compute loss masked_p_ij = p_ij * same_class_mask p = np.sum(masked_p_ij, axis=1, keepdims=True) # (n_samples, 1) loss = np.sum(p) # Compute gradient of loss w.r.t. `transform` weighted_p_ij = masked_p_ij - p_ij * p weighted_p_ij_sym = weighted_p_ij + weighted_p_ij.T np.fill_diagonal(weighted_p_ij_sym, -weighted_p_ij.sum(axis=0)) gradient = 2 * X_embedded.T.dot(weighted_p_ij_sym).dot(X) # time complexity of the gradient: O(n_components x n_samples x ( # n_samples + n_features)) if self.verbose: t_funcall = time.time() - t_funcall values_fmt = "[{}] {:>10} {:>20.6e} {:>10.2f}" print( values_fmt.format( self.__class__.__name__, self.n_iter_, loss, t_funcall ) ) sys.stdout.flush() return sign * loss, sign * gradient.ravel()
Compute the loss and the loss gradient w.r.t. `transformation`. Parameters ---------- transformation : ndarray of shape (n_components * n_features,) The raveled linear transformation on which to compute loss and evaluate gradient. X : ndarray of shape (n_samples, n_features) The training samples. same_class_mask : ndarray of shape (n_samples, n_samples) A mask where `mask[i, j] == 1` if `X[i]` and `X[j]` belong to the same class, and `0` otherwise. Returns ------- loss : float The loss computed for the given transformation. gradient : ndarray of shape (n_components * n_features,) The new (flattened) gradient of the loss.
_loss_grad_lbfgs
python
scikit-learn/scikit-learn
sklearn/neighbors/_nca.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_nca.py
BSD-3-Clause
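For reference, the objective that _loss_grad_lbfgs evaluates (sign=-1.0 passed from fit turns the maximization into a minimization for L-BFGS); this follows the standard NCA formulation:

\[
p_{ij} = \frac{\exp\bigl(-\lVert L x_i - L x_j \rVert^2\bigr)}
              {\sum_{k \neq i} \exp\bigl(-\lVert L x_i - L x_k \rVert^2\bigr)},
\qquad p_{ii} = 0,
\qquad
\mathcal{L}(L) = \sum_i \sum_{j:\, y_j = y_i} p_{ij}
\]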
def fit(self, X, y):
    """
    Fit the NearestCentroid model according to the given training data.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training vector, where `n_samples` is the number of samples and
        `n_features` is the number of features.
        Note that centroid shrinking cannot be used with sparse matrices.
    y : array-like of shape (n_samples,)
        Target values.

    Returns
    -------
    self : object
        Fitted estimator.
    """
    # If X is sparse and the metric is "manhattan", store it in CSC
    # format, which makes it easier to calculate the median.
    if self.metric == "manhattan":
        X, y = validate_data(self, X, y, accept_sparse=["csc"])
    else:
        ensure_all_finite = (
            "allow-nan" if get_tags(self).input_tags.allow_nan else True
        )
        X, y = validate_data(
            self,
            X,
            y,
            ensure_all_finite=ensure_all_finite,
            accept_sparse=["csr", "csc"],
        )
    is_X_sparse = sp.issparse(X)
    check_classification_targets(y)

    n_samples, n_features = X.shape
    le = LabelEncoder()
    y_ind = le.fit_transform(y)
    self.classes_ = classes = le.classes_
    n_classes = classes.size
    if n_classes < 2:
        raise ValueError(
            "The number of classes has to be greater than one; got %d class"
            % (n_classes)
        )

    if self.priors == "empirical":  # estimate priors from sample
        _, class_counts = np.unique(y, return_inverse=True)  # non-negative ints
        self.class_prior_ = np.bincount(class_counts) / float(len(y))
    elif self.priors == "uniform":
        self.class_prior_ = np.asarray([1 / n_classes] * n_classes)
    else:
        self.class_prior_ = np.asarray(self.priors)

    if (self.class_prior_ < 0).any():
        raise ValueError("priors must be non-negative")

    if not np.isclose(self.class_prior_.sum(), 1.0):
        warnings.warn(
            "The priors do not sum to 1. Normalizing such that it sums to one.",
            UserWarning,
        )
        self.class_prior_ = self.class_prior_ / self.class_prior_.sum()

    # Centroid of each class.
    self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
    # Number of samples in each class.
    nk = np.zeros(n_classes)

    for cur_class in range(n_classes):
        center_mask = y_ind == cur_class
        nk[cur_class] = np.sum(center_mask)
        if is_X_sparse:
            center_mask = np.where(center_mask)[0]

        if self.metric == "manhattan":
            # NumPy does not calculate median of sparse matrices.
            if not is_X_sparse:
                self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
            else:
                self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
        else:  # metric == "euclidean"
            self.centroids_[cur_class] = X[center_mask].mean(axis=0)

    # Compute within-class std_dev with unshrunken centroids.
    variance = np.array(X - self.centroids_[y_ind], copy=False) ** 2
    self.within_class_std_dev_ = np.array(
        np.sqrt(variance.sum(axis=0) / (n_samples - n_classes)), copy=False
    )
    if any(self.within_class_std_dev_ == 0):
        warnings.warn(
            "self.within_class_std_dev_ has at least 1 zero standard deviation. "
            "Inputs within the same classes for at least 1 feature are identical."
        )
    err_msg = "All features have zero variance. Division by zero."
    if is_X_sparse and np.all((X.max(axis=0) - X.min(axis=0)).toarray() == 0):
        raise ValueError(err_msg)
    elif not is_X_sparse and np.all(np.ptp(X, axis=0) == 0):
        raise ValueError(err_msg)

    dataset_centroid_ = X.mean(axis=0)

    # m parameter for determining deviation
    m = np.sqrt((1.0 / nk) - (1.0 / n_samples))

    # Calculate deviation using the standard deviation of centroids, to
    # prevent outliers from affecting the results.
    s = self.within_class_std_dev_ + np.median(self.within_class_std_dev_)
    mm = m.reshape(len(m), 1)  # Reshape to allow broadcasting.
ms = mm * s self.deviations_ = np.array( (self.centroids_ - dataset_centroid_) / ms, copy=False ) # Soft thresholding: if the deviation crosses 0 during shrinking, # it becomes zero. if self.shrink_threshold: signs = np.sign(self.deviations_) self.deviations_ = np.abs(self.deviations_) - self.shrink_threshold np.clip(self.deviations_, 0, None, out=self.deviations_) self.deviations_ *= signs # Now adjust the centroids using the deviation msd = ms * self.deviations_ self.centroids_ = np.array(dataset_centroid_ + msd, copy=False) return self
Fit the NearestCentroid model according to the given training data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. Note that centroid shrinking cannot be used with sparse matrices. y : array-like of shape (n_samples,) Target values. Returns ------- self : object Fitted estimator.
fit
python
scikit-learn/scikit-learn
sklearn/neighbors/_nearest_centroid.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_nearest_centroid.py
BSD-3-Clause
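A minimal usage sketch (toy data assumed) exercising the shrinkage path that the deviations computed above feed into:

import numpy as np
from sklearn.neighbors import NearestCentroid

X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
y = np.array([1, 1, 1, 2, 2, 2])

# shrink_threshold moves each centroid toward the overall data centroid,
# feature by feature; deviations that cross zero are truncated to zero.
clf = NearestCentroid(shrink_threshold=0.1).fit(X, y)
print(clf.predict(np.array([[-0.8, -1.0]])))  # [1]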
def predict(self, X): """Perform classification on an array of test vectors `X`. The predicted class `C` for each sample in `X` is returned. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data. Returns ------- y_pred : ndarray of shape (n_samples,) The predicted classes. """ check_is_fitted(self) if np.isclose(self.class_prior_, 1 / len(self.classes_)).all(): # `validate_data` is called here since we are not calling `super()` ensure_all_finite = ( "allow-nan" if get_tags(self).input_tags.allow_nan else True ) X = validate_data( self, X, ensure_all_finite=ensure_all_finite, accept_sparse="csr", reset=False, ) return self.classes_[ pairwise_distances_argmin(X, self.centroids_, metric=self.metric) ] else: return super().predict(X)
Perform classification on an array of test vectors `X`. The predicted class `C` for each sample in `X` is returned. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data. Returns ------- y_pred : ndarray of shape (n_samples,) The predicted classes.
predict
python
scikit-learn/scikit-learn
sklearn/neighbors/_nearest_centroid.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_nearest_centroid.py
BSD-3-Clause
def predict(self, X):
    """Predict the target for the provided data.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_queries, n_features), \
            or (n_queries, n_indexed) if metric == 'precomputed', or None
        Test samples. If `None`, predictions for all indexed points are
        returned; in this case, points are not considered their own
        neighbors.

    Returns
    -------
    y : ndarray of shape (n_queries,) or (n_queries, n_outputs), dtype=double
        Target values.
    """
    if self.weights == "uniform":
        # In that case, we do not need the distances to perform
        # the weighting so we do not compute them.
        neigh_ind = self.kneighbors(X, return_distance=False)
        neigh_dist = None
    else:
        neigh_dist, neigh_ind = self.kneighbors(X)

    weights = _get_weights(neigh_dist, self.weights)

    _y = self._y
    if _y.ndim == 1:
        _y = _y.reshape((-1, 1))

    if weights is None:
        y_pred = np.mean(_y[neigh_ind], axis=1)
    else:
        y_pred = np.empty((neigh_dist.shape[0], _y.shape[1]), dtype=np.float64)
        denom = np.sum(weights, axis=1)

        for j in range(_y.shape[1]):
            num = np.sum(_y[neigh_ind, j] * weights, axis=1)
            y_pred[:, j] = num / denom

    if self._y.ndim == 1:
        y_pred = y_pred.ravel()

    return y_pred
Predict the target for the provided data.

Parameters
----------
X : {array-like, sparse matrix} of shape (n_queries, n_features), or
    (n_queries, n_indexed) if metric == 'precomputed', or None
    Test samples. If `None`, predictions for all indexed points are
    returned; in this case, points are not considered their own neighbors.

Returns
-------
y : ndarray of shape (n_queries,) or (n_queries, n_outputs), dtype=double
    Target values.
predict
python
scikit-learn/scikit-learn
sklearn/neighbors/_regression.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_regression.py
BSD-3-Clause
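A sketch of both weighting branches above (toy data assumed): uniform averaging versus the distance-weighted mean.

import numpy as np
from sklearn.neighbors import KNeighborsRegressor

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0.0, 0.0, 1.0, 1.0])

uniform = KNeighborsRegressor(n_neighbors=2, weights="uniform").fit(X, y)
weighted = KNeighborsRegressor(n_neighbors=2, weights="distance").fit(X, y)

# Uniform averaging of the two neighbors gives 0.5; distance weighting
# pulls the prediction toward the closer neighbor at x=2 (0.6).
print(uniform.predict([[1.6]]), weighted.predict([[1.6]]))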
def predict(self, X): """Predict the target for the provided data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), \ or (n_queries, n_indexed) if metric == 'precomputed', or None Test samples. If `None`, predictions for all indexed points are returned; in this case, points are not considered their own neighbors. Returns ------- y : ndarray of shape (n_queries,) or (n_queries, n_outputs), \ dtype=double Target values. """ neigh_dist, neigh_ind = self.radius_neighbors(X) weights = _get_weights(neigh_dist, self.weights) _y = self._y if _y.ndim == 1: _y = _y.reshape((-1, 1)) empty_obs = np.full_like(_y[0], np.nan) if weights is None: y_pred = np.array( [ np.mean(_y[ind, :], axis=0) if len(ind) else empty_obs for (i, ind) in enumerate(neigh_ind) ] ) else: y_pred = np.array( [ ( np.average(_y[ind, :], axis=0, weights=weights[i]) if len(ind) else empty_obs ) for (i, ind) in enumerate(neigh_ind) ] ) if np.any(np.isnan(y_pred)): empty_warning_msg = ( "One or more samples have no neighbors " "within specified radius; predicting NaN." ) warnings.warn(empty_warning_msg) if self._y.ndim == 1: y_pred = y_pred.ravel() return y_pred
Predict the target for the provided data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed', or None Test samples. If `None`, predictions for all indexed points are returned; in this case, points are not considered their own neighbors. Returns ------- y : ndarray of shape (n_queries,) or (n_queries, n_outputs), dtype=double Target values.
predict
python
scikit-learn/scikit-learn
sklearn/neighbors/_regression.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/_regression.py
BSD-3-Clause
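A sketch (toy data assumed) demonstrating the empty-neighborhood branch above: queries with no neighbors within the radius are predicted as NaN, with a warning.

import numpy as np
from sklearn.neighbors import RadiusNeighborsRegressor

X = np.array([[0.0], [1.0], [2.0]])
y = np.array([0.0, 1.0, 2.0])

reg = RadiusNeighborsRegressor(radius=0.5).fit(X, y)
# The second query point has no training sample within radius 0.5, so its
# prediction is NaN and the warning from the code above is emitted.
print(reg.predict(np.array([[1.1], [10.0]])))  # [1.0, nan]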
def test_array_object_type(BallTreeImplementation): """Check that we do not accept object dtype array.""" X = np.array([(1, 2, 3), (2, 5), (5, 5, 1, 2)], dtype=object) with pytest.raises(ValueError, match="setting an array element with a sequence"): BallTreeImplementation(X)
Check that we do not accept object dtype array.
test_array_object_type
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_ball_tree.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_ball_tree.py
BSD-3-Clause
def _has_explicit_diagonal(X): """Return True if the diagonal is explicitly stored""" X = X.tocoo() explicit = X.row[X.row == X.col] return len(explicit) == X.shape[0]
Return True if the diagonal is explicitly stored
_has_explicit_diagonal
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_graph.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_graph.py
BSD-3-Clause
def test_graph_feature_names_out(Klass): """Check `get_feature_names_out` for transformers defined in `_graph.py`.""" n_samples_fit = 20 n_features = 10 rng = np.random.RandomState(42) X = rng.randn(n_samples_fit, n_features) est = Klass().fit(X) names_out = est.get_feature_names_out() class_name_lower = Klass.__name__.lower() expected_names_out = np.array( [f"{class_name_lower}{i}" for i in range(est.n_samples_fit_)], dtype=object, ) assert_array_equal(names_out, expected_names_out)
Check `get_feature_names_out` for transformers defined in `_graph.py`.
test_graph_feature_names_out
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_graph.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_graph.py
BSD-3-Clause
def test_array_object_type(BinarySearchTree): """Check that we do not accept object dtype array.""" X = np.array([(1, 2, 3), (2, 5), (5, 5, 1, 2)], dtype=object) with pytest.raises(ValueError, match="setting an array element with a sequence"): BinarySearchTree(X)
Check that we do not accept object dtype array.
test_array_object_type
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_kd_tree.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_kd_tree.py
BSD-3-Clause
def test_kdtree_picklable_with_joblib(BinarySearchTree):
    """Make sure that KDTree queries work when joblib memmaps.

    Non-regression test for #21685 and #21228."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((10, 3))
    tree = BinarySearchTree(X, leaf_size=2)

    # Call Parallel with max_nbytes=1 to trigger readonly memory mapping that
    # used to raise "ValueError: buffer source array is read-only" in a
    # previous version of the Cython code.
    Parallel(n_jobs=2, max_nbytes=1)(delayed(tree.query)(data) for data in 2 * [X])
Make sure that KDTree queries work when joblib memmaps. Non-regression test for #21685 and #21228.
test_kdtree_picklable_with_joblib
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_kd_tree.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_kd_tree.py
BSD-3-Clause