Dataset columns:
  code        string, lengths 66 to 870k
  docstring   string, lengths 19 to 26.7k
  func_name   string, lengths 1 to 138
  language    string, 1 class
  repo        string, lengths 7 to 68
  path        string, lengths 5 to 324
  url         string, lengths 46 to 389
  license     string, 7 classes
def test_lof_error_n_neighbors_too_large():
    """Check that we raise a proper error message when n_neighbors == n_samples.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/17207
    """
    X = np.ones((7, 7))

    msg = (
        "Expected n_neighbors < n_samples_fit, but n_neighbors = 1, "
        "n_samples_fit = 1, n_samples = 1"
    )
    with pytest.raises(ValueError, match=msg):
        lof = neighbors.LocalOutlierFactor(n_neighbors=1).fit(X[:1])

    lof = neighbors.LocalOutlierFactor(n_neighbors=2).fit(X[:2])
    assert lof.n_samples_fit_ == 2

    msg = (
        "Expected n_neighbors < n_samples_fit, but n_neighbors = 2, "
        "n_samples_fit = 2, n_samples = 2"
    )
    with pytest.raises(ValueError, match=msg):
        lof.kneighbors(None, n_neighbors=2)

    distances, indices = lof.kneighbors(None, n_neighbors=1)
    assert distances.shape == (2, 1)
    assert indices.shape == (2, 1)

    msg = (
        "Expected n_neighbors <= n_samples_fit, but n_neighbors = 3, "
        "n_samples_fit = 2, n_samples = 7"
    )
    with pytest.raises(ValueError, match=msg):
        lof.kneighbors(X, n_neighbors=3)

    (
        distances,
        indices,
    ) = lof.kneighbors(X, n_neighbors=2)
    assert distances.shape == (7, 2)
    assert indices.shape == (7, 2)
Check that we raise a proper error message when n_neighbors == n_samples. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/17207
test_lof_error_n_neighbors_too_large
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_lof.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_lof.py
BSD-3-Clause
def test_lof_input_dtype_preservation(global_dtype, algorithm, contamination, novelty):
    """Check that the fitted attributes are stored using the data type of X."""
    X = iris.data.astype(global_dtype, copy=False)

    iso = neighbors.LocalOutlierFactor(
        n_neighbors=5, algorithm=algorithm, contamination=contamination, novelty=novelty
    )
    iso.fit(X)

    assert iso.negative_outlier_factor_.dtype == global_dtype

    for method in ("score_samples", "decision_function"):
        if hasattr(iso, method):
            y_pred = getattr(iso, method)(X)
            assert y_pred.dtype == global_dtype
Check that the fitted attributes are stored using the data type of X.
test_lof_input_dtype_preservation
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_lof.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_lof.py
BSD-3-Clause
def test_lof_dtype_equivalence(algorithm, novelty, contamination):
    """Check the equivalence of the results with 32 and 64 bits input."""
    inliers = iris.data[:50]  # setosa iris are really distinct from others
    outliers = iris.data[-5:]  # virginica will be considered as outliers
    # lower the precision of the input data to check that we have an equivalence when
    # making the computation in 32 and 64 bits.
    X = np.concatenate([inliers, outliers], axis=0).astype(np.float32)

    lof_32 = neighbors.LocalOutlierFactor(
        algorithm=algorithm, novelty=novelty, contamination=contamination
    )
    X_32 = X.astype(np.float32, copy=True)
    lof_32.fit(X_32)

    lof_64 = neighbors.LocalOutlierFactor(
        algorithm=algorithm, novelty=novelty, contamination=contamination
    )
    X_64 = X.astype(np.float64, copy=True)
    lof_64.fit(X_64)

    assert_allclose(lof_32.negative_outlier_factor_, lof_64.negative_outlier_factor_)

    for method in ("score_samples", "decision_function", "predict", "fit_predict"):
        if hasattr(lof_32, method):
            y_pred_32 = getattr(lof_32, method)(X_32)
            y_pred_64 = getattr(lof_64, method)(X_64)
            assert_allclose(y_pred_32, y_pred_64, atol=0.0002)
Check the equivalence of the results with 32 and 64 bits input.
test_lof_dtype_equivalence
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_lof.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_lof.py
BSD-3-Clause
def test_lof_duplicate_samples():
    """
    Check that LocalOutlierFactor raises a warning when duplicate values
    in the training data cause inaccurate results.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/27839
    """
    rng = np.random.default_rng(0)

    x = rng.permutation(
        np.hstack(
            [
                [0.1] * 1000,  # constant values
                np.linspace(0.1, 0.3, num=3000),
                rng.random(500) * 100,  # the clear outliers
            ]
        )
    )
    X = x.reshape(-1, 1)

    error_msg = (
        "Duplicate values are leading to incorrect results. "
        "Increase the number of neighbors for more accurate results."
    )

    lof = neighbors.LocalOutlierFactor(n_neighbors=5, contamination=0.1)

    # Catch the warning
    with pytest.warns(UserWarning, match=re.escape(error_msg)):
        lof.fit_predict(X)
Check that LocalOutlierFactor raises a warning when duplicate values in the training data cause inaccurate results. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/27839
test_lof_duplicate_samples
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_lof.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_lof.py
BSD-3-Clause
def test_simple_example():
    """Test on a simple example.

    Puts four points in the input space where the opposite labels points are
    next to each other. After transform the samples from the same class
    should be next to each other.
    """
    X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
    y = np.array([1, 0, 1, 0])
    nca = NeighborhoodComponentsAnalysis(
        n_components=2, init="identity", random_state=42
    )
    nca.fit(X, y)
    X_t = nca.transform(X)
    assert_array_equal(pairwise_distances(X_t).argsort()[:, 1], np.array([2, 3, 0, 1]))
Test on a simple example. Puts four points in the input space where the opposite labels points are next to each other. After transform the samples from the same class should be next to each other.
test_simple_example
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_nca.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_nca.py
BSD-3-Clause
def test_toy_example_collapse_points():
    """Test on a toy example of three points that should collapse

    We build a simple example: two points from the same class and a point from
    a different class in the middle of them. On this simple example, the new
    (transformed) points should all collapse into one single point. Indeed, the
    objective is 2/(1 + exp(d/2)), with d the euclidean distance between the
    two samples from the same class. This is maximized for d=0 (because d>=0),
    with an objective equal to 1 (loss=-1.).
    """
    rng = np.random.RandomState(42)
    input_dim = 5
    two_points = rng.randn(2, input_dim)
    X = np.vstack([two_points, two_points.mean(axis=0)[np.newaxis, :]])
    y = [0, 0, 1]

    class LossStorer:
        def __init__(self, X, y):
            self.loss = np.inf  # initialize the loss to very high
            # Initialize a fake NCA and variables needed to compute the loss:
            self.fake_nca = NeighborhoodComponentsAnalysis()
            self.fake_nca.n_iter_ = np.inf
            self.X, y = validate_data(self.fake_nca, X, y, ensure_min_samples=2)
            y = LabelEncoder().fit_transform(y)
            self.same_class_mask = y[:, np.newaxis] == y[np.newaxis, :]

        def callback(self, transformation, n_iter):
            """Stores the last value of the loss function"""
            self.loss, _ = self.fake_nca._loss_grad_lbfgs(
                transformation, self.X, self.same_class_mask, -1.0
            )

    loss_storer = LossStorer(X, y)
    nca = NeighborhoodComponentsAnalysis(random_state=42, callback=loss_storer.callback)
    X_t = nca.fit_transform(X, y)
    print(X_t)
    # test that points are collapsed into one point
    assert_array_almost_equal(X_t - X_t[0], 0.0)
    assert abs(loss_storer.loss + 1) < 1e-10
Test on a toy example of three points that should collapse We build a simple example: two points from the same class and a point from a different class in the middle of them. On this simple example, the new (transformed) points should all collapse into one single point. Indeed, the objective is 2/(1 + exp(d/2)), with d the euclidean distance between the two samples from the same class. This is maximized for d=0 (because d>=0), with an objective equal to 1 (loss=-1.).
test_toy_example_collapse_points
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_nca.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_nca.py
BSD-3-Clause
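A quick numeric check of the objective quoted in the docstring above (illustrative, not part of the test file; it assumes the stated form 2/(1 + exp(d/2))): the objective is largest at d = 0, where it equals 1, matching the asserted loss of -1.

import numpy as np

d = np.array([0.0, 1.0, 4.0])
objective = 2.0 / (1.0 + np.exp(d / 2.0))
print(objective)  # [1.0, 0.755..., 0.238...] -- maximal at d = 0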
def callback(self, transformation, n_iter):
    """Stores the last value of the loss function"""
    self.loss, _ = self.fake_nca._loss_grad_lbfgs(
        transformation, self.X, self.same_class_mask, -1.0
    )
Stores the last value of the loss function
callback
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_nca.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_nca.py
BSD-3-Clause
def test_finite_differences(global_random_seed):
    """Test gradient of loss function

    Assert that the gradient is almost equal to its finite differences
    approximation.
    """
    # Initialize the transformation `M`, as well as `X` and `y` and `NCA`
    rng = np.random.RandomState(global_random_seed)
    X, y = make_classification(random_state=global_random_seed)
    M = rng.randn(rng.randint(1, X.shape[1] + 1), X.shape[1])
    nca = NeighborhoodComponentsAnalysis()
    nca.n_iter_ = 0
    mask = y[:, np.newaxis] == y[np.newaxis, :]

    def fun(M):
        return nca._loss_grad_lbfgs(M, X, mask)[0]

    def grad(M):
        return nca._loss_grad_lbfgs(M, X, mask)[1]

    # compare the gradient to a finite difference approximation
    diff = check_grad(fun, grad, M.ravel())
    assert diff == pytest.approx(0.0, abs=1e-4)
Test gradient of loss function Assert that the gradient is almost equal to its finite differences approximation.
test_finite_differences
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_nca.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_nca.py
BSD-3-Clause
def test_expected_transformation_shape():
    """Test that the transformation has the expected shape."""
    X = iris_data
    y = iris_target

    class TransformationStorer:
        def __init__(self, X, y):
            # Initialize a fake NCA and variables needed to call the loss
            # function:
            self.fake_nca = NeighborhoodComponentsAnalysis()
            self.fake_nca.n_iter_ = np.inf
            self.X, y = validate_data(self.fake_nca, X, y, ensure_min_samples=2)
            y = LabelEncoder().fit_transform(y)
            self.same_class_mask = y[:, np.newaxis] == y[np.newaxis, :]

        def callback(self, transformation, n_iter):
            """Stores the last value of the transformation taken as input by
            the optimizer"""
            self.transformation = transformation

    transformation_storer = TransformationStorer(X, y)
    cb = transformation_storer.callback
    nca = NeighborhoodComponentsAnalysis(max_iter=5, callback=cb)
    nca.fit(X, y)
    assert transformation_storer.transformation.size == X.shape[1] ** 2
Test that the transformation has the expected shape.
test_expected_transformation_shape
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_nca.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_nca.py
BSD-3-Clause
def test_nca_feature_names_out(n_components):
    """Check `get_feature_names_out` for `NeighborhoodComponentsAnalysis`.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/28293
    """
    X = iris_data
    y = iris_target

    est = NeighborhoodComponentsAnalysis(n_components=n_components).fit(X, y)
    names_out = est.get_feature_names_out()

    class_name_lower = est.__class__.__name__.lower()

    if n_components is not None:
        expected_n_features = n_components
    else:
        expected_n_features = X.shape[1]

    expected_names_out = np.array(
        [f"{class_name_lower}{i}" for i in range(expected_n_features)],
        dtype=object,
    )

    assert_array_equal(names_out, expected_names_out)
Check `get_feature_names_out` for `NeighborhoodComponentsAnalysis`. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/28293
test_nca_feature_names_out
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_nca.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_nca.py
BSD-3-Clause
def test_negative_priors_error():
    """Check that we raise an error when the user-defined priors are negative."""
    clf = NearestCentroid(priors=[-2, 4])
    with pytest.raises(ValueError, match="priors must be non-negative"):
        clf.fit(X, y)
Check that we raise an error when the user-defined priors are negative.
test_negative_priors_error
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_nearest_centroid.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_nearest_centroid.py
BSD-3-Clause
def test_warn_non_normalized_priors():
    """Check that we raise a warning and normalize the user-defined priors when they
    don't sum to 1.
    """
    priors = [2, 4]
    clf = NearestCentroid(priors=priors)
    with pytest.warns(
        UserWarning,
        match="The priors do not sum to 1. Normalizing such that it sums to one.",
    ):
        clf.fit(X, y)

    assert_allclose(clf.class_prior_, np.asarray(priors) / np.asarray(priors).sum())
Check that we raise a warning and normalize the user-defined priors when they don't sum to 1.
test_warn_non_normalized_priors
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_nearest_centroid.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_nearest_centroid.py
BSD-3-Clause
def test_method_not_available_with_manhattan(response_method):
    """Check that we raise an AttributeError with Manhattan metric when trying
    to call a non-thresholded response method.
    """
    clf = NearestCentroid(metric="manhattan").fit(X, y)
    with pytest.raises(AttributeError):
        getattr(clf, response_method)(T)
Check that we raise an AttributeError with Manhattan metric when trying to call a non-thresholded response method.
test_method_not_available_with_manhattan
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_nearest_centroid.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_nearest_centroid.py
BSD-3-Clause
def test_error_zero_variances(array_constructor):
    """Check that we raise an error when the variance for all features is zero."""
    X = np.ones((len(y), 2))
    X[:, 1] *= 2
    X = array_constructor(X)

    clf = NearestCentroid()
    with pytest.raises(ValueError, match="All features have zero variance"):
        clf.fit(X, y)
Check that we raise an error when the variance for all features is zero.
test_error_zero_variances
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_nearest_centroid.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_nearest_centroid.py
BSD-3-Clause
def _parse_metric(metric: str, dtype=None):
    """
    Helper function for properly building a type-specialized DistanceMetric instances.

    Constructs a type-specialized DistanceMetric instance from a string
    beginning with "DM_" while allowing a pass-through for other metric-specifying
    strings. This is necessary since we wish to parameterize dtype independent of
    metric, yet DistanceMetric requires it for construction.
    """
    if metric[:3] == "DM_":
        return DistanceMetric.get_metric(metric[3:], dtype=dtype)
    return metric
Helper function for properly building a type-specialized DistanceMetric instances. Constructs a type-specialized DistanceMetric instance from a string beginning with "DM_" while allowing a pass-through for other metric-specifying strings. This is necessary since we wish to parameterize dtype independent of metric, yet DistanceMetric requires it for construction.
_parse_metric
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_neighbors.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_neighbors.py
BSD-3-Clause
def _generate_test_params_for(metric: str, n_features: int):
    """Return list of DistanceMetric kwargs for tests."""

    # Distinguishing on cases not to compute unneeded datastructures.
    rng = np.random.RandomState(1)

    if metric == "minkowski":
        return [
            dict(p=1.5),
            dict(p=2),
            dict(p=3),
            dict(p=np.inf),
            dict(p=3, w=rng.rand(n_features)),
        ]

    if metric == "seuclidean":
        return [dict(V=rng.rand(n_features))]

    if metric == "mahalanobis":
        A = rng.rand(n_features, n_features)
        # Make the matrix symmetric positive definite
        VI = A + A.T + 3 * np.eye(n_features)
        return [dict(VI=VI)]

    # Case of: "euclidean", "manhattan", "chebyshev", "haversine" or any other metric.
    # In those cases, no kwargs are needed.
    return [{}]
Return list of DistanceMetric kwargs for tests.
_generate_test_params_for
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_neighbors.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_neighbors.py
BSD-3-Clause
def _weight_func(dist):
    """Weight function to replace lambda d: d ** -2.
    The lambda function is not valid because:
    if d==0 then 0^-2 is not valid."""

    # Dist could be multidimensional, flatten it so all values
    # can be looped
    with np.errstate(divide="ignore"):
        retval = 1.0 / dist
    return retval**2
Weight function to replace lambda d: d ** -2. The lambda function is not valid because: if d==0 then 0^-2 is not valid.
_weight_func
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_neighbors.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_neighbors.py
BSD-3-Clause
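A minimal sketch (not from the test file) of why _weight_func above is preferred over lambda d: d ** -2: the plain power raises on d == 0 for Python floats, while the helper silences the divide-by-zero warning and returns inf instead.

import numpy as np

def _weight_func(dist):
    with np.errstate(divide="ignore"):
        retval = 1.0 / dist
    return retval**2

print(_weight_func(np.array([0.0, 0.5, 2.0])))  # [inf 4.0 0.25]
try:
    0.0 ** -2  # a plain Python float raises instead of returning inf
except ZeroDivisionError as exc:
    print(exc)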
def check_precomputed(make_train_test, estimators):
    """Tests unsupervised NearestNeighbors with a distance matrix."""
    # Note: smaller samples may result in spurious test success
    rng = np.random.RandomState(42)
    X = rng.random_sample((10, 4))
    Y = rng.random_sample((3, 4))
    DXX, DYX = make_train_test(X, Y)
    for method in [
        "kneighbors",
    ]:
        # TODO: also test radius_neighbors, but requires different assertion

        # As a feature matrix (n_samples by n_features)
        nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
        nbrs_X.fit(X)
        dist_X, ind_X = getattr(nbrs_X, method)(Y)

        # As a dense distance matrix (n_samples by n_samples)
        nbrs_D = neighbors.NearestNeighbors(
            n_neighbors=3, algorithm="brute", metric="precomputed"
        )
        nbrs_D.fit(DXX)
        dist_D, ind_D = getattr(nbrs_D, method)(DYX)
        assert_allclose(dist_X, dist_D)
        assert_array_equal(ind_X, ind_D)

        # Check auto works too
        nbrs_D = neighbors.NearestNeighbors(
            n_neighbors=3, algorithm="auto", metric="precomputed"
        )
        nbrs_D.fit(DXX)
        dist_D, ind_D = getattr(nbrs_D, method)(DYX)
        assert_allclose(dist_X, dist_D)
        assert_array_equal(ind_X, ind_D)

        # Check X=None in prediction
        dist_X, ind_X = getattr(nbrs_X, method)(None)
        dist_D, ind_D = getattr(nbrs_D, method)(None)
        assert_allclose(dist_X, dist_D)
        assert_array_equal(ind_X, ind_D)

        # Must raise a ValueError if the matrix is not of correct shape
        with pytest.raises(ValueError):
            getattr(nbrs_D, method)(X)

    target = np.arange(X.shape[0])
    for Est in estimators:
        est = Est(metric="euclidean")
        est.radius = est.n_neighbors = 1
        pred_X = est.fit(X, target).predict(Y)
        est.metric = "precomputed"
        pred_D = est.fit(DXX, target).predict(DYX)
        assert_allclose(pred_X, pred_D)
Tests unsupervised NearestNeighbors with a distance matrix.
check_precomputed
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_neighbors.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_neighbors.py
BSD-3-Clause
def test_radius_neighbors_boundary_handling():
    """Test whether points lying on boundary are handled consistently

    Also ensures that even with only one query point, an object array
    is returned rather than a 2d array.
    """
    X = np.array([[1.5], [3.0], [3.01]])
    radius = 3.0

    for algorithm in ALGORITHMS:
        nbrs = neighbors.NearestNeighbors(radius=radius, algorithm=algorithm).fit(X)
        results = nbrs.radius_neighbors([[0.0]], return_distance=False)
        assert results.shape == (1,)
        assert results.dtype == object
        assert_array_equal(results[0], [0, 1])
Test whether points lying on boundary are handled consistently Also ensures that even with only one query point, an object array is returned rather than a 2d array.
test_radius_neighbors_boundary_handling
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_neighbors.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_neighbors.py
BSD-3-Clause
def test_neighbors_validate_parameters(Estimator, csr_container):
    """Additional parameter validation for *Neighbors* estimators not covered by common
    validation."""
    X = rng.random_sample((10, 2))
    Xsparse = csr_container(X)
    X3 = rng.random_sample((10, 3))
    y = np.ones(10)

    nbrs = Estimator(algorithm="ball_tree", metric="haversine")
    msg = "instance is not fitted yet"
    with pytest.raises(ValueError, match=msg):
        nbrs.predict(X)
    msg = "Metric 'haversine' not valid for sparse input."
    with pytest.raises(ValueError, match=msg):
        ignore_warnings(nbrs.fit(Xsparse, y))

    nbrs = Estimator(metric="haversine", algorithm="brute")
    nbrs.fit(X3, y)
    msg = "Haversine distance only valid in 2 dimensions"
    with pytest.raises(ValueError, match=msg):
        nbrs.predict(X3)

    nbrs = Estimator()
    msg = re.escape("Found array with 0 sample(s)")
    with pytest.raises(ValueError, match=msg):
        nbrs.fit(np.ones((0, 2)), np.ones(0))

    msg = "Found array with dim 3"
    with pytest.raises(ValueError, match=msg):
        nbrs.fit(X[:, :, None], y)
    nbrs.fit(X, y)

    msg = re.escape("Found array with 0 feature(s)")
    with pytest.raises(ValueError, match=msg):
        nbrs.predict([[]])
Additional parameter validation for *Neighbors* estimators not covered by common validation.
test_neighbors_validate_parameters
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_neighbors.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_neighbors.py
BSD-3-Clause
def test_neighbors_minkowski_semimetric_algo_warn(Estimator, n_features, algorithm):
    """
    Validation of all classes extending NeighborsBase with
    Minkowski semi-metrics (i.e. when 0 < p < 1). That proper
    Warning is raised for `algorithm="auto"` and "brute".
    """
    X = rng.random_sample((10, n_features))
    y = np.ones(10)

    model = Estimator(p=0.1, algorithm=algorithm)
    msg = (
        "Mind that for 0 < p < 1, Minkowski metrics are not distance"
        " metrics. Continuing the execution with `algorithm='brute'`."
    )
    with pytest.warns(UserWarning, match=msg):
        model.fit(X, y)

    assert model._fit_method == "brute"
Validation of all classes extending NeighborsBase with Minkowski semi-metrics (i.e. when 0 < p < 1). That proper Warning is raised for `algorithm="auto"` and "brute".
test_neighbors_minkowski_semimetric_algo_warn
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_neighbors.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_neighbors.py
BSD-3-Clause
def test_neighbors_minkowski_semimetric_algo_error(Estimator, n_features, algorithm):
    """Check that we raise a proper error if `algorithm!='brute'` and `p<1`."""
    X = rng.random_sample((10, 2))
    y = np.ones(10)

    model = Estimator(algorithm=algorithm, p=0.1)
    msg = (
        f'algorithm="{algorithm}" does not support 0 < p < 1 for '
        "the Minkowski metric. To resolve this problem either "
        'set p >= 1 or algorithm="brute".'
    )
    with pytest.raises(ValueError, match=msg):
        model.fit(X, y)
Check that we raise a proper error if `algorithm!='brute'` and `p<1`.
test_neighbors_minkowski_semimetric_algo_error
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_neighbors.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_neighbors.py
BSD-3-Clause
def test_regressor_predict_on_arraylikes():
    """Ensures that `predict` works for array-likes when `weights` is a callable.

    Non-regression test for #22687.
    """
    X = [[5, 1], [3, 1], [4, 3], [0, 3]]
    y = [2, 3, 5, 6]

    def _weights(dist):
        return np.ones_like(dist)

    est = KNeighborsRegressor(n_neighbors=1, algorithm="brute", weights=_weights)
    est.fit(X, y)
    assert_allclose(est.predict([[0, 2.5]]), [6])
Ensures that `predict` works for array-likes when `weights` is a callable. Non-regression test for #22687.
test_regressor_predict_on_arraylikes
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_neighbors.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_neighbors.py
BSD-3-Clause
def test_nan_euclidean_support(Estimator, params):
    """Check that the different neighbor estimators are lenient towards `nan`
    values if using `metric="nan_euclidean"`.
    """
    X = [[0, 1], [1, np.nan], [2, 3], [3, 5]]
    y = [0, 0, 1, 1]

    params.update({"metric": "nan_euclidean"})
    estimator = Estimator().set_params(**params).fit(X, y)

    for response_method in ("kneighbors", "predict", "transform", "fit_predict"):
        if hasattr(estimator, response_method):
            output = getattr(estimator, response_method)(X)
            if hasattr(output, "toarray"):
                assert not np.isnan(output.data).any()
            else:
                assert not np.isnan(output).any()
Check that the different neighbor estimators are lenient towards `nan` values if using `metric="nan_euclidean"`.
test_nan_euclidean_support
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_neighbors.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_neighbors.py
BSD-3-Clause
def test_predict_dataframe():
    """Check that KNN predict works with dataframes

    non-regression test for issue #26768
    """
    pd = pytest.importorskip("pandas")

    X = pd.DataFrame(np.array([[1, 2], [3, 4], [5, 6], [7, 8]]), columns=["a", "b"])
    y = np.array([1, 2, 3, 4])

    knn = neighbors.KNeighborsClassifier(n_neighbors=2).fit(X, y)
    knn.predict(X)
Check that KNN predict works with dataframes non-regression test for issue #26768
test_predict_dataframe
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_neighbors.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_neighbors.py
BSD-3-Clause
def test_nearest_neighbours_works_with_p_less_than_1():
    """Check that NearestNeighbors works with :math:`p \\in (0,1)` when `algorithm`
    is `"auto"` or `"brute"` regardless of the dtype of X.

    Non-regression test for issue #26548
    """
    X = np.array([[1.0, 0.0], [0.0, 0.0], [0.0, 1.0]])
    neigh = neighbors.NearestNeighbors(
        n_neighbors=3, algorithm="brute", metric_params={"p": 0.5}
    )
    neigh.fit(X)

    y = neigh.radius_neighbors(X[0].reshape(1, -1), radius=4, return_distance=False)
    assert_allclose(y[0], [0, 1, 2])

    y = neigh.kneighbors(X[0].reshape(1, -1), return_distance=False)
    assert_allclose(y[0], [0, 1, 2])
Check that NearestNeighbors works with :math:`p \in (0,1)` when `algorithm` is `"auto"` or `"brute"` regardless of the dtype of X. Non-regression test for issue #26548
test_nearest_neighbours_works_with_p_less_than_1
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_neighbors.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_neighbors.py
BSD-3-Clause
def test_KNeighborsClassifier_raise_on_all_zero_weights():
    """Check that `predict` and `predict_proba` raises on sample of all zeros weights.

    Related to Issue #25854.
    """
    X = [[0, 1], [1, 2], [2, 3], [3, 4]]
    y = [0, 0, 1, 1]

    def _weights(dist):
        return np.vectorize(lambda x: 0 if x > 0.5 else 1)(dist)

    est = neighbors.KNeighborsClassifier(n_neighbors=3, weights=_weights)
    est.fit(X, y)

    msg = (
        "All neighbors of some sample is getting zero weights. "
        "Please modify 'weights' to avoid this case if you are "
        "using a user-defined function."
    )
    with pytest.raises(ValueError, match=msg):
        est.predict([[1.1, 1.1]])

    with pytest.raises(ValueError, match=msg):
        est.predict_proba([[1.1, 1.1]])
Check that `predict` and `predict_proba` raises on sample of all zeros weights. Related to Issue #25854.
test_KNeighborsClassifier_raise_on_all_zero_weights
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_neighbors.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_neighbors.py
BSD-3-Clause
def test_neighbor_classifiers_loocv(nn_model, algorithm):
    """Check that `predict` and related functions work fine with X=None

    Calling predict with X=None computes a prediction for each training point
    from the labels of its neighbors (without the label of the data point being
    predicted upon). This is therefore mathematically equivalent to
    leave-one-out cross-validation without having do any retraining (rebuilding
    a KD-tree or Ball-tree index) or any data reshuffling.
    """
    X, y = datasets.make_blobs(n_samples=15, centers=5, n_features=2, random_state=0)

    nn_model = clone(nn_model).set_params(algorithm=algorithm)

    # Set the radius for RadiusNeighborsRegressor to some percentile of the
    # empirical pairwise distances to avoid trivial test cases and warnings for
    # predictions with no neighbors within the radius.
    if "radius" in nn_model.get_params():
        dists = pairwise_distances(X).ravel()
        dists = dists[dists > 0]
        nn_model.set_params(radius=np.percentile(dists, 80))

    loocv = cross_val_score(nn_model, X, y, cv=LeaveOneOut())
    nn_model.fit(X, y)

    assert_allclose(loocv, nn_model.predict(None) == y)
    assert np.mean(loocv) == pytest.approx(nn_model.score(None, y))

    # Evaluating `nn_model` on its "training" set should lead to a higher
    # accuracy value than leaving out each data point in turn because the
    # former can overfit while the latter cannot by construction.
    assert nn_model.score(None, y) < nn_model.score(X, y)
Check that `predict` and related functions work fine with X=None Calling predict with X=None computes a prediction for each training point from the labels of its neighbors (without the label of the data point being predicted upon). This is therefore mathematically equivalent to leave-one-out cross-validation without having do any retraining (rebuilding a KD-tree or Ball-tree index) or any data reshuffling.
test_neighbor_classifiers_loocv
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_neighbors.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_neighbors.py
BSD-3-Clause
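A small illustration (assumed toy data, and a scikit-learn version recent enough to support predicting with X=None, which the test above exercises) of the leave-one-out equivalence described in the docstring: predicting on None scores each training point using only its other neighbors.

import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import LeaveOneOut, cross_val_score

X = np.array([[0.0], [0.1], [1.0], [1.1], [2.0], [2.1]])
y = np.array([0, 0, 1, 1, 2, 2])
knn = KNeighborsClassifier(n_neighbors=1).fit(X, y)
loocv = cross_val_score(knn, X, y, cv=LeaveOneOut())
# Each training point is predicted from its single nearest *other* neighbor,
# which is exactly what leave-one-out cross-validation computes here.
print(np.array_equal(loocv, knn.predict(None) == y))  # True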
def test_neighbor_regressors_loocv(nn_model, algorithm):
    """Check that `predict` and related functions work fine with X=None"""
    X, y = datasets.make_regression(n_samples=15, n_features=2, random_state=0)

    # Only checking cross_val_predict and not cross_val_score because
    # cross_val_score does not work with LeaveOneOut() for a regressor: the
    # default score method implements R2 score which is not well defined for a
    # single data point.
    #
    # TODO: if score is refactored to evaluate models for other scoring
    # functions, then this test can be extended to check cross_val_score as
    # well.
    nn_model = clone(nn_model).set_params(algorithm=algorithm)

    # Set the radius for RadiusNeighborsRegressor to some percentile of the
    # empirical pairwise distances to avoid trivial test cases and warnings for
    # predictions with no neighbors within the radius.
    if "radius" in nn_model.get_params():
        dists = pairwise_distances(X).ravel()
        dists = dists[dists > 0]
        nn_model.set_params(radius=np.percentile(dists, 80))

    loocv = cross_val_predict(nn_model, X, y, cv=LeaveOneOut())
    nn_model.fit(X, y)
    assert_allclose(loocv, nn_model.predict(None))
Check that `predict` and related functions work fine with X=None
test_neighbor_regressors_loocv
python
scikit-learn/scikit-learn
sklearn/neighbors/tests/test_neighbors.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neighbors/tests/test_neighbors.py
BSD-3-Clause
def inplace_softmax(X):
    """Compute the K-way softmax function inplace.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data.
    """
    tmp = X - X.max(axis=1)[:, np.newaxis]
    np.exp(tmp, out=X)
    X /= X.sum(axis=1)[:, np.newaxis]
Compute the K-way softmax function inplace. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) The input data.
inplace_softmax
python
scikit-learn/scikit-learn
sklearn/neural_network/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_base.py
BSD-3-Clause
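A minimal usage sketch (illustrative, not from the source) of inplace_softmax: each row becomes a probability distribution, and subtracting the row-wise maximum first keeps np.exp from overflowing on large inputs.

import numpy as np

def inplace_softmax(X):
    tmp = X - X.max(axis=1)[:, np.newaxis]
    np.exp(tmp, out=X)
    X /= X.sum(axis=1)[:, np.newaxis]

X = np.array([[1000.0, 1001.0], [0.0, 0.0]])
inplace_softmax(X)
print(X)              # finite probabilities, no overflow
print(X.sum(axis=1))  # [1. 1.]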
def inplace_logistic_derivative(Z, delta):
    """Apply the derivative of the logistic sigmoid function.

    It exploits the fact that the derivative is a simple function of the output
    value from logistic function.

    Parameters
    ----------
    Z : {array-like, sparse matrix}, shape (n_samples, n_features)
        The data which was output from the logistic activation function during
        the forward pass.

    delta : {array-like}, shape (n_samples, n_features)
        The backpropagated error signal to be modified inplace.
    """
    delta *= Z
    delta *= 1 - Z
Apply the derivative of the logistic sigmoid function. It exploits the fact that the derivative is a simple function of the output value from logistic function. Parameters ---------- Z : {array-like, sparse matrix}, shape (n_samples, n_features) The data which was output from the logistic activation function during the forward pass. delta : {array-like}, shape (n_samples, n_features) The backpropagated error signal to be modified inplace.
inplace_logistic_derivative
python
scikit-learn/scikit-learn
sklearn/neural_network/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_base.py
BSD-3-Clause
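A tiny check (illustrative) of the identity used by inplace_logistic_derivative: for the logistic sigmoid, dσ/dz = σ(z)(1 − σ(z)), so the incoming error signal is scaled by Z * (1 - Z) in place.

import numpy as np

Z = np.array([[0.5, 0.9]])   # outputs of the logistic activation
delta = np.ones_like(Z)      # incoming backpropagated error signal
delta *= Z
delta *= 1 - Z
print(delta)  # [[0.25 0.09]]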
def squared_loss(y_true, y_pred, sample_weight=None):
    """Compute the squared loss for regression.

    Parameters
    ----------
    y_true : array-like or label indicator matrix
        Ground truth (correct) values.

    y_pred : array-like or label indicator matrix
        Predicted values, as returned by a regression estimator.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    loss : float
        The degree to which the samples are correctly predicted.
    """
    return (
        0.5 * np.average((y_true - y_pred) ** 2, weights=sample_weight, axis=0).mean()
    )
Compute the squared loss for regression. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) values. y_pred : array-like or label indicator matrix Predicted values, as returned by a regression estimator. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- loss : float The degree to which the samples are correctly predicted.
squared_loss
python
scikit-learn/scikit-learn
sklearn/neural_network/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_base.py
BSD-3-Clause
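A worked example (illustrative values) of squared_loss: with y_true = [0, 2] and y_pred = [1, 1], the squared residuals are [1, 1], so the loss is 0.5 * mean([1, 1]) = 0.5.

import numpy as np

y_true = np.array([[0.0], [2.0]])
y_pred = np.array([[1.0], [1.0]])
loss = 0.5 * np.average((y_true - y_pred) ** 2, weights=None, axis=0).mean()
print(loss)  # 0.5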
def poisson_loss(y_true, y_pred, sample_weight=None):
    """Compute (half of the) Poisson deviance loss for regression.

    Parameters
    ----------
    y_true : array-like or label indicator matrix
        Ground truth (correct) labels.

    y_pred : array-like or label indicator matrix
        Predicted values, as returned by a regression estimator.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    loss : float
        The degree to which the samples are correctly predicted.
    """
    # TODO: Decide what to do with the term `xlogy(y_true, y_true) - y_true`. For now,
    # it is included. But the _loss module doesn't use it (for performance reasons) and
    # only adds it as return of constant_to_optimal_zero (mainly for testing).
    return np.average(
        xlogy(y_true, y_true / y_pred) - y_true + y_pred, weights=sample_weight, axis=0
    ).sum()
Compute (half of the) Poisson deviance loss for regression. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels. y_pred : array-like or label indicator matrix Predicted values, as returned by a regression estimator. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- loss : float The degree to which the samples are correctly predicted.
poisson_loss
python
scikit-learn/scikit-learn
sklearn/neural_network/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_base.py
BSD-3-Clause
def log_loss(y_true, y_prob, sample_weight=None):
    """Compute Logistic loss for classification.

    Parameters
    ----------
    y_true : array-like or label indicator matrix
        Ground truth (correct) labels.

    y_prob : array-like of float, shape = (n_samples, n_classes)
        Predicted probabilities, as returned by a classifier's
        predict_proba method.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    loss : float
        The degree to which the samples are correctly predicted.
    """
    eps = np.finfo(y_prob.dtype).eps
    y_prob = np.clip(y_prob, eps, 1 - eps)
    if y_prob.shape[1] == 1:
        y_prob = np.append(1 - y_prob, y_prob, axis=1)

    if y_true.shape[1] == 1:
        y_true = np.append(1 - y_true, y_true, axis=1)

    return -np.average(xlogy(y_true, y_prob), weights=sample_weight, axis=0).sum()
Compute Logistic loss for classification. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels. y_prob : array-like of float, shape = (n_samples, n_classes) Predicted probabilities, as returned by a classifier's predict_proba method. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- loss : float The degree to which the samples are correctly predicted.
log_loss
python
scikit-learn/scikit-learn
sklearn/neural_network/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_base.py
BSD-3-Clause
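A quick numeric check (illustrative values) of log_loss: for one-hot targets with predicted probabilities 0.9 and 0.8 on the true classes, the loss is -(log 0.9 + log 0.8) / 2 ≈ 0.164.

import numpy as np
from scipy.special import xlogy

y_true = np.array([[1.0, 0.0], [0.0, 1.0]])
y_prob = np.array([[0.9, 0.1], [0.2, 0.8]])
print(-np.average(xlogy(y_true, y_prob), weights=None, axis=0).sum())  # ~0.1643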
def binary_log_loss(y_true, y_prob, sample_weight=None):
    """Compute binary logistic loss for classification.

    This is identical to log_loss in binary classification case,
    but is kept for its use in multilabel case.

    Parameters
    ----------
    y_true : array-like or label indicator matrix
        Ground truth (correct) labels.

    y_prob : array-like of float, shape = (n_samples, 1)
        Predicted probabilities, as returned by a classifier's
        predict_proba method.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    loss : float
        The degree to which the samples are correctly predicted.
    """
    eps = np.finfo(y_prob.dtype).eps
    y_prob = np.clip(y_prob, eps, 1 - eps)
    return -np.average(
        xlogy(y_true, y_prob) + xlogy(1 - y_true, 1 - y_prob),
        weights=sample_weight,
        axis=0,
    ).sum()
Compute binary logistic loss for classification. This is identical to log_loss in binary classification case, but is kept for its use in multilabel case. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels. y_prob : array-like of float, shape = (n_samples, 1) Predicted probabilities, as returned by a classifier's predict_proba method. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- loss : float The degree to which the samples are correctly predicted.
binary_log_loss
python
scikit-learn/scikit-learn
sklearn/neural_network/_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_base.py
BSD-3-Clause
def _unpack(self, packed_parameters):
    """Extract the coefficients and intercepts from packed_parameters."""
    for i in range(self.n_layers_ - 1):
        start, end, shape = self._coef_indptr[i]
        self.coefs_[i] = np.reshape(packed_parameters[start:end], shape)

        start, end = self._intercept_indptr[i]
        self.intercepts_[i] = packed_parameters[start:end]
Extract the coefficients and intercepts from packed_parameters.
_unpack
python
scikit-learn/scikit-learn
sklearn/neural_network/_multilayer_perceptron.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_multilayer_perceptron.py
BSD-3-Clause
def _forward_pass(self, activations):
    """Perform a forward pass on the network by computing the values
    of the neurons in the hidden layers and the output layer.

    Parameters
    ----------
    activations : list, length = n_layers - 1
        The ith element of the list holds the values of the ith layer.
    """
    hidden_activation = ACTIVATIONS[self.activation]
    # Iterate over the hidden layers
    for i in range(self.n_layers_ - 1):
        activations[i + 1] = safe_sparse_dot(activations[i], self.coefs_[i])
        activations[i + 1] += self.intercepts_[i]

        # For the hidden layers
        if (i + 1) != (self.n_layers_ - 1):
            hidden_activation(activations[i + 1])

    # For the last layer
    output_activation = ACTIVATIONS[self.out_activation_]
    output_activation(activations[i + 1])

    return activations
Perform a forward pass on the network by computing the values of the neurons in the hidden layers and the output layer. Parameters ---------- activations : list, length = n_layers - 1 The ith element of the list holds the values of the ith layer.
_forward_pass
python
scikit-learn/scikit-learn
sklearn/neural_network/_multilayer_perceptron.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_multilayer_perceptron.py
BSD-3-Clause
def _forward_pass_fast(self, X, check_input=True):
    """Predict using the trained model

    This is the same as _forward_pass but does not record the activations of
    all layers and only returns the last layer's activation.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The input data.

    check_input : bool, default=True
        Perform input data validation or not.

    Returns
    -------
    y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs)
        The decision function of the samples for each class in the model.
    """
    if check_input:
        X = validate_data(self, X, accept_sparse=["csr", "csc"], reset=False)

    # Initialize first layer
    activation = X

    # Forward propagate
    hidden_activation = ACTIVATIONS[self.activation]
    for i in range(self.n_layers_ - 1):
        activation = safe_sparse_dot(activation, self.coefs_[i])
        activation += self.intercepts_[i]
        if i != self.n_layers_ - 2:
            hidden_activation(activation)
    output_activation = ACTIVATIONS[self.out_activation_]
    output_activation(activation)

    return activation
Predict using the trained model This is the same as _forward_pass but does not record the activations of all layers and only returns the last layer's activation. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. check_input : bool, default=True Perform input data validation or not. Returns ------- y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs) The decision function of the samples for each class in the model.
_forward_pass_fast
python
scikit-learn/scikit-learn
sklearn/neural_network/_multilayer_perceptron.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_multilayer_perceptron.py
BSD-3-Clause
def _compute_loss_grad(
    self, layer, sw_sum, activations, deltas, coef_grads, intercept_grads
):
    """Compute the gradient of loss with respect to coefs and intercept for
    specified layer.

    This function does backpropagation for the specified one layer.
    """
    coef_grads[layer] = safe_sparse_dot(activations[layer].T, deltas[layer])
    coef_grads[layer] += self.alpha * self.coefs_[layer]
    coef_grads[layer] /= sw_sum

    intercept_grads[layer] = np.sum(deltas[layer], axis=0) / sw_sum
Compute the gradient of loss with respect to coefs and intercept for specified layer. This function does backpropagation for the specified one layer.
_compute_loss_grad
python
scikit-learn/scikit-learn
sklearn/neural_network/_multilayer_perceptron.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_multilayer_perceptron.py
BSD-3-Clause
def _loss_grad_lbfgs(
    self,
    packed_coef_inter,
    X,
    y,
    sample_weight,
    activations,
    deltas,
    coef_grads,
    intercept_grads,
):
    """Compute the MLP loss function and its corresponding derivatives
    with respect to the different parameters given in the initialization.

    Returned gradients are packed in a single vector so it can be used
    in lbfgs

    Parameters
    ----------
    packed_coef_inter : ndarray
        A vector comprising the flattened coefficients and intercepts.

    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The input data.

    y : ndarray of shape (n_samples,)
        The target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    activations : list, length = n_layers - 1
        The ith element of the list holds the values of the ith layer.

    deltas : list, length = n_layers - 1
        The ith element of the list holds the difference between the
        activations of the i + 1 layer and the backpropagated error.
        More specifically, deltas are gradients of loss with respect to z
        in each layer, where z = wx + b is the value of a particular layer
        before passing through the activation function

    coef_grads : list, length = n_layers - 1
        The ith element contains the amount of change used to update the
        coefficient parameters of the ith layer in an iteration.

    intercept_grads : list, length = n_layers - 1
        The ith element contains the amount of change used to update the
        intercept parameters of the ith layer in an iteration.

    Returns
    -------
    loss : float
    grad : array-like, shape (number of nodes of all layers,)
    """
    self._unpack(packed_coef_inter)
    loss, coef_grads, intercept_grads = self._backprop(
        X, y, sample_weight, activations, deltas, coef_grads, intercept_grads
    )
    grad = _pack(coef_grads, intercept_grads)
    return loss, grad
Compute the MLP loss function and its corresponding derivatives with respect to the different parameters given in the initialization. Returned gradients are packed in a single vector so it can be used in lbfgs Parameters ---------- packed_coef_inter : ndarray A vector comprising the flattened coefficients and intercepts. X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. y : ndarray of shape (n_samples,) The target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. activations : list, length = n_layers - 1 The ith element of the list holds the values of the ith layer. deltas : list, length = n_layers - 1 The ith element of the list holds the difference between the activations of the i + 1 layer and the backpropagated error. More specifically, deltas are gradients of loss with respect to z in each layer, where z = wx + b is the value of a particular layer before passing through the activation function coef_grads : list, length = n_layers - 1 The ith element contains the amount of change used to update the coefficient parameters of the ith layer in an iteration. intercept_grads : list, length = n_layers - 1 The ith element contains the amount of change used to update the intercept parameters of the ith layer in an iteration. Returns ------- loss : float grad : array-like, shape (number of nodes of all layers,)
_loss_grad_lbfgs
python
scikit-learn/scikit-learn
sklearn/neural_network/_multilayer_perceptron.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_multilayer_perceptron.py
BSD-3-Clause
def _backprop(
    self, X, y, sample_weight, activations, deltas, coef_grads, intercept_grads
):
    """Compute the MLP loss function and its corresponding derivatives
    with respect to each parameter: weights and bias vectors.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The input data.

    y : ndarray of shape (n_samples,)
        The target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    activations : list, length = n_layers - 1
        The ith element of the list holds the values of the ith layer.

    deltas : list, length = n_layers - 1
        The ith element of the list holds the difference between the
        activations of the i + 1 layer and the backpropagated error.
        More specifically, deltas are gradients of loss with respect to z
        in each layer, where z = wx + b is the value of a particular layer
        before passing through the activation function

    coef_grads : list, length = n_layers - 1
        The ith element contains the amount of change used to update the
        coefficient parameters of the ith layer in an iteration.

    intercept_grads : list, length = n_layers - 1
        The ith element contains the amount of change used to update the
        intercept parameters of the ith layer in an iteration.

    Returns
    -------
    loss : float
    coef_grads : list, length = n_layers - 1
    intercept_grads : list, length = n_layers - 1
    """
    n_samples = X.shape[0]

    # Forward propagate
    activations = self._forward_pass(activations)

    # Get loss
    loss_func_name = self.loss
    if loss_func_name == "log_loss" and self.out_activation_ == "logistic":
        loss_func_name = "binary_log_loss"
    loss = LOSS_FUNCTIONS[loss_func_name](y, activations[-1], sample_weight)
    # Add L2 regularization term to loss
    values = 0
    for s in self.coefs_:
        s = s.ravel()
        values += np.dot(s, s)

    if sample_weight is None:
        sw_sum = n_samples
    else:
        sw_sum = sample_weight.sum()
    loss += (0.5 * self.alpha) * values / sw_sum

    # Backward propagate
    last = self.n_layers_ - 2

    # The calculation of delta[last] is as follows:
    # delta[last] = d/dz loss(y, act(z)) = act(z) - y
    # with z=x@w + b being the output of the last layer before passing through the
    # output activation, act(z) = activations[-1].
    # The simple formula for delta[last] here works with following (canonical
    # loss-link) combinations of output activation and loss function:
    # sigmoid and binary cross entropy, softmax and categorical cross
    # entropy, and identity with squared loss
    deltas[last] = activations[-1] - y
    if sample_weight is not None:
        deltas[last] *= sample_weight.reshape(-1, 1)

    # Compute gradient for the last layer
    self._compute_loss_grad(
        last, sw_sum, activations, deltas, coef_grads, intercept_grads
    )

    inplace_derivative = DERIVATIVES[self.activation]
    # Iterate over the hidden layers
    for i in range(last, 0, -1):
        deltas[i - 1] = safe_sparse_dot(deltas[i], self.coefs_[i].T)
        inplace_derivative(activations[i], deltas[i - 1])

        self._compute_loss_grad(
            i - 1, sw_sum, activations, deltas, coef_grads, intercept_grads
        )

    return loss, coef_grads, intercept_grads
Compute the MLP loss function and its corresponding derivatives with respect to each parameter: weights and bias vectors. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. y : ndarray of shape (n_samples,) The target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. activations : list, length = n_layers - 1 The ith element of the list holds the values of the ith layer. deltas : list, length = n_layers - 1 The ith element of the list holds the difference between the activations of the i + 1 layer and the backpropagated error. More specifically, deltas are gradients of loss with respect to z in each layer, where z = wx + b is the value of a particular layer before passing through the activation function coef_grads : list, length = n_layers - 1 The ith element contains the amount of change used to update the coefficient parameters of the ith layer in an iteration. intercept_grads : list, length = n_layers - 1 The ith element contains the amount of change used to update the intercept parameters of the ith layer in an iteration. Returns ------- loss : float coef_grads : list, length = n_layers - 1 intercept_grads : list, length = n_layers - 1
_backprop
python
scikit-learn/scikit-learn
sklearn/neural_network/_multilayer_perceptron.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_multilayer_perceptron.py
BSD-3-Clause
def _score_with_function(self, X, y, sample_weight, score_function):
    """Private score method without input validation."""
    # Input validation would remove feature names, so we disable it
    y_pred = self._predict(X, check_input=False)
    if np.isnan(y_pred).any() or np.isinf(y_pred).any():
        return np.nan
    return score_function(y, y_pred, sample_weight=sample_weight)
Private score method without input validation.
_score_with_function
python
scikit-learn/scikit-learn
sklearn/neural_network/_multilayer_perceptron.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_multilayer_perceptron.py
BSD-3-Clause
def _predict(self, X, check_input=True):
    """Private predict method with optional input validation"""
    y_pred = self._forward_pass_fast(X, check_input=check_input)

    if self.n_outputs_ == 1:
        y_pred = y_pred.ravel()

    return self._label_binarizer.inverse_transform(y_pred)
Private predict method with optional input validation
_predict
python
scikit-learn/scikit-learn
sklearn/neural_network/_multilayer_perceptron.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_multilayer_perceptron.py
BSD-3-Clause
def predict_log_proba(self, X):
    """Return the log of probability estimates.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        The input data.

    Returns
    -------
    log_y_prob : ndarray of shape (n_samples, n_classes)
        The predicted log-probability of the sample for each class
        in the model, where classes are ordered as they are in
        `self.classes_`. Equivalent to `log(predict_proba(X))`.
    """
    y_prob = self.predict_proba(X)
    return np.log(y_prob, out=y_prob)
Return the log of probability estimates. Parameters ---------- X : ndarray of shape (n_samples, n_features) The input data. Returns ------- log_y_prob : ndarray of shape (n_samples, n_classes) The predicted log-probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. Equivalent to `log(predict_proba(X))`.
predict_log_proba
python
scikit-learn/scikit-learn
sklearn/neural_network/_multilayer_perceptron.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_multilayer_perceptron.py
BSD-3-Clause
def predict_proba(self, X):
    """Probability estimates.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The input data.

    Returns
    -------
    y_prob : ndarray of shape (n_samples, n_classes)
        The predicted probability of the sample for each class in the
        model, where classes are ordered as they are in `self.classes_`.
    """
    check_is_fitted(self)
    y_pred = self._forward_pass_fast(X)

    if self.n_outputs_ == 1:
        y_pred = y_pred.ravel()

    if y_pred.ndim == 1:
        return np.vstack([1 - y_pred, y_pred]).T
    else:
        return y_pred
Probability estimates. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. Returns ------- y_prob : ndarray of shape (n_samples, n_classes) The predicted probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`.
predict_proba
python
scikit-learn/scikit-learn
sklearn/neural_network/_multilayer_perceptron.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_multilayer_perceptron.py
BSD-3-Clause
def _predict(self, X, check_input=True):
    """Private predict method with optional input validation"""
    y_pred = self._forward_pass_fast(X, check_input=check_input)
    if y_pred.shape[1] == 1:
        return y_pred.ravel()
    return y_pred
Private predict method with optional input validation
_predict
python
scikit-learn/scikit-learn
sklearn/neural_network/_multilayer_perceptron.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_multilayer_perceptron.py
BSD-3-Clause
def transform(self, X):
    """Compute the hidden layer activation probabilities, P(h=1|v=X).

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The data to be transformed.

    Returns
    -------
    h : ndarray of shape (n_samples, n_components)
        Latent representations of the data.
    """
    check_is_fitted(self)

    X = validate_data(
        self, X, accept_sparse="csr", reset=False, dtype=(np.float64, np.float32)
    )
    return self._mean_hiddens(X)
Compute the hidden layer activation probabilities, P(h=1|v=X). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to be transformed. Returns ------- h : ndarray of shape (n_samples, n_components) Latent representations of the data.
transform
python
scikit-learn/scikit-learn
sklearn/neural_network/_rbm.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_rbm.py
BSD-3-Clause
def _mean_hiddens(self, v):
    """Computes the probabilities P(h=1|v).

    Parameters
    ----------
    v : ndarray of shape (n_samples, n_features)
        Values of the visible layer.

    Returns
    -------
    h : ndarray of shape (n_samples, n_components)
        Corresponding mean field values for the hidden layer.
    """
    p = safe_sparse_dot(v, self.components_.T)
    p += self.intercept_hidden_
    return expit(p, out=p)
Computes the probabilities P(h=1|v). Parameters ---------- v : ndarray of shape (n_samples, n_features) Values of the visible layer. Returns ------- h : ndarray of shape (n_samples, n_components) Corresponding mean field values for the hidden layer.
_mean_hiddens
python
scikit-learn/scikit-learn
sklearn/neural_network/_rbm.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_rbm.py
BSD-3-Clause
def _sample_hiddens(self, v, rng):
    """Sample from the distribution P(h|v).

    Parameters
    ----------
    v : ndarray of shape (n_samples, n_features)
        Values of the visible layer to sample from.

    rng : RandomState instance
        Random number generator to use.

    Returns
    -------
    h : ndarray of shape (n_samples, n_components)
        Values of the hidden layer.
    """
    p = self._mean_hiddens(v)
    return rng.uniform(size=p.shape) < p
Sample from the distribution P(h|v). Parameters ---------- v : ndarray of shape (n_samples, n_features) Values of the visible layer to sample from. rng : RandomState instance Random number generator to use. Returns ------- h : ndarray of shape (n_samples, n_components) Values of the hidden layer.
_sample_hiddens
python
scikit-learn/scikit-learn
sklearn/neural_network/_rbm.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_rbm.py
BSD-3-Clause
def _sample_visibles(self, h, rng):
    """Sample from the distribution P(v|h).

    Parameters
    ----------
    h : ndarray of shape (n_samples, n_components)
        Values of the hidden layer to sample from.

    rng : RandomState instance
        Random number generator to use.

    Returns
    -------
    v : ndarray of shape (n_samples, n_features)
        Values of the visible layer.
    """
    p = np.dot(h, self.components_)
    p += self.intercept_visible_
    expit(p, out=p)
    return rng.uniform(size=p.shape) < p
Sample from the distribution P(v|h). Parameters ---------- h : ndarray of shape (n_samples, n_components) Values of the hidden layer to sample from. rng : RandomState instance Random number generator to use. Returns ------- v : ndarray of shape (n_samples, n_features) Values of the visible layer.
_sample_visibles
python
scikit-learn/scikit-learn
sklearn/neural_network/_rbm.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_rbm.py
BSD-3-Clause
def _free_energy(self, v): """Computes the free energy F(v) = - log sum_h exp(-E(v,h)). Parameters ---------- v : ndarray of shape (n_samples, n_features) Values of the visible layer. Returns ------- free_energy : ndarray of shape (n_samples,) The value of the free energy. """ return -safe_sparse_dot(v, self.intercept_visible_) - np.logaddexp( 0, safe_sparse_dot(v, self.components_.T) + self.intercept_hidden_ ).sum(axis=1)
Computes the free energy F(v) = - log sum_h exp(-E(v,h)). Parameters ---------- v : ndarray of shape (n_samples, n_features) Values of the visible layer. Returns ------- free_energy : ndarray of shape (n_samples,) The value of the free energy.
_free_energy
python
scikit-learn/scikit-learn
sklearn/neural_network/_rbm.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_rbm.py
BSD-3-Clause
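The expression in `_free_energy` above is the standard RBM identity F(v) = -v·b_v - sum_j softplus(w_j·v + c_j), with `np.logaddexp(0, x)` playing the role of softplus. A self-contained NumPy check of that identity; the names `W`, `b_v` and `c` are hypothetical stand-ins for `components_`, `intercept_visible_` and `intercept_hidden_`:
import numpy as np

rng = np.random.default_rng(0)
v = rng.integers(0, 2, size=(4, 3)).astype(float)
W = rng.normal(size=(2, 3))      # hidden x visible weights
b_v = rng.normal(size=3)         # visible intercepts
c = rng.normal(size=2)           # hidden intercepts

free_energy = -v @ b_v - np.logaddexp(0, v @ W.T + c).sum(axis=1)

# Same quantity written with an explicit softplus for readability.
softplus = np.log1p(np.exp(v @ W.T + c))
assert np.allclose(free_energy, -v @ b_v - softplus.sum(axis=1))
print(free_energy)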
def gibbs(self, v): """Perform one Gibbs sampling step. Parameters ---------- v : ndarray of shape (n_samples, n_features) Values of the visible layer to start from. Returns ------- v_new : ndarray of shape (n_samples, n_features) Values of the visible layer after one Gibbs step. """ check_is_fitted(self) if not hasattr(self, "random_state_"): self.random_state_ = check_random_state(self.random_state) h_ = self._sample_hiddens(v, self.random_state_) v_ = self._sample_visibles(h_, self.random_state_) return v_
Perform one Gibbs sampling step. Parameters ---------- v : ndarray of shape (n_samples, n_features) Values of the visible layer to start from. Returns ------- v_new : ndarray of shape (n_samples, n_features) Values of the visible layer after one Gibbs step.
gibbs
python
scikit-learn/scikit-learn
sklearn/neural_network/_rbm.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_rbm.py
BSD-3-Clause
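A short usage sketch for `gibbs` above: fit a `BernoulliRBM` on toy binary data and run a few alternating h ~ P(h|v), v ~ P(v|h) steps. The data and hyper-parameters are arbitrary.
import numpy as np
from sklearn.neural_network import BernoulliRBM

X = np.array([[0, 1, 1, 0], [1, 1, 0, 0], [0, 0, 1, 1]], dtype=np.float64)
rbm = BernoulliRBM(n_components=2, n_iter=30, random_state=0).fit(X)

# Run a short Gibbs chain starting from the training data.
v = X.copy()
for _ in range(5):
    v = rbm.gibbs(v)          # one full h ~ P(h|v), v ~ P(v|h) step
print(v.astype(int))          # boolean samples of the visible layer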
def partial_fit(self, X, y=None): """Fit the model to the partial segment of the data X. Parameters ---------- X : ndarray of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None Target values (None for unsupervised transformations). Returns ------- self : BernoulliRBM The fitted model. """ first_pass = not hasattr(self, "components_") X = validate_data( self, X, accept_sparse="csr", dtype=np.float64, reset=first_pass ) if not hasattr(self, "random_state_"): self.random_state_ = check_random_state(self.random_state) if not hasattr(self, "components_"): self.components_ = np.asarray( self.random_state_.normal(0, 0.01, (self.n_components, X.shape[1])), order="F", ) self._n_features_out = self.components_.shape[0] if not hasattr(self, "intercept_hidden_"): self.intercept_hidden_ = np.zeros( self.n_components, ) if not hasattr(self, "intercept_visible_"): self.intercept_visible_ = np.zeros( X.shape[1], ) if not hasattr(self, "h_samples_"): self.h_samples_ = np.zeros((self.batch_size, self.n_components)) self._fit(X, self.random_state_)
Fit the model to the partial segment of the data X. Parameters ---------- X : ndarray of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None Target values (None for unsupervised transformations). Returns ------- self : BernoulliRBM The fitted model.
partial_fit
python
scikit-learn/scikit-learn
sklearn/neural_network/_rbm.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_rbm.py
BSD-3-Clause
def _fit(self, v_pos, rng): """Inner fit for one mini-batch. Adjust the parameters to maximize the likelihood of v using Stochastic Maximum Likelihood (SML). Parameters ---------- v_pos : ndarray of shape (n_samples, n_features) The data to use for training. rng : RandomState instance Random number generator to use for sampling. """ h_pos = self._mean_hiddens(v_pos) v_neg = self._sample_visibles(self.h_samples_, rng) h_neg = self._mean_hiddens(v_neg) lr = float(self.learning_rate) / v_pos.shape[0] update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T update -= np.dot(h_neg.T, v_neg) self.components_ += lr * update self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0)) self.intercept_visible_ += lr * ( np.asarray(v_pos.sum(axis=0)).squeeze() - v_neg.sum(axis=0) ) h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial self.h_samples_ = np.floor(h_neg, h_neg)
Inner fit for one mini-batch. Adjust the parameters to maximize the likelihood of v using Stochastic Maximum Likelihood (SML). Parameters ---------- v_pos : ndarray of shape (n_samples, n_features) The data to use for training. rng : RandomState instance Random number generator to use for sampling.
_fit
python
scikit-learn/scikit-learn
sklearn/neural_network/_rbm.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_rbm.py
BSD-3-Clause
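The `_fit` entry above is one persistent-contrastive-divergence (SML) step: a positive phase from the mini-batch, a negative phase from persistent fantasy particles, and a gradient step on weights and intercepts. A rough NumPy sketch of the same update, with `W`, `b_h`, `b_v` and `h_samples` as hypothetical stand-ins for `components_`, `intercept_hidden_`, `intercept_visible_` and `h_samples_`:
import numpy as np
from scipy.special import expit

rng = np.random.default_rng(0)
n_samples, n_visible, n_hidden, lr = 8, 4, 3, 0.1

v_pos = rng.integers(0, 2, size=(n_samples, n_visible)).astype(float)  # mini-batch
W = rng.normal(0, 0.01, size=(n_hidden, n_visible))
b_h, b_v = np.zeros(n_hidden), np.zeros(n_visible)
h_samples = np.zeros((n_samples, n_hidden))   # persistent fantasy particles

# Positive phase (data) and negative phase (fantasy particles).
h_pos = expit(v_pos @ W.T + b_h)
v_neg = (rng.uniform(size=(n_samples, n_visible)) < expit(h_samples @ W + b_v)).astype(float)
h_neg = expit(v_neg @ W.T + b_h)

step = lr / n_samples
W += step * (h_pos.T @ v_pos - h_neg.T @ v_neg)
b_h += step * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
b_v += step * (v_pos.sum(axis=0) - v_neg.sum(axis=0))

# Refresh the persistent chain by sampling new binary hidden states.
h_samples = (rng.uniform(size=h_neg.shape) < h_neg).astype(float)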
def score_samples(self, X): """Compute the pseudo-likelihood of X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Values of the visible layer. Must be all-boolean (not checked). Returns ------- pseudo_likelihood : ndarray of shape (n_samples,) Value of the pseudo-likelihood (proxy for likelihood). Notes ----- This method is not deterministic: it computes a quantity called the free energy on X, then on a randomly corrupted version of X, and returns the log of the logistic function of the difference. """ check_is_fitted(self) v = validate_data(self, X, accept_sparse="csr", reset=False) rng = check_random_state(self.random_state) # Randomly corrupt one feature in each sample in v. ind = (np.arange(v.shape[0]), rng.randint(0, v.shape[1], v.shape[0])) if sp.issparse(v): data = -2 * v[ind] + 1 if isinstance(data, np.matrix): # v is a sparse matrix v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape) else: # v is a sparse array v_ = v + sp.csr_array((data.ravel(), ind), shape=v.shape) else: v_ = v.copy() v_[ind] = 1 - v_[ind] fe = self._free_energy(v) fe_ = self._free_energy(v_) # log(expit(x)) = log(1 / (1 + exp(-x)) = -np.logaddexp(0, -x) return -v.shape[1] * np.logaddexp(0, -(fe_ - fe))
Compute the pseudo-likelihood of X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Values of the visible layer. Must be all-boolean (not checked). Returns ------- pseudo_likelihood : ndarray of shape (n_samples,) Value of the pseudo-likelihood (proxy for likelihood). Notes ----- This method is not deterministic: it computes a quantity called the free energy on X, then on a randomly corrupted version of X, and returns the log of the logistic function of the difference.
score_samples
python
scikit-learn/scikit-learn
sklearn/neural_network/_rbm.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_rbm.py
BSD-3-Clause
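A brief usage note on `score_samples` above: the returned pseudo-likelihood is a stochastic proxy (one randomly corrupted feature per sample), so values change between calls unless `random_state` is set. A minimal sketch:
import numpy as np
from sklearn.neural_network import BernoulliRBM

X = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]], dtype=np.float64)
rbm = BernoulliRBM(n_components=2, n_iter=20, random_state=0).fit(X)

scores = rbm.score_samples(X)   # one non-positive value per sample
print(scores.shape)             # (3,)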
def fit(self, X, y=None): """Fit the model to the data X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None Target values (None for unsupervised transformations). Returns ------- self : BernoulliRBM The fitted model. """ X = validate_data(self, X, accept_sparse="csr", dtype=(np.float64, np.float32)) n_samples = X.shape[0] rng = check_random_state(self.random_state) self.components_ = np.asarray( rng.normal(0, 0.01, (self.n_components, X.shape[1])), order="F", dtype=X.dtype, ) self._n_features_out = self.components_.shape[0] self.intercept_hidden_ = np.zeros(self.n_components, dtype=X.dtype) self.intercept_visible_ = np.zeros(X.shape[1], dtype=X.dtype) self.h_samples_ = np.zeros((self.batch_size, self.n_components), dtype=X.dtype) n_batches = int(np.ceil(float(n_samples) / self.batch_size)) batch_slices = list( gen_even_slices(n_batches * self.batch_size, n_batches, n_samples=n_samples) ) verbose = self.verbose begin = time.time() for iteration in range(1, self.n_iter + 1): for batch_slice in batch_slices: self._fit(X[batch_slice], rng) if verbose: end = time.time() print( "[%s] Iteration %d, pseudo-likelihood = %.2f, time = %.2fs" % ( type(self).__name__, iteration, self.score_samples(X).mean(), end - begin, ) ) begin = end return self
Fit the model to the data X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None Target values (None for unsupervised transformations). Returns ------- self : BernoulliRBM The fitted model.
fit
python
scikit-learn/scikit-learn
sklearn/neural_network/_rbm.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_rbm.py
BSD-3-Clause
def update_params(self, params, grads): """Update parameters with given gradients Parameters ---------- params : list of length = len(coefs_) + len(intercepts_) The concatenated list containing coefs_ and intercepts_ in MLP model. Used for initializing velocities and updating params grads : list of length = len(params) Containing gradients with respect to coefs_ and intercepts_ in MLP model. So length should be aligned with params """ updates = self._get_updates(grads) for param, update in zip((p for p in params), updates): param += update
Update parameters with given gradients Parameters ---------- params : list of length = len(coefs_) + len(intercepts_) The concatenated list containing coefs_ and intercepts_ in MLP model. Used for initializing velocities and updating params grads : list of length = len(params) Containing gradients with respect to coefs_ and intercepts_ in MLP model. So length should be aligned with params
update_params
python
scikit-learn/scikit-learn
sklearn/neural_network/_stochastic_optimizers.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_stochastic_optimizers.py
BSD-3-Clause
def trigger_stopping(self, msg, verbose): """Decides whether it is time to stop training Parameters ---------- msg : str Message passed in for verbose output verbose : bool Print message to stdin if True Returns ------- is_stopping : bool True if training needs to stop """ if verbose: print(msg + " Stopping.") return True
Decides whether it is time to stop training Parameters ---------- msg : str Message passed in for verbose output verbose : bool Print message to stdin if True Returns ------- is_stopping : bool True if training needs to stop
trigger_stopping
python
scikit-learn/scikit-learn
sklearn/neural_network/_stochastic_optimizers.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_stochastic_optimizers.py
BSD-3-Clause
def iteration_ends(self, time_step): """Perform updates to learning rate and potential other states at the end of an iteration Parameters ---------- time_step : int number of training samples trained on so far, used to update learning rate for 'invscaling' """ if self.lr_schedule == "invscaling": self.learning_rate = ( float(self.learning_rate_init) / (time_step + 1) ** self.power_t )
Perform updates to learning rate and potential other states at the end of an iteration Parameters ---------- time_step : int number of training samples trained on so far, used to update learning rate for 'invscaling'
iteration_ends
python
scikit-learn/scikit-learn
sklearn/neural_network/_stochastic_optimizers.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_stochastic_optimizers.py
BSD-3-Clause
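Put differently, with `lr_schedule='invscaling'` the effective learning rate after `time_step` samples is `learning_rate_init / (time_step + 1) ** power_t`. A tiny numeric check of that decay, using arbitrary values:
learning_rate_init, power_t = 0.1, 0.5
for time_step in (0, 9, 99):
    lr = float(learning_rate_init) / (time_step + 1) ** power_t
    print(time_step, round(lr, 4))   # 0.1, ~0.0316, 0.01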
def _get_updates(self, grads): """Get the values used to update params with given gradients Parameters ---------- grads : list, length = len(coefs_) + len(intercepts_) Containing gradients with respect to coefs_ and intercepts_ in MLP model. So length should be aligned with params Returns ------- updates : list, length = len(grads) The values to add to params """ updates = [ self.momentum * velocity - self.learning_rate * grad for velocity, grad in zip(self.velocities, grads) ] self.velocities = updates if self.nesterov: updates = [ self.momentum * velocity - self.learning_rate * grad for velocity, grad in zip(self.velocities, grads) ] return updates
Get the values used to update params with given gradients Parameters ---------- grads : list, length = len(coefs_) + len(intercepts_) Containing gradients with respect to coefs_ and intercepts_ in MLP model. So length should be aligned with params Returns ------- updates : list, length = len(grads) The values to add to params
_get_updates
python
scikit-learn/scikit-learn
sklearn/neural_network/_stochastic_optimizers.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_stochastic_optimizers.py
BSD-3-Clause
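A hedged NumPy sketch of the momentum update above, including the Nesterov look-ahead in which the step is recomputed from the already-updated velocity. `params`, `velocities` and `grads` are toy stand-ins for the optimizer state:
import numpy as np

learning_rate, momentum, nesterov = 0.1, 0.9, True
params = [np.array([1.0, -2.0])]
velocities = [np.zeros_like(p) for p in params]
grads = [np.array([0.5, -0.5])]   # pretend gradients for one step

# Classic momentum: v <- mu * v - lr * g, then param += v.
velocities = [momentum * v - learning_rate * g for v, g in zip(velocities, grads)]
updates = velocities
if nesterov:
    # Nesterov look-ahead: recompute the step from the updated velocity.
    updates = [momentum * v - learning_rate * g for v, g in zip(velocities, grads)]

for p, u in zip(params, updates):
    p += u
print(params[0])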
def _get_updates(self, grads): """Get the values used to update params with given gradients Parameters ---------- grads : list, length = len(coefs_) + len(intercepts_) Containing gradients with respect to coefs_ and intercepts_ in MLP model. So length should be aligned with params Returns ------- updates : list, length = len(grads) The values to add to params """ self.t += 1 self.ms = [ self.beta_1 * m + (1 - self.beta_1) * grad for m, grad in zip(self.ms, grads) ] self.vs = [ self.beta_2 * v + (1 - self.beta_2) * (grad**2) for v, grad in zip(self.vs, grads) ] self.learning_rate = ( self.learning_rate_init * np.sqrt(1 - self.beta_2**self.t) / (1 - self.beta_1**self.t) ) updates = [ -self.learning_rate * m / (np.sqrt(v) + self.epsilon) for m, v in zip(self.ms, self.vs) ] return updates
Get the values used to update params with given gradients Parameters ---------- grads : list, length = len(coefs_) + len(intercepts_) Containing gradients with respect to coefs_ and intercepts_ in MLP model. So length should be aligned with params Returns ------- updates : list, length = len(grads) The values to add to params
_get_updates
python
scikit-learn/scikit-learn
sklearn/neural_network/_stochastic_optimizers.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/_stochastic_optimizers.py
BSD-3-Clause
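A hedged NumPy sketch of a single Adam step as written above, with the bias correction folded into the effective learning rate. All values are toy stand-ins:
import numpy as np

learning_rate_init, beta_1, beta_2, epsilon = 0.001, 0.9, 0.999, 1e-8
params = [np.array([0.5, -1.0])]
ms = [np.zeros_like(p) for p in params]   # first-moment estimates
vs = [np.zeros_like(p) for p in params]   # second-moment estimates
grads = [np.array([0.2, -0.1])]
t = 0

t += 1
ms = [beta_1 * m + (1 - beta_1) * g for m, g in zip(ms, grads)]
vs = [beta_2 * v + (1 - beta_2) * g**2 for v, g in zip(vs, grads)]
lr = learning_rate_init * np.sqrt(1 - beta_2**t) / (1 - beta_1**t)
updates = [-lr * m / (np.sqrt(v) + epsilon) for m, v in zip(ms, vs)]

for p, u in zip(params, updates):
    p += u
print(params[0])   # each coordinate moves by roughly learning_rate_init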
def test_poisson_loss(global_random_seed): """Test Poisson loss against well tested HalfPoissonLoss.""" n = 1000 rng = np.random.default_rng(global_random_seed) y_true = rng.integers(low=0, high=10, size=n).astype(float) y_raw = rng.standard_normal(n) y_pred = np.exp(y_raw) sw = rng.uniform(low=0.1, high=10, size=n) assert 0 in y_true loss = poisson_loss(y_true=y_true, y_pred=y_pred, sample_weight=sw) pl = HalfPoissonLoss() loss_ref = ( pl(y_true=y_true, raw_prediction=y_raw, sample_weight=sw) + pl.constant_to_optimal_zero(y_true=y_true, sample_weight=sw).mean() / sw.mean() ) assert loss == pytest.approx(loss_ref, rel=1e-12)
Test Poisson loss against well tested HalfPoissonLoss.
test_poisson_loss
python
scikit-learn/scikit-learn
sklearn/neural_network/tests/test_base.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/tests/test_base.py
BSD-3-Clause
def test_mlp_loading_from_joblib_partial_fit(tmp_path): """Loading from MLP and partial fitting updates weights. Non-regression test for #19626.""" pre_trained_estimator = MLPRegressor( hidden_layer_sizes=(42,), random_state=42, learning_rate_init=0.01, max_iter=200 ) features, target = [[2]], [4] # Fit on x=2, y=4 pre_trained_estimator.fit(features, target) # dump and load model pickled_file = tmp_path / "mlp.pkl" joblib.dump(pre_trained_estimator, pickled_file) load_estimator = joblib.load(pickled_file) # Train for a more epochs on point x=2, y=1 fine_tune_features, fine_tune_target = [[2]], [1] for _ in range(200): load_estimator.partial_fit(fine_tune_features, fine_tune_target) # finetuned model learned the new target predicted_value = load_estimator.predict(fine_tune_features) assert_allclose(predicted_value, fine_tune_target, rtol=1e-4)
Loading from MLP and partial fitting updates weights. Non-regression test for #19626.
test_mlp_loading_from_joblib_partial_fit
python
scikit-learn/scikit-learn
sklearn/neural_network/tests/test_mlp.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/tests/test_mlp.py
BSD-3-Clause
def test_preserve_feature_names(Estimator): """Check that feature names are preserved when early stopping is enabled. Feature names are required for consistency checks during scoring. Non-regression test for gh-24846 """ pd = pytest.importorskip("pandas") rng = np.random.RandomState(0) X = pd.DataFrame(data=rng.randn(10, 2), columns=["colname_a", "colname_b"]) y = pd.Series(data=np.full(10, 1), name="colname_y") model = Estimator(early_stopping=True, validation_fraction=0.2) with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) model.fit(X, y)
Check that feature names are preserved when early stopping is enabled. Feature names are required for consistency checks during scoring. Non-regression test for gh-24846
test_preserve_feature_names
python
scikit-learn/scikit-learn
sklearn/neural_network/tests/test_mlp.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/tests/test_mlp.py
BSD-3-Clause
def test_mlp_warm_start_with_early_stopping(MLPEstimator): """Check that early stopping works with warm start.""" mlp = MLPEstimator( max_iter=10, random_state=0, warm_start=True, early_stopping=True ) with warnings.catch_warnings(): warnings.simplefilter("ignore", ConvergenceWarning) mlp.fit(X_iris, y_iris) n_validation_scores = len(mlp.validation_scores_) mlp.set_params(max_iter=20) mlp.fit(X_iris, y_iris) assert len(mlp.validation_scores_) > n_validation_scores
Check that early stopping works with warm start.
test_mlp_warm_start_with_early_stopping
python
scikit-learn/scikit-learn
sklearn/neural_network/tests/test_mlp.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/tests/test_mlp.py
BSD-3-Clause
def test_mlp_warm_start_no_convergence(MLPEstimator, solver): """Check that we stop the number of iteration at `max_iter` when warm starting. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/24764 """ model = MLPEstimator( solver=solver, warm_start=True, early_stopping=False, max_iter=10, n_iter_no_change=np.inf, random_state=0, ) with pytest.warns(ConvergenceWarning): model.fit(X_iris, y_iris) assert model.n_iter_ == 10 model.set_params(max_iter=20) with pytest.warns(ConvergenceWarning): model.fit(X_iris, y_iris) assert model.n_iter_ == 20
Check that we stop the number of iteration at `max_iter` when warm starting. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/24764
test_mlp_warm_start_no_convergence
python
scikit-learn/scikit-learn
sklearn/neural_network/tests/test_mlp.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/tests/test_mlp.py
BSD-3-Clause
def test_mlp_partial_fit_after_fit(MLPEstimator): """Check partial fit does not fail after fit when early_stopping=True. Non-regression test for gh-25693. """ mlp = MLPEstimator(early_stopping=True, random_state=0).fit(X_iris, y_iris) msg = "partial_fit does not support early_stopping=True" with pytest.raises(ValueError, match=msg): mlp.partial_fit(X_iris, y_iris)
Check partial fit does not fail after fit when early_stopping=True. Non-regression test for gh-25693.
test_mlp_partial_fit_after_fit
python
scikit-learn/scikit-learn
sklearn/neural_network/tests/test_mlp.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/tests/test_mlp.py
BSD-3-Clause
def test_mlp_diverging_loss(): """Test that a diverging model does not raise errors when early stopping is enabled. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/29504 """ mlp = MLPRegressor( hidden_layer_sizes=100, activation="identity", solver="sgd", alpha=0.0001, learning_rate="constant", learning_rate_init=1, shuffle=True, max_iter=20, early_stopping=True, n_iter_no_change=10, random_state=0, ) with warnings.catch_warnings(): # RuntimeWarning: overflow encountered in matmul # ConvergenceWarning: Stochastic Optimizer: Maximum iteration warnings.simplefilter("ignore", RuntimeWarning) warnings.simplefilter("ignore", ConvergenceWarning) mlp.fit(X_iris, y_iris) # In python, float("nan") != float("nan") assert str(mlp.validation_scores_[-1]) == str(np.nan) assert isinstance(mlp.validation_scores_[-1], float)
Test that a diverging model does not raise errors when early stopping is enabled. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/29504
test_mlp_diverging_loss
python
scikit-learn/scikit-learn
sklearn/neural_network/tests/test_mlp.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/tests/test_mlp.py
BSD-3-Clause
def test_mlp_vs_poisson_glm_equivalent(global_random_seed): """Test MLP with Poisson loss and no hidden layer equals GLM.""" n = 100 rng = np.random.default_rng(global_random_seed) X = np.linspace(0, 1, n) y = rng.poisson(np.exp(X + 1)) X = X.reshape(n, -1) glm = PoissonRegressor(alpha=0, tol=1e-7).fit(X, y) # Unfortunately, we can't set a zero hidden_layer_size, so we use a trick by using # just one hidden layer node with an identity activation. Coefficients will # therefore be different, but predictions are the same. mlp = MLPRegressor( loss="poisson", hidden_layer_sizes=(1,), activation="identity", alpha=0, solver="lbfgs", tol=1e-7, random_state=np.random.RandomState(global_random_seed + 1), ).fit(X, y) assert_allclose(mlp.predict(X), glm.predict(X), rtol=1e-4) # The same does not work with the squared error because the output activation is # the identity instead of the exponential. mlp = MLPRegressor( loss="squared_error", hidden_layer_sizes=(1,), activation="identity", alpha=0, solver="lbfgs", tol=1e-7, random_state=np.random.RandomState(global_random_seed + 1), ).fit(X, y) assert not np.allclose(mlp.predict(X), glm.predict(X), rtol=1e-4)
Test MLP with Poisson loss and no hidden layer equals GLM.
test_mlp_vs_poisson_glm_equivalent
python
scikit-learn/scikit-learn
sklearn/neural_network/tests/test_mlp.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/tests/test_mlp.py
BSD-3-Clause
def test_minimum_input_sample_size(): """Check error message when the validation set is too small.""" X, y = make_regression(n_samples=2, n_features=5, random_state=0) model = MLPRegressor(early_stopping=True, random_state=0) with pytest.raises(ValueError, match="The validation set is too small"): model.fit(X, y)
Check error message when the validation set is too small.
test_minimum_input_sample_size
python
scikit-learn/scikit-learn
sklearn/neural_network/tests/test_mlp.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/neural_network/tests/test_mlp.py
BSD-3-Clause
def _is_constant_feature(var, mean, n_samples): """Detect if a feature is indistinguishable from a constant feature. The detection is based on its computed variance and on the theoretical error bounds of the '2 pass algorithm' for variance computation. See "Algorithms for computing the sample variance: analysis and recommendations", by Chan, Golub, and LeVeque. """ # In scikit-learn, variance is always computed using float64 accumulators. eps = np.finfo(np.float64).eps upper_bound = n_samples * eps * var + (n_samples * mean * eps) ** 2 return var <= upper_bound
Detect if a feature is indistinguishable from a constant feature. The detection is based on its computed variance and on the theoretical error bounds of the '2 pass algorithm' for variance computation. See "Algorithms for computing the sample variance: analysis and recommendations", by Chan, Golub, and LeVeque.
_is_constant_feature
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
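A small demonstration of the bound used in `_is_constant_feature` above, re-implemented locally for illustration: a numerically constant column stays under the error bound of the two-pass variance estimate, while a genuinely varying column does not.
import numpy as np

def is_constant_feature(var, mean, n_samples):
    # Same bound as above: numerical error of the two-pass variance estimate.
    eps = np.finfo(np.float64).eps
    upper_bound = n_samples * eps * var + (n_samples * mean * eps) ** 2
    return var <= upper_bound

X = np.column_stack([np.full(1000, 3.14), np.linspace(0.0, 1.0, 1000)])
var, mean, n = X.var(axis=0), X.mean(axis=0), X.shape[0]
print([is_constant_feature(v, m, n) for v, m in zip(var, mean)])  # [True, False]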
def _handle_zeros_in_scale(scale, copy=True, constant_mask=None): """Set scales of near constant features to 1. The goal is to avoid division by very small or zero values. Near constant features are detected automatically by identifying scales close to machine precision unless they are precomputed by the caller and passed with the `constant_mask` kwarg. Typically for standard scaling, the scales are the standard deviation while near constant features are better detected on the computed variances which are closer to machine precision by construction. """ # if we are fitting on 1D arrays, scale might be a scalar if np.isscalar(scale): if scale == 0.0: scale = 1.0 return scale # scale is an array else: xp, _ = get_namespace(scale) if constant_mask is None: # Detect near constant values to avoid dividing by a very small # value that could lead to surprising results and numerical # stability issues. constant_mask = scale < 10 * xp.finfo(scale.dtype).eps if copy: # New array to avoid side-effects scale = xp.asarray(scale, copy=True) scale[constant_mask] = 1.0 return scale
Set scales of near constant features to 1. The goal is to avoid division by very small or zero values. Near constant features are detected automatically by identifying scales close to machine precision unless they are precomputed by the caller and passed with the `constant_mask` kwarg. Typically for standard scaling, the scales are the standard deviation while near constant features are better detected on the computed variances which are closer to machine precision by construction.
_handle_zeros_in_scale
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
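A NumPy-only sketch of the idea behind `_handle_zeros_in_scale` above (not calling the private helper): scales at or near machine precision are replaced by 1 so that dividing by them becomes a no-op for (near) constant features.
import numpy as np

scale = np.array([2.0, 0.0, 1e-17])   # second and third features are (near) constant
eps = np.finfo(scale.dtype).eps
constant_mask = scale < 10 * eps       # same detection rule as above

safe_scale = scale.copy()
safe_scale[constant_mask] = 1.0        # dividing by 1 leaves constant features untouched
print(safe_scale)                      # [2. 1. 1.]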
def scale(X, *, axis=0, with_mean=True, with_std=True, copy=True): """Standardize a dataset along any axis. Center to the mean and component wise scale to unit variance. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to center and scale. axis : {0, 1}, default=0 Axis used to compute the means and standard deviations along. If 0, independently standardize each feature, otherwise (if 1) standardize each sample. with_mean : bool, default=True If True, center the data before scaling. with_std : bool, default=True If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : bool, default=True If False, try to avoid a copy and scale in place. This is not guaranteed to always work in place; e.g. if the data is a numpy array with an int dtype, a copy will be returned even with copy=False. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) The transformed data. See Also -------- StandardScaler : Performs scaling to unit variance using the Transformer API (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_mean=False` (in that case, only variance scaling will be performed on the features of the CSC matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSC matrix. NaNs are treated as missing values: disregarded to compute the statistics, and maintained during the data transformation. We use a biased estimator for the standard deviation, equivalent to `numpy.std(x, ddof=0)`. Note that the choice of `ddof` is unlikely to affect model performance. For a comparison of the different scalers, transformers, and normalizers, see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. .. warning:: Risk of data leak Do not use :func:`~sklearn.preprocessing.scale` unless you know what you are doing. A common mistake is to apply it to the entire data *before* splitting into training and test sets. This will bias the model evaluation because information would have leaked from the test set to the training set. In general, we recommend using :class:`~sklearn.preprocessing.StandardScaler` within a :ref:`Pipeline <pipeline>` in order to prevent most risks of data leaking: `pipe = make_pipeline(StandardScaler(), LogisticRegression())`. Examples -------- >>> from sklearn.preprocessing import scale >>> X = [[-2, 1, 2], [-1, 0, 1]] >>> scale(X, axis=0) # scaling each column independently array([[-1., 1., 1.], [ 1., -1., -1.]]) >>> scale(X, axis=1) # scaling each row independently array([[-1.37, 0.39, 0.98], [-1.22, 0. , 1.22]]) """ X = check_array( X, accept_sparse="csc", copy=copy, ensure_2d=False, estimator="the scale function", dtype=FLOAT_DTYPES, ensure_all_finite="allow-nan", ) if sparse.issparse(X): if with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` instead" " See docstring for motivation and alternatives." ) if axis != 0: raise ValueError( "Can only scale sparse matrix on axis=0, got axis=%d" % axis ) if with_std: _, var = mean_variance_axis(X, axis=0) var = _handle_zeros_in_scale(var, copy=False) inplace_column_scale(X, 1 / np.sqrt(var)) else: X = np.asarray(X) if with_mean: mean_ = np.nanmean(X, axis) if with_std: scale_ = np.nanstd(X, axis) # Xr is a view on the original array that enables easy use of # broadcasting on the axis in which we are interested in Xr = np.rollaxis(X, axis) if with_mean: Xr -= mean_ mean_1 = np.nanmean(Xr, axis=0) # Verify that mean_1 is 'close to zero'. If X contains very # large values, mean_1 can also be very large, due to a lack of # precision of mean_. In this case, a pre-scaling of the # concerned feature is efficient, for instance by its mean or # maximum. if not np.allclose(mean_1, 0): warnings.warn( "Numerical issues were encountered " "when centering the data " "and might not be solved. Dataset may " "contain too large values. You may need " "to prescale your features." ) Xr -= mean_1 if with_std: scale_ = _handle_zeros_in_scale(scale_, copy=False) Xr /= scale_ if with_mean: mean_2 = np.nanmean(Xr, axis=0) # If mean_2 is not 'close to zero', it comes from the fact that # scale_ is very small so that mean_2 = mean_1/scale_ > 0, even # if mean_1 was close to zero. The problem is thus essentially # due to the lack of precision of mean_. A solution is then to # subtract the mean again: if not np.allclose(mean_2, 0): warnings.warn( "Numerical issues were encountered " "when scaling the data " "and might not be solved. The standard " "deviation of the data is probably " "very close to 0. " ) Xr -= mean_2 return X
Standardize a dataset along any axis. Center to the mean and component wise scale to unit variance. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to center and scale. axis : {0, 1}, default=0 Axis used to compute the means and standard deviations along. If 0, independently standardize each feature, otherwise (if 1) standardize each sample. with_mean : bool, default=True If True, center the data before scaling. with_std : bool, default=True If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : bool, default=True If False, try to avoid a copy and scale in place. This is not guaranteed to always work in place; e.g. if the data is a numpy array with an int dtype, a copy will be returned even with copy=False. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) The transformed data. See Also -------- StandardScaler : Performs scaling to unit variance using the Transformer API (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_mean=False` (in that case, only variance scaling will be performed on the features of the CSC matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSC matrix. NaNs are treated as missing values: disregarded to compute the statistics, and maintained during the data transformation. We use a biased estimator for the standard deviation, equivalent to `numpy.std(x, ddof=0)`. Note that the choice of `ddof` is unlikely to affect model performance. For a comparison of the different scalers, transformers, and normalizers, see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. .. warning:: Risk of data leak Do not use :func:`~sklearn.preprocessing.scale` unless you know what you are doing. A common mistake is to apply it to the entire data *before* splitting into training and test sets. This will bias the model evaluation because information would have leaked from the test set to the training set. In general, we recommend using :class:`~sklearn.preprocessing.StandardScaler` within a :ref:`Pipeline <pipeline>` in order to prevent most risks of data leaking: `pipe = make_pipeline(StandardScaler(), LogisticRegression())`. Examples -------- >>> from sklearn.preprocessing import scale >>> X = [[-2, 1, 2], [-1, 0, 1]] >>> scale(X, axis=0) # scaling each column independently array([[-1., 1., 1.], [ 1., -1., -1.]]) >>> scale(X, axis=1) # scaling each row independently array([[-1.37, 0.39, 0.98], [-1.22, 0. , 1.22]])
scale
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
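As the docstring above notes, `scale` is the function counterpart of `StandardScaler`; on a fixed training set the two give the same result, while the estimator is the form that belongs in a `Pipeline` to avoid leakage. A quick check:
import numpy as np
from sklearn.preprocessing import StandardScaler, scale

rng = np.random.RandomState(0)
X = rng.normal(loc=3.0, scale=2.0, size=(20, 3))

X_func = scale(X)                              # function interface
X_est = StandardScaler().fit_transform(X)      # estimator interface

assert np.allclose(X_func, X_est)
print(X_func.mean(axis=0).round(6), X_func.std(axis=0).round(6))  # ~0 and 1 per column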
def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, because they are all set together # in partial_fit if hasattr(self, "scale_"): del self.scale_ del self.min_ del self.n_samples_seen_ del self.data_min_ del self.data_max_ del self.data_range_
Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched.
_reset
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def fit(self, X, y=None): """Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like of shape (n_samples, n_features) The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. y : None Ignored. Returns ------- self : object Fitted scaler. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y)
Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like of shape (n_samples, n_features) The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. y : None Ignored. Returns ------- self : object Fitted scaler.
fit
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def partial_fit(self, X, y=None): """Online computation of min and max on X for later scaling. All of X is processed as a single batch. This is intended for cases when :meth:`fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. Parameters ---------- X : array-like of shape (n_samples, n_features) The data used to compute the mean and standard deviation used for later scaling along the features axis. y : None Ignored. Returns ------- self : object Fitted scaler. """ feature_range = self.feature_range if feature_range[0] >= feature_range[1]: raise ValueError( "Minimum of desired feature range must be smaller than maximum. Got %s." % str(feature_range) ) if sparse.issparse(X): raise TypeError( "MinMaxScaler does not support sparse input. " "Consider using MaxAbsScaler instead." ) xp, _ = get_namespace(X) first_pass = not hasattr(self, "n_samples_seen_") X = validate_data( self, X, reset=first_pass, dtype=_array_api.supported_float_dtypes(xp), ensure_all_finite="allow-nan", ) device_ = device(X) feature_range = ( xp.asarray(feature_range[0], dtype=X.dtype, device=device_), xp.asarray(feature_range[1], dtype=X.dtype, device=device_), ) data_min = _array_api._nanmin(X, axis=0, xp=xp) data_max = _array_api._nanmax(X, axis=0, xp=xp) if first_pass: self.n_samples_seen_ = X.shape[0] else: data_min = xp.minimum(self.data_min_, data_min) data_max = xp.maximum(self.data_max_, data_max) self.n_samples_seen_ += X.shape[0] data_range = data_max - data_min self.scale_ = (feature_range[1] - feature_range[0]) / _handle_zeros_in_scale( data_range, copy=True ) self.min_ = feature_range[0] - data_min * self.scale_ self.data_min_ = data_min self.data_max_ = data_max self.data_range_ = data_range return self
Online computation of min and max on X for later scaling. All of X is processed as a single batch. This is intended for cases when :meth:`fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. Parameters ---------- X : array-like of shape (n_samples, n_features) The data used to compute the mean and standard deviation used for later scaling along the features axis. y : None Ignored. Returns ------- self : object Fitted scaler.
partial_fit
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
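`partial_fit` above stores `data_min_`/`data_max_` and derives `scale_` and `min_` so that `transform` reduces to `X * scale_ + min_`. A small check of that closed form on toy data:
import numpy as np
from sklearn.preprocessing import MinMaxScaler

X = np.array([[1.0, 10.0], [2.0, 30.0], [4.0, 20.0]])
mms = MinMaxScaler(feature_range=(0, 1)).fit(X)

manual = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
assert np.allclose(mms.transform(X), manual)
assert np.allclose(mms.transform(X), X * mms.scale_ + mms.min_)
print(mms.data_min_, mms.data_max_, mms.scale_)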
def transform(self, X): """Scale features of X according to feature_range. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data that will be transformed. Returns ------- Xt : ndarray of shape (n_samples, n_features) Transformed data. """ check_is_fitted(self) xp, _ = get_namespace(X) X = validate_data( self, X, copy=self.copy, dtype=_array_api.supported_float_dtypes(xp), force_writeable=True, ensure_all_finite="allow-nan", reset=False, ) X *= self.scale_ X += self.min_ if self.clip: device_ = device(X) X = _modify_in_place_if_numpy( xp, xp.clip, X, xp.asarray(self.feature_range[0], dtype=X.dtype, device=device_), xp.asarray(self.feature_range[1], dtype=X.dtype, device=device_), out=X, ) return X
Scale features of X according to feature_range. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data that will be transformed. Returns ------- Xt : ndarray of shape (n_samples, n_features) Transformed data.
transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def inverse_transform(self, X): """Undo the scaling of X according to feature_range. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data that will be transformed. It cannot be sparse. Returns ------- X_original : ndarray of shape (n_samples, n_features) Transformed data. """ check_is_fitted(self) xp, _ = get_namespace(X) X = check_array( X, copy=self.copy, dtype=_array_api.supported_float_dtypes(xp), force_writeable=True, ensure_all_finite="allow-nan", ) X -= self.min_ X /= self.scale_ return X
Undo the scaling of X according to feature_range. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data that will be transformed. It cannot be sparse. Returns ------- X_original : ndarray of shape (n_samples, n_features) Transformed data.
inverse_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def minmax_scale(X, feature_range=(0, 1), *, axis=0, copy=True): """Transform features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by (when ``axis=0``):: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. The transformation is calculated as (when ``axis=0``):: X_scaled = scale * X + min - X.min(axis=0) * scale where scale = (max - min) / (X.max(axis=0) - X.min(axis=0)) This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. .. versionadded:: 0.17 *minmax_scale* function interface to :class:`~sklearn.preprocessing.MinMaxScaler`. Parameters ---------- X : array-like of shape (n_samples, n_features) The data. feature_range : tuple (min, max), default=(0, 1) Desired range of transformed data. axis : {0, 1}, default=0 Axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : bool, default=True If False, try to avoid a copy and scale in place. This is not guaranteed to always work in place; e.g. if the data is a numpy array with an int dtype, a copy will be returned even with copy=False. Returns ------- X_tr : ndarray of shape (n_samples, n_features) The transformed data. .. warning:: Risk of data leak Do not use :func:`~sklearn.preprocessing.minmax_scale` unless you know what you are doing. A common mistake is to apply it to the entire data *before* splitting into training and test sets. This will bias the model evaluation because information would have leaked from the test set to the training set. In general, we recommend using :class:`~sklearn.preprocessing.MinMaxScaler` within a :ref:`Pipeline <pipeline>` in order to prevent most risks of data leaking: `pipe = make_pipeline(MinMaxScaler(), LogisticRegression())`. See Also -------- MinMaxScaler : Performs scaling to a given range using the Transformer API (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). Notes ----- For a comparison of the different scalers, transformers, and normalizers, see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. Examples -------- >>> from sklearn.preprocessing import minmax_scale >>> X = [[-2, 1, 2], [-1, 0, 1]] >>> minmax_scale(X, axis=0) # scale each column independently array([[0., 1., 1.], [1., 0., 0.]]) >>> minmax_scale(X, axis=1) # scale each row independently array([[0. , 0.75, 1. ], [0. , 0.5 , 1. ]]) """ # Unlike the scaler object, this function allows 1d input. # If copy is required, it will be done inside the scaler object. X = check_array( X, copy=False, ensure_2d=False, dtype=FLOAT_DTYPES, ensure_all_finite="allow-nan", ) original_ndim = X.ndim if original_ndim == 1: X = X.reshape(X.shape[0], 1) s = MinMaxScaler(feature_range=feature_range, copy=copy) if axis == 0: X = s.fit_transform(X) else: X = s.fit_transform(X.T).T if original_ndim == 1: X = X.ravel() return X
Transform features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by (when ``axis=0``):: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. The transformation is calculated as (when ``axis=0``):: X_scaled = scale * X + min - X.min(axis=0) * scale where scale = (max - min) / (X.max(axis=0) - X.min(axis=0)) This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. .. versionadded:: 0.17 *minmax_scale* function interface to :class:`~sklearn.preprocessing.MinMaxScaler`. Parameters ---------- X : array-like of shape (n_samples, n_features) The data. feature_range : tuple (min, max), default=(0, 1) Desired range of transformed data. axis : {0, 1}, default=0 Axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : bool, default=True If False, try to avoid a copy and scale in place. This is not guaranteed to always work in place; e.g. if the data is a numpy array with an int dtype, a copy will be returned even with copy=False. Returns ------- X_tr : ndarray of shape (n_samples, n_features) The transformed data. .. warning:: Risk of data leak Do not use :func:`~sklearn.preprocessing.minmax_scale` unless you know what you are doing. A common mistake is to apply it to the entire data *before* splitting into training and test sets. This will bias the model evaluation because information would have leaked from the test set to the training set. In general, we recommend using :class:`~sklearn.preprocessing.MinMaxScaler` within a :ref:`Pipeline <pipeline>` in order to prevent most risks of data leaking: `pipe = make_pipeline(MinMaxScaler(), LogisticRegression())`. See Also -------- MinMaxScaler : Performs scaling to a given range using the Transformer API (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). Notes ----- For a comparison of the different scalers, transformers, and normalizers, see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. Examples -------- >>> from sklearn.preprocessing import minmax_scale >>> X = [[-2, 1, 2], [-1, 0, 1]] >>> minmax_scale(X, axis=0) # scale each column independently array([[0., 1., 1.], [1., 0., 0.]]) >>> minmax_scale(X, axis=1) # scale each row independently array([[0. , 0.75, 1. ], [0. , 0.5 , 1. ]])
minmax_scale
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, because they are all set together # in partial_fit if hasattr(self, "scale_"): del self.scale_ del self.n_samples_seen_ del self.mean_ del self.var_
Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched.
_reset
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def fit(self, X, y=None, sample_weight=None): """Compute the mean and std to be used for later scaling. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to compute the mean and standard deviation used for later scaling along the features axis. y : None Ignored. sample_weight : array-like of shape (n_samples,), default=None Individual weights for each sample. .. versionadded:: 0.24 parameter *sample_weight* support to StandardScaler. Returns ------- self : object Fitted scaler. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y, sample_weight)
Compute the mean and std to be used for later scaling. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to compute the mean and standard deviation used for later scaling along the features axis. y : None Ignored. sample_weight : array-like of shape (n_samples,), default=None Individual weights for each sample. .. versionadded:: 0.24 parameter *sample_weight* support to StandardScaler. Returns ------- self : object Fitted scaler.
fit
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def partial_fit(self, X, y=None, sample_weight=None): """Online computation of mean and std on X for later scaling. All of X is processed as a single batch. This is intended for cases when :meth:`fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. The algorithm for incremental mean and std is given in Equation 1.5a,b in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms for computing the sample variance: Analysis and recommendations." The American Statistician 37.3 (1983): 242-247: Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to compute the mean and standard deviation used for later scaling along the features axis. y : None Ignored. sample_weight : array-like of shape (n_samples,), default=None Individual weights for each sample. .. versionadded:: 0.24 parameter *sample_weight* support to StandardScaler. Returns ------- self : object Fitted scaler. """ first_call = not hasattr(self, "n_samples_seen_") X = validate_data( self, X, accept_sparse=("csr", "csc"), dtype=FLOAT_DTYPES, ensure_all_finite="allow-nan", reset=first_call, ) n_features = X.shape[1] if sample_weight is not None: sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) # Even in the case of `with_mean=False`, we update the mean anyway # This is needed for the incremental computation of the var # See incr_mean_variance_axis and _incremental_mean_variance_axis # if n_samples_seen_ is an integer (i.e. no missing values), we need to # transform it to a NumPy array of shape (n_features,) required by # incr_mean_variance_axis and _incremental_variance_axis dtype = np.int64 if sample_weight is None else X.dtype if not hasattr(self, "n_samples_seen_"): self.n_samples_seen_ = np.zeros(n_features, dtype=dtype) elif np.size(self.n_samples_seen_) == 1: self.n_samples_seen_ = np.repeat(self.n_samples_seen_, X.shape[1]) self.n_samples_seen_ = self.n_samples_seen_.astype(dtype, copy=False) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives." ) sparse_constructor = ( sparse.csr_matrix if X.format == "csr" else sparse.csc_matrix ) if self.with_std: # First pass if not hasattr(self, "scale_"): self.mean_, self.var_, self.n_samples_seen_ = mean_variance_axis( X, axis=0, weights=sample_weight, return_sum_weights=True ) # Next passes else: ( self.mean_, self.var_, self.n_samples_seen_, ) = incr_mean_variance_axis( X, axis=0, last_mean=self.mean_, last_var=self.var_, last_n=self.n_samples_seen_, weights=sample_weight, ) # We force the mean and variance to float64 for large arrays # See https://github.com/scikit-learn/scikit-learn/pull/12338 self.mean_ = self.mean_.astype(np.float64, copy=False) self.var_ = self.var_.astype(np.float64, copy=False) else: self.mean_ = None # as with_mean must be False for sparse self.var_ = None weights = _check_sample_weight(sample_weight, X) sum_weights_nan = weights @ sparse_constructor( (np.isnan(X.data), X.indices, X.indptr), shape=X.shape ) self.n_samples_seen_ += (np.sum(weights) - sum_weights_nan).astype( dtype ) else: # First pass if not hasattr(self, "scale_"): self.mean_ = 0.0 if self.with_std: self.var_ = 0.0 else: self.var_ = None if not self.with_mean and not self.with_std: self.mean_ = None self.var_ = None self.n_samples_seen_ += X.shape[0] - np.isnan(X).sum(axis=0) else: self.mean_, self.var_, self.n_samples_seen_ = _incremental_mean_and_var( X, self.mean_, self.var_, self.n_samples_seen_, sample_weight=sample_weight, ) # for backward-compatibility, reduce n_samples_seen_ to an integer # if the number of samples is the same for each feature (i.e. no # missing values) if np.ptp(self.n_samples_seen_) == 0: self.n_samples_seen_ = self.n_samples_seen_[0] if self.with_std: # Extract the list of near constant features on the raw variances, # before taking the square root. constant_mask = _is_constant_feature( self.var_, self.mean_, self.n_samples_seen_ ) self.scale_ = _handle_zeros_in_scale( np.sqrt(self.var_), copy=False, constant_mask=constant_mask ) else: self.scale_ = None return self
Online computation of mean and std on X for later scaling. All of X is processed as a single batch. This is intended for cases when :meth:`fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. The algorithm for incremental mean and std is given in Equation 1.5a,b in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms for computing the sample variance: Analysis and recommendations." The American Statistician 37.3 (1983): 242-247: Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to compute the mean and standard deviation used for later scaling along the features axis. y : None Ignored. sample_weight : array-like of shape (n_samples,), default=None Individual weights for each sample. .. versionadded:: 0.24 parameter *sample_weight* support to StandardScaler. Returns ------- self : object Fitted scaler.
partial_fit
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
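A quick sanity sketch for `partial_fit` above: feeding two consecutive batches reproduces, up to floating-point error, the statistics of a single `fit` on the full data, thanks to the Chan et al. incremental update.
import numpy as np
from sklearn.preprocessing import StandardScaler

rng = np.random.RandomState(0)
X = rng.normal(size=(100, 4))

full = StandardScaler().fit(X)

incremental = StandardScaler()
incremental.partial_fit(X[:60])
incremental.partial_fit(X[60:])

assert np.allclose(full.mean_, incremental.mean_)
assert np.allclose(full.var_, incremental.var_)
print(incremental.n_samples_seen_)   # 100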
def transform(self, X, copy=None): """Perform standardization by centering and scaling. Parameters ---------- X : {array-like, sparse matrix of shape (n_samples, n_features) The data used to scale along the features axis. copy : bool, default=None Copy the input X or not. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array. """ check_is_fitted(self) copy = copy if copy is not None else self.copy X = validate_data( self, X, reset=False, accept_sparse="csr", copy=copy, dtype=FLOAT_DTYPES, force_writeable=True, ensure_all_finite="allow-nan", ) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives." ) if self.scale_ is not None: inplace_column_scale(X, 1 / self.scale_) else: if self.with_mean: X -= self.mean_ if self.with_std: X /= self.scale_ return X
Perform standardization by centering and scaling. Parameters ---------- X : {array-like, sparse matrix of shape (n_samples, n_features) The data used to scale along the features axis. copy : bool, default=None Copy the input X or not. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array.
transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def inverse_transform(self, X, copy=None): """Scale back the data to the original representation. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to scale along the features axis. copy : bool, default=None Copy the input `X` or not. Returns ------- X_original : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array. """ check_is_fitted(self) copy = copy if copy is not None else self.copy X = check_array( X, accept_sparse="csr", copy=copy, dtype=FLOAT_DTYPES, force_writeable=True, ensure_all_finite="allow-nan", ) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot uncenter sparse matrices: pass `with_mean=False` " "instead See docstring for motivation and alternatives." ) if self.scale_ is not None: inplace_column_scale(X, self.scale_) else: if self.with_std: X *= self.scale_ if self.with_mean: X += self.mean_ return X
Scale back the data to the original representation. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to scale along the features axis. copy : bool, default=None Copy the input `X` or not. Returns ------- X_original : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array.
inverse_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, because they are all set together # in partial_fit if hasattr(self, "scale_"): del self.scale_ del self.n_samples_seen_ del self.max_abs_
Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched.
_reset
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def fit(self, X, y=None): """Compute the maximum absolute value to be used for later scaling. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. y : None Ignored. Returns ------- self : object Fitted scaler. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y)
Compute the maximum absolute value to be used for later scaling. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. y : None Ignored. Returns ------- self : object Fitted scaler.
fit
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def partial_fit(self, X, y=None): """Online computation of max absolute value of X for later scaling. All of X is processed as a single batch. This is intended for cases when :meth:`fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to compute the mean and standard deviation used for later scaling along the features axis. y : None Ignored. Returns ------- self : object Fitted scaler. """ xp, _ = get_namespace(X) first_pass = not hasattr(self, "n_samples_seen_") X = validate_data( self, X, reset=first_pass, accept_sparse=("csr", "csc"), dtype=_array_api.supported_float_dtypes(xp), ensure_all_finite="allow-nan", ) if sparse.issparse(X): mins, maxs = min_max_axis(X, axis=0, ignore_nan=True) max_abs = np.maximum(np.abs(mins), np.abs(maxs)) else: max_abs = _array_api._nanmax(xp.abs(X), axis=0, xp=xp) if first_pass: self.n_samples_seen_ = X.shape[0] else: max_abs = xp.maximum(self.max_abs_, max_abs) self.n_samples_seen_ += X.shape[0] self.max_abs_ = max_abs self.scale_ = _handle_zeros_in_scale(max_abs, copy=True) return self
Online computation of max absolute value of X for later scaling. All of X is processed as a single batch. This is intended for cases when :meth:`fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to compute the mean and standard deviation used for later scaling along the features axis. y : None Ignored. Returns ------- self : object Fitted scaler.
partial_fit
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
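Because partial_fit only keeps a running per-feature maximum absolute value, fitting MaxAbsScaler in batches should match a single fit on the full data. A small sketch under that assumption (the data and the two-batch split are made up):

import numpy as np
from sklearn.preprocessing import MaxAbsScaler

X = np.array([[-2.0, 1.0], [4.0, -0.5], [1.0, 3.0], [-8.0, 2.0]])  # hypothetical data

incremental = MaxAbsScaler()
for batch in np.array_split(X, 2):  # stream the data in two chunks
    incremental.partial_fit(batch)

full = MaxAbsScaler().fit(X)
print(np.allclose(incremental.max_abs_, full.max_abs_))  # True: same per-feature max |x|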
def transform(self, X): """Scale the data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data that should be scaled. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array. """ check_is_fitted(self) xp, _ = get_namespace(X) X = validate_data( self, X, accept_sparse=("csr", "csc"), copy=self.copy, reset=False, dtype=_array_api.supported_float_dtypes(xp), force_writeable=True, ensure_all_finite="allow-nan", ) if sparse.issparse(X): inplace_column_scale(X, 1.0 / self.scale_) else: X /= self.scale_ return X
Scale the data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data that should be scaled. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array.
transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def inverse_transform(self, X): """Scale back the data to the original representation. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data that should be transformed back. Returns ------- X_original : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array. """ check_is_fitted(self) xp, _ = get_namespace(X) X = check_array( X, accept_sparse=("csr", "csc"), copy=self.copy, dtype=_array_api.supported_float_dtypes(xp), force_writeable=True, ensure_all_finite="allow-nan", ) if sparse.issparse(X): inplace_column_scale(X, self.scale_) else: X *= self.scale_ return X
Scale back the data to the original representation. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data that should be transformed back. Returns ------- X_original : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array.
inverse_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
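Because MaxAbsScaler only divides each column by a constant, it preserves sparsity, and inverse_transform undoes the scaling exactly. A sketch assuming the public scipy.sparse and sklearn.preprocessing APIs (the toy matrix is made up):

import numpy as np
from scipy import sparse
from sklearn.preprocessing import MaxAbsScaler

X = sparse.csr_matrix([[0.0, -4.0], [2.0, 0.0], [1.0, 2.0]])  # hypothetical sparse input
scaler = MaxAbsScaler().fit(X)

X_tr = scaler.transform(X)               # still CSR, values scaled into [-1, 1]
X_back = scaler.inverse_transform(X_tr)  # multiplies the stored values by scale_
print(np.allclose(X.toarray(), X_back.toarray()))  # True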
def maxabs_scale(X, *, axis=0, copy=True): """Scale each feature to the [-1, 1] range without breaking the sparsity. This estimator scales each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. This scaler can also be applied to sparse CSR or CSC matrices. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data. axis : {0, 1}, default=0 Axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : bool, default=True If False, try to avoid a copy and scale in place. This is not guaranteed to always work in place; e.g. if the data is a numpy array with an int dtype, a copy will be returned even with copy=False. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) The transformed data. .. warning:: Risk of data leak Do not use :func:`~sklearn.preprocessing.maxabs_scale` unless you know what you are doing. A common mistake is to apply it to the entire data *before* splitting into training and test sets. This will bias the model evaluation because information would have leaked from the test set to the training set. In general, we recommend using :class:`~sklearn.preprocessing.MaxAbsScaler` within a :ref:`Pipeline <pipeline>` in order to prevent most risks of data leaking: `pipe = make_pipeline(MaxAbsScaler(), LogisticRegression())`. See Also -------- MaxAbsScaler : Performs scaling to the [-1, 1] range using the Transformer API (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). Notes ----- NaNs are treated as missing values: disregarded to compute the statistics, and maintained during the data transformation. For a comparison of the different scalers, transformers, and normalizers, see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. Examples -------- >>> from sklearn.preprocessing import maxabs_scale >>> X = [[-2, 1, 2], [-1, 0, 1]] >>> maxabs_scale(X, axis=0) # scale each column independently array([[-1. , 1. , 1. ], [-0.5, 0. , 0.5]]) >>> maxabs_scale(X, axis=1) # scale each row independently array([[-1. , 0.5, 1. ], [-1. , 0. , 1. ]]) """ # Unlike the scaler object, this function allows 1d input. # If copy is required, it will be done inside the scaler object. X = check_array( X, accept_sparse=("csr", "csc"), copy=False, ensure_2d=False, dtype=FLOAT_DTYPES, ensure_all_finite="allow-nan", ) original_ndim = X.ndim if original_ndim == 1: X = X.reshape(X.shape[0], 1) s = MaxAbsScaler(copy=copy) if axis == 0: X = s.fit_transform(X) else: X = s.fit_transform(X.T).T if original_ndim == 1: X = X.ravel() return X
Scale each feature to the [-1, 1] range without breaking the sparsity. This estimator scales each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. This scaler can also be applied to sparse CSR or CSC matrices. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data. axis : {0, 1}, default=0 Axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : bool, default=True If False, try to avoid a copy and scale in place. This is not guaranteed to always work in place; e.g. if the data is a numpy array with an int dtype, a copy will be returned even with copy=False. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) The transformed data. .. warning:: Risk of data leak Do not use :func:`~sklearn.preprocessing.maxabs_scale` unless you know what you are doing. A common mistake is to apply it to the entire data *before* splitting into training and test sets. This will bias the model evaluation because information would have leaked from the test set to the training set. In general, we recommend using :class:`~sklearn.preprocessing.MaxAbsScaler` within a :ref:`Pipeline <pipeline>` in order to prevent most risks of data leaking: `pipe = make_pipeline(MaxAbsScaler(), LogisticRegression())`. See Also -------- MaxAbsScaler : Performs scaling to the [-1, 1] range using the Transformer API (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). Notes ----- NaNs are treated as missing values: disregarded to compute the statistics, and maintained during the data transformation. For a comparison of the different scalers, transformers, and normalizers, see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. Examples -------- >>> from sklearn.preprocessing import maxabs_scale >>> X = [[-2, 1, 2], [-1, 0, 1]] >>> maxabs_scale(X, axis=0) # scale each column independently array([[-1. , 1. , 1. ], [-0.5, 0. , 0.5]]) >>> maxabs_scale(X, axis=1) # scale each row independently array([[-1. , 0.5, 1. ], [-1. , 0. , 1. ]])
maxabs_scale
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
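Following the data-leak warning in the docstring, the safer pattern is to fit the scaler inside a pipeline so its statistics come from the training split only. A sketch of that recommendation, assuming the public sklearn pipeline and model_selection APIs (the data and the choice of classifier are made up):

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MaxAbsScaler

rng = np.random.RandomState(0)
X = rng.randn(100, 3)          # hypothetical features
y = (X[:, 0] > 0).astype(int)  # hypothetical binary target

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
pipe = make_pipeline(MaxAbsScaler(), LogisticRegression())
pipe.fit(X_train, y_train)     # scaler statistics are computed on X_train only
print(pipe.score(X_test, y_test))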
def fit(self, X, y=None): """Compute the median and quantiles to be used for scaling. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to compute the median and quantiles used for later scaling along the features axis. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Fitted scaler. """ # at fit, convert sparse matrices to csc for optimized computation of # the quantiles X = validate_data( self, X, accept_sparse="csc", dtype=FLOAT_DTYPES, ensure_all_finite="allow-nan", ) q_min, q_max = self.quantile_range if not 0 <= q_min <= q_max <= 100: raise ValueError("Invalid quantile range: %s" % str(self.quantile_range)) if self.with_centering: if sparse.issparse(X): raise ValueError( "Cannot center sparse matrices: use `with_centering=False`" " instead. See docstring for motivation and alternatives." ) self.center_ = np.nanmedian(X, axis=0) else: self.center_ = None if self.with_scaling: quantiles = [] for feature_idx in range(X.shape[1]): if sparse.issparse(X): column_nnz_data = X.data[ X.indptr[feature_idx] : X.indptr[feature_idx + 1] ] column_data = np.zeros(shape=X.shape[0], dtype=X.dtype) column_data[: len(column_nnz_data)] = column_nnz_data else: column_data = X[:, feature_idx] quantiles.append(np.nanpercentile(column_data, self.quantile_range)) quantiles = np.transpose(quantiles) self.scale_ = quantiles[1] - quantiles[0] self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False) if self.unit_variance: adjust = stats.norm.ppf(q_max / 100.0) - stats.norm.ppf(q_min / 100.0) self.scale_ = self.scale_ / adjust else: self.scale_ = None return self
Compute the median and quantiles to be used for scaling. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to compute the median and quantiles used for later scaling along the features axis. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Fitted scaler.
fit
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
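After fit, center_ holds the per-feature medians and scale_ the per-feature quantile range, which is what makes the scaler robust to outliers. A short sketch assuming the public RobustScaler API (the data, including the outlier, is made up):

import numpy as np
from sklearn.preprocessing import RobustScaler

X = np.array([[1.0, -2.0], [2.0, 0.0], [3.0, 2.0], [100.0, 4.0]])  # outlier in column 0
scaler = RobustScaler().fit(X)

print(scaler.center_)  # per-feature medians: [2.5, 1.0]
print(scaler.scale_)   # per-feature IQR (75th minus 25th percentile)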
def transform(self, X): """Center and scale the data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to scale along the specified axis. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array. """ check_is_fitted(self) X = validate_data( self, X, accept_sparse=("csr", "csc"), copy=self.copy, dtype=FLOAT_DTYPES, force_writeable=True, reset=False, ensure_all_finite="allow-nan", ) if sparse.issparse(X): if self.with_scaling: inplace_column_scale(X, 1.0 / self.scale_) else: if self.with_centering: X -= self.center_ if self.with_scaling: X /= self.scale_ return X
Center and scale the data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to scale along the specified axis. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array.
transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def inverse_transform(self, X): """Scale back the data to the original representation. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The rescaled data to be transformed back. Returns ------- X_original : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array. """ check_is_fitted(self) X = check_array( X, accept_sparse=("csr", "csc"), copy=self.copy, dtype=FLOAT_DTYPES, force_writeable=True, ensure_all_finite="allow-nan", ) if sparse.issparse(X): if self.with_scaling: inplace_column_scale(X, self.scale_) else: if self.with_scaling: X *= self.scale_ if self.with_centering: X += self.center_ return X
Scale back the data to the original representation. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The rescaled data to be transformed back. Returns ------- X_original : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array.
inverse_transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
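As with the other scalers, the two RobustScaler methods above are inverses: transform subtracts center_ and divides by scale_, and inverse_transform reverses both steps. A round-trip sketch with made-up data:

import numpy as np
from sklearn.preprocessing import RobustScaler

X = np.array([[1.0, -2.0], [2.0, 0.0], [3.0, 2.0], [100.0, 4.0]])  # hypothetical data
scaler = RobustScaler().fit(X)

X_tr = scaler.transform(X)               # (X - center_) / scale_, column-wise
X_back = scaler.inverse_transform(X_tr)  # X_tr * scale_ + center_
print(np.allclose(X, X_back))            # True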
def robust_scale(
    X,
    *,
    axis=0,
    with_centering=True,
    with_scaling=True,
    quantile_range=(25.0, 75.0),
    copy=True,
    unit_variance=False,
):
    """Standardize a dataset along any axis.

    Center to the median and component wise scale
    according to the interquartile range.

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_sample, n_features)
        The data to center and scale.

    axis : int, default=0
        Axis used to compute the medians and IQR along. If 0,
        independently scale each feature, otherwise (if 1) scale
        each sample.

    with_centering : bool, default=True
        If `True`, center the data before scaling.

    with_scaling : bool, default=True
        If `True`, scale the data to unit variance (or equivalently,
        unit standard deviation).

    quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0,\
        default=(25.0, 75.0)
        Quantile range used to calculate `scale_`. By default this is equal to
        the IQR, i.e., `q_min` is the first quantile and `q_max` is the third
        quantile.

        .. versionadded:: 0.18

    copy : bool, default=True
        If False, try to avoid a copy and scale in place.
        This is not guaranteed to always work in place; e.g. if the data is
        a numpy array with an int dtype, a copy will be returned even with
        copy=False.

    unit_variance : bool, default=False
        If `True`, scale data so that normally distributed features have a
        variance of 1. In general, if the difference between the x-values of
        `q_max` and `q_min` for a standard normal distribution is greater
        than 1, the dataset will be scaled down. If less than 1, the dataset
        will be scaled up.

        .. versionadded:: 0.24

    Returns
    -------
    X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
        The transformed data.

    See Also
    --------
    RobustScaler : Performs centering and scaling using the Transformer API
        (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).

    Notes
    -----
    This implementation will refuse to center scipy.sparse matrices
    since it would make them non-sparse and would potentially crash the
    program with memory exhaustion problems.

    Instead the caller is expected to either set explicitly
    `with_centering=False` (in that case, only variance scaling will be
    performed on the features of the CSR matrix) or to call `X.toarray()`
    if he/she expects the materialized dense array to fit in memory.

    To avoid memory copy the caller should pass a CSR matrix.

    For a comparison of the different scalers, transformers, and normalizers,
    see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`.

    .. warning:: Risk of data leak

        Do not use :func:`~sklearn.preprocessing.robust_scale` unless you know
        what you are doing. A common mistake is to apply it to the entire data
        *before* splitting into training and test sets. This will bias the
        model evaluation because information would have leaked from the test
        set to the training set.
        In general, we recommend using
        :class:`~sklearn.preprocessing.RobustScaler` within a
        :ref:`Pipeline <pipeline>` in order to prevent most risks of data
        leaking: `pipe = make_pipeline(RobustScaler(), LogisticRegression())`.

    Examples
    --------
    >>> from sklearn.preprocessing import robust_scale
    >>> X = [[-2, 1, 2], [-1, 0, 1]]
    >>> robust_scale(X, axis=0)  # scale each column independently
    array([[-1.,  1.,  1.],
           [ 1., -1., -1.]])
    >>> robust_scale(X, axis=1)  # scale each row independently
    array([[-1.5,  0. ,  0.5],
           [-1. ,  0. ,  1. ]])
    """
    X = check_array(
        X,
        accept_sparse=("csr", "csc"),
        copy=False,
        ensure_2d=False,
        dtype=FLOAT_DTYPES,
        ensure_all_finite="allow-nan",
    )
    original_ndim = X.ndim

    if original_ndim == 1:
        X = X.reshape(X.shape[0], 1)

    s = RobustScaler(
        with_centering=with_centering,
        with_scaling=with_scaling,
        quantile_range=quantile_range,
        unit_variance=unit_variance,
        copy=copy,
    )
    if axis == 0:
        X = s.fit_transform(X)
    else:
        X = s.fit_transform(X.T).T

    if original_ndim == 1:
        X = X.ravel()

    return X
Standardize a dataset along any axis. Center to the median and component wise scale according to the interquartile range. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_sample, n_features) The data to center and scale. axis : int, default=0 Axis used to compute the medians and IQR along. If 0, independently scale each feature, otherwise (if 1) scale each sample. with_centering : bool, default=True If `True`, center the data before scaling. with_scaling : bool, default=True If `True`, scale the data to unit variance (or equivalently, unit standard deviation). quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0, default=(25.0, 75.0) Quantile range used to calculate `scale_`. By default this is equal to the IQR, i.e., `q_min` is the first quantile and `q_max` is the third quantile. .. versionadded:: 0.18 copy : bool, default=True If False, try to avoid a copy and scale in place. This is not guaranteed to always work in place; e.g. if the data is a numpy array with an int dtype, a copy will be returned even with copy=False. unit_variance : bool, default=False If `True`, scale data so that normally distributed features have a variance of 1. In general, if the difference between the x-values of `q_max` and `q_min` for a standard normal distribution is greater than 1, the dataset will be scaled down. If less than 1, the dataset will be scaled up. .. versionadded:: 0.24 Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) The transformed data. See Also -------- RobustScaler : Performs centering and scaling using the Transformer API (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_centering=False` (in that case, only variance scaling will be performed on the features of the CSR matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSR matrix. For a comparison of the different scalers, transformers, and normalizers, see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. .. warning:: Risk of data leak Do not use :func:`~sklearn.preprocessing.robust_scale` unless you know what you are doing. A common mistake is to apply it to the entire data *before* splitting into training and test sets. This will bias the model evaluation because information would have leaked from the test set to the training set. In general, we recommend using :class:`~sklearn.preprocessing.RobustScaler` within a :ref:`Pipeline <pipeline>` in order to prevent most risks of data leaking: `pipe = make_pipeline(RobustScaler(), LogisticRegression())`. Examples -------- >>> from sklearn.preprocessing import robust_scale >>> X = [[-2, 1, 2], [-1, 0, 1]] >>> robust_scale(X, axis=0) # scale each column independently array([[-1., 1., 1.], [ 1., -1., -1.]]) >>> robust_scale(X, axis=1) # scale each row independently array([[-1.5, 0. , 0.5], [-1. , 0. , 1. ]])
robust_scale
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
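Since robust_scale with axis=0 simply builds a RobustScaler with the same options and calls fit_transform, the function and the estimator should agree on the same input. A small equivalence sketch with made-up data:

import numpy as np
from sklearn.preprocessing import RobustScaler, robust_scale

X = np.array([[-2.0, 1.0, 2.0], [-1.0, 0.0, 1.0], [0.0, 5.0, -1.0]])  # hypothetical data
print(np.allclose(robust_scale(X, axis=0), RobustScaler().fit_transform(X)))  # True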
def normalize(X, norm="l2", *, axis=1, copy=True, return_norm=False): """Scale input vectors individually to unit norm (vector length). Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to normalize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. norm : {'l1', 'l2', 'max'}, default='l2' The norm to use to normalize each non zero sample (or each non-zero feature if axis is 0). axis : {0, 1}, default=1 Define axis used to normalize the data along. If 1, independently normalize each sample, otherwise (if 0) normalize each feature. copy : bool, default=True If False, try to avoid a copy and normalize in place. This is not guaranteed to always work in place; e.g. if the data is a numpy array with an int dtype, a copy will be returned even with copy=False. return_norm : bool, default=False Whether to return the computed norms. Returns ------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) Normalized input X. norms : ndarray of shape (n_samples, ) if axis=1 else (n_features, ) An array of norms along given axis for X. When X is sparse, a NotImplementedError will be raised for norm 'l1' or 'l2'. See Also -------- Normalizer : Performs normalization using the Transformer API (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). Notes ----- For a comparison of the different scalers, transformers, and normalizers, see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. Examples -------- >>> from sklearn.preprocessing import normalize >>> X = [[-2, 1, 2], [-1, 0, 1]] >>> normalize(X, norm="l1") # L1 normalization each row independently array([[-0.4, 0.2, 0.4], [-0.5, 0. , 0.5]]) >>> normalize(X, norm="l2") # L2 normalization each row independently array([[-0.67, 0.33, 0.67], [-0.71, 0. , 0.71]]) """ if axis == 0: sparse_format = "csc" else: # axis == 1: sparse_format = "csr" xp, _ = get_namespace(X) X = check_array( X, accept_sparse=sparse_format, copy=copy, estimator="the normalize function", dtype=_array_api.supported_float_dtypes(xp), force_writeable=True, ) if axis == 0: X = X.T if sparse.issparse(X): if return_norm and norm in ("l1", "l2"): raise NotImplementedError( "return_norm=True is not implemented " "for sparse matrices with norm 'l1' " "or norm 'l2'" ) if norm == "l1": inplace_csr_row_normalize_l1(X) elif norm == "l2": inplace_csr_row_normalize_l2(X) elif norm == "max": mins, maxes = min_max_axis(X, 1) norms = np.maximum(abs(mins), maxes) norms_elementwise = norms.repeat(np.diff(X.indptr)) mask = norms_elementwise != 0 X.data[mask] /= norms_elementwise[mask] else: if norm == "l1": norms = xp.sum(xp.abs(X), axis=1) elif norm == "l2": norms = row_norms(X) elif norm == "max": norms = xp.max(xp.abs(X), axis=1) norms = _handle_zeros_in_scale(norms, copy=False) X /= norms[:, None] if axis == 0: X = X.T if return_norm: return X, norms else: return X
Scale input vectors individually to unit norm (vector length). Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to normalize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. norm : {'l1', 'l2', 'max'}, default='l2' The norm to use to normalize each non zero sample (or each non-zero feature if axis is 0). axis : {0, 1}, default=1 Define axis used to normalize the data along. If 1, independently normalize each sample, otherwise (if 0) normalize each feature. copy : bool, default=True If False, try to avoid a copy and normalize in place. This is not guaranteed to always work in place; e.g. if the data is a numpy array with an int dtype, a copy will be returned even with copy=False. return_norm : bool, default=False Whether to return the computed norms. Returns ------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) Normalized input X. norms : ndarray of shape (n_samples, ) if axis=1 else (n_features, ) An array of norms along given axis for X. When X is sparse, a NotImplementedError will be raised for norm 'l1' or 'l2'. See Also -------- Normalizer : Performs normalization using the Transformer API (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). Notes ----- For a comparison of the different scalers, transformers, and normalizers, see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. Examples -------- >>> from sklearn.preprocessing import normalize >>> X = [[-2, 1, 2], [-1, 0, 1]] >>> normalize(X, norm="l1") # L1 normalization each row independently array([[-0.4, 0.2, 0.4], [-0.5, 0. , 0.5]]) >>> normalize(X, norm="l2") # L2 normalization each row independently array([[-0.67, 0.33, 0.67], [-0.71, 0. , 0.71]])
normalize
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
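When return_norm=True on dense input, normalize also returns the norms it divided by, which makes it easy to verify the scaling. A sketch with made-up data:

import numpy as np
from sklearn.preprocessing import normalize

X = np.array([[3.0, 4.0], [1.0, 0.0]])  # hypothetical rows
X_norm, norms = normalize(X, norm="l2", return_norm=True)

print(norms)                                    # per-row L2 norms: [5., 1.]
print(np.allclose(X_norm * norms[:, None], X))  # True: each row was divided by its norm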
def transform(self, X, copy=None): """Scale each non zero row of X to unit norm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to normalize, row by row. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. copy : bool, default=None Copy the input X or not. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array. """ copy = copy if copy is not None else self.copy X = validate_data( self, X, accept_sparse="csr", force_writeable=True, copy=copy, reset=False ) return normalize(X, norm=self.norm, axis=1, copy=False)
Scale each non zero row of X to unit norm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to normalize, row by row. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. copy : bool, default=None Copy the input X or not. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array.
transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
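Normalizer is stateless: fit only validates the input, and transform rescales each row independently, so a fitted instance can be applied to any batch. A sketch with made-up data:

import numpy as np
from sklearn.preprocessing import Normalizer

X = np.array([[4.0, 1.0, 2.0, 2.0], [1.0, 3.0, 9.0, 3.0]])  # hypothetical samples
normalizer = Normalizer(norm="l2").fit(X)  # fit learns nothing beyond input validation

X_tr = normalizer.transform(X)
print(np.allclose(np.linalg.norm(X_tr, axis=1), 1.0))  # True: every row has unit L2 norm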
def binarize(X, *, threshold=0.0, copy=True): """Boolean thresholding of array-like or scipy.sparse matrix. Read more in the :ref:`User Guide <preprocessing_binarization>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to binarize, element by element. scipy.sparse matrices should be in CSR or CSC format to avoid an un-necessary copy. threshold : float, default=0.0 Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : bool, default=True If False, try to avoid a copy and binarize in place. This is not guaranteed to always work in place; e.g. if the data is a numpy array with an object dtype, a copy will be returned even with copy=False. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) The transformed data. See Also -------- Binarizer : Performs binarization using the Transformer API (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). Examples -------- >>> from sklearn.preprocessing import binarize >>> X = [[0.4, 0.6, 0.5], [0.6, 0.1, 0.2]] >>> binarize(X, threshold=0.5) array([[0., 1., 0.], [1., 0., 0.]]) """ X = check_array(X, accept_sparse=["csr", "csc"], force_writeable=True, copy=copy) if sparse.issparse(X): if threshold < 0: raise ValueError("Cannot binarize a sparse matrix with threshold < 0") cond = X.data > threshold not_cond = np.logical_not(cond) X.data[cond] = 1 X.data[not_cond] = 0 X.eliminate_zeros() else: xp, _, device = get_namespace_and_device(X) float_dtype = _find_matching_floating_dtype(X, threshold, xp=xp) cond = xp.astype(X, float_dtype, copy=False) > threshold not_cond = xp.logical_not(cond) X[cond] = 1 X[not_cond] = 0 return X
Boolean thresholding of array-like or scipy.sparse matrix. Read more in the :ref:`User Guide <preprocessing_binarization>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to binarize, element by element. scipy.sparse matrices should be in CSR or CSC format to avoid an un-necessary copy. threshold : float, default=0.0 Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : bool, default=True If False, try to avoid a copy and binarize in place. This is not guaranteed to always work in place; e.g. if the data is a numpy array with an object dtype, a copy will be returned even with copy=False. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) The transformed data. See Also -------- Binarizer : Performs binarization using the Transformer API (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). Examples -------- >>> from sklearn.preprocessing import binarize >>> X = [[0.4, 0.6, 0.5], [0.6, 0.1, 0.2]] >>> binarize(X, threshold=0.5) array([[0., 1., 0.], [1., 0., 0.]])
binarize
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
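On CSR input, binarize works only on the stored values and then drops the entries that end up as zero, so sparsity is preserved. A sketch assuming the public scipy.sparse API (the matrix is made up):

from scipy import sparse
from sklearn.preprocessing import binarize

X = sparse.csr_matrix([[0.4, 0.0, 0.8], [0.0, 0.6, 0.1]])  # hypothetical sparse input
X_bin = binarize(X, threshold=0.5)  # strictly greater than 0.5 -> 1, otherwise 0

print(X_bin.toarray())
# [[0. 0. 1.]
#  [0. 1. 0.]]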
def transform(self, X, copy=None): """Binarize each element of X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to binarize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. copy : bool Copy the input X or not. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array. """ copy = copy if copy is not None else self.copy # TODO: This should be refactored because binarize also calls # check_array X = validate_data( self, X, accept_sparse=["csr", "csc"], force_writeable=True, copy=copy, reset=False, ) return binarize(X, threshold=self.threshold, copy=False)
Binarize each element of X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to binarize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. copy : bool Copy the input X or not. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array.
transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
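The Binarizer class is the estimator form of binarize: the threshold is stored on the object, so it can sit inside a pipeline. A sketch reusing the values from the binarize docstring example:

import numpy as np
from sklearn.preprocessing import Binarizer

X = np.array([[0.4, 0.6, 0.5], [0.6, 0.1, 0.2]])
binarizer = Binarizer(threshold=0.5).fit(X)  # fit does nothing beyond validation

print(binarizer.transform(X))
# [[0. 1. 0.]
#  [1. 0. 0.]]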
def fit(self, K, y=None): """Fit KernelCenterer. Parameters ---------- K : ndarray of shape (n_samples, n_samples) Kernel matrix. y : None Ignored. Returns ------- self : object Returns the instance itself. """ xp, _ = get_namespace(K) K = validate_data(self, K, dtype=_array_api.supported_float_dtypes(xp)) if K.shape[0] != K.shape[1]: raise ValueError( "Kernel matrix must be a square matrix." " Input is a {}x{} matrix.".format(K.shape[0], K.shape[1]) ) n_samples = K.shape[0] self.K_fit_rows_ = xp.sum(K, axis=0) / n_samples self.K_fit_all_ = xp.sum(self.K_fit_rows_) / n_samples return self
Fit KernelCenterer. Parameters ---------- K : ndarray of shape (n_samples, n_samples) Kernel matrix. y : None Ignored. Returns ------- self : object Returns the instance itself.
fit
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
def transform(self, K, copy=True):
    """Center kernel matrix.

    Parameters
    ----------
    K : ndarray of shape (n_samples1, n_samples2)
        Kernel matrix.

    copy : bool, default=True
        Set to False to perform inplace computation.

    Returns
    -------
    K_new : ndarray of shape (n_samples1, n_samples2)
        The centered kernel matrix.
    """
    check_is_fitted(self)

    xp, _ = get_namespace(K)

    K = validate_data(
        self,
        K,
        copy=copy,
        force_writeable=True,
        dtype=_array_api.supported_float_dtypes(xp),
        reset=False,
    )

    K_pred_cols = (xp.sum(K, axis=1) / self.K_fit_rows_.shape[0])[:, None]

    K -= self.K_fit_rows_
    K -= K_pred_cols
    K += self.K_fit_all_

    return K
Center kernel matrix. Parameters ---------- K : ndarray of shape (n_samples1, n_samples2) Kernel matrix. copy : bool, default=True Set to False to perform inplace computation. Returns ------- K_new : ndarray of shape (n_samples1, n_samples2) The centered kernel matrix.
transform
python
scikit-learn/scikit-learn
sklearn/preprocessing/_data.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_data.py
BSD-3-Clause
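For a linear kernel, centering the kernel matrix with KernelCenterer is equivalent to computing the kernel on mean-centered features, which is a convenient way to sanity-check fit and transform. A sketch assuming the public sklearn.metrics.pairwise.linear_kernel helper (the data is made up):

import numpy as np
from sklearn.metrics.pairwise import linear_kernel
from sklearn.preprocessing import KernelCenterer

rng = np.random.RandomState(0)
X = rng.randn(5, 3)  # hypothetical samples

K = linear_kernel(X)  # K[i, j] = <X[i], X[j]>
K_centered = KernelCenterer().fit(K).transform(K)

X_centered = X - X.mean(axis=0)
print(np.allclose(K_centered, linear_kernel(X_centered)))  # True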