code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def test_ridge_regression_unpenalized_hstacked_X(
solver, fit_intercept, ols_ridge_dataset, global_random_seed
):
"""Test that unpenalized Ridge = OLS converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
OLS fit on [X] is the same as fit on [X, X]/2.
For long X, [X, X] is a singular matrix and we check against the minimum norm
solution:
min ||w||_2 subject to min ||X w - y||_2
"""
X, y, coef, _ = ols_ridge_dataset
n_samples, n_features = X.shape
alpha = 0 # OLS
model = Ridge(
alpha=alpha,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-15 if solver in ("sag", "saga") else 1e-10,
random_state=global_random_seed,
)
if fit_intercept:
X = X[:, :-1] # remove intercept
intercept = coef[-1]
coef = coef[:-1]
else:
intercept = 0
X = 0.5 * np.concatenate((X, X), axis=1)
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
model.fit(X, y)
if n_samples > n_features or not fit_intercept:
assert model.intercept_ == pytest.approx(intercept)
if solver == "cholesky":
# Cholesky is a bad choice for singular X.
pytest.skip()
assert_allclose(model.coef_, np.r_[coef, coef])
else:
# FIXME: Same as in test_ridge_regression_unpenalized.
# As it is an underdetermined problem, residuals = 0. This shows that we get
# a solution to X w = y ....
assert_allclose(model.predict(X), y)
# But it is not the minimum norm solution. (This should be equal.)
assert np.linalg.norm(np.r_[model.intercept_, model.coef_]) > np.linalg.norm(
np.r_[intercept, coef, coef]
)
pytest.xfail(reason="Ridge does not provide the minimum norm solution.")
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, np.r_[coef, coef])
|
Test that unpenalized Ridge = OLS converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
OLS fit on [X] is the same as fit on [X, X]/2.
For long X, [X, X] is a singular matrix and we check against the minimum norm
solution:
min ||w||_2 subject to min ||X w - y||_2
|
test_ridge_regression_unpenalized_hstacked_X
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
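As a side note on the minimum norm solution referenced in the docstring above: `np.linalg.lstsq` (equivalently, the Moore-Penrose pseudoinverse) returns exactly the least-squares solution of smallest Euclidean norm, which is the reference the test compares against. The following is a minimal standalone sketch with arbitrary shapes and seed, not part of the test suite:

```python
import numpy as np

rng = np.random.default_rng(0)
n_samples, n_features = 10, 20  # wide problem, so X w = y is underdetermined
X = rng.normal(size=(n_samples, n_features))
y = rng.normal(size=n_samples)

# Minimum norm solution: min ||w||_2 subject to min ||X w - y||_2.
w_min_norm = np.linalg.lstsq(X, y, rcond=None)[0]
assert np.allclose(w_min_norm, np.linalg.pinv(X) @ y)

# Residuals are zero because the problem is underdetermined ...
assert np.allclose(X @ w_min_norm, y)

# ... and any other interpolating solution, e.g. obtained by adding a
# null-space direction of X, has a strictly larger norm.
null_direction = np.linalg.svd(X)[2][-1]
w_other = w_min_norm + 0.5 * null_direction
assert np.allclose(X @ w_other, y)
assert np.linalg.norm(w_other) > np.linalg.norm(w_min_norm)
```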
def test_ridge_regression_unpenalized_vstacked_X(
solver, fit_intercept, ols_ridge_dataset, global_random_seed
):
"""Test that unpenalized Ridge = OLS converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
OLS fit on [X] with target [y] is the same as the fit on the vertically stacked [[X], [X]] with target [[y], [y]].
For wide X, [X', X'] is a singular matrix and we check against the minimum norm
solution:
min ||w||_2 subject to X w = y
"""
X, y, coef, _ = ols_ridge_dataset
n_samples, n_features = X.shape
alpha = 0 # OLS
model = Ridge(
alpha=alpha,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-15 if solver in ("sag", "saga") else 1e-10,
random_state=global_random_seed,
)
if fit_intercept:
X = X[:, :-1] # remove intercept
intercept = coef[-1]
coef = coef[:-1]
else:
intercept = 0
X = np.concatenate((X, X), axis=0)
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
y = np.r_[y, y]
model.fit(X, y)
if n_samples > n_features or not fit_intercept:
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef)
else:
# FIXME: Same as in test_ridge_regression_unpenalized.
# As it is an underdetermined problem, residuals = 0. This shows that we get
# a solution to X w = y ....
assert_allclose(model.predict(X), y)
# But it is not the minimum norm solution. (This should be equal.)
assert np.linalg.norm(np.r_[model.intercept_, model.coef_]) > np.linalg.norm(
np.r_[intercept, coef]
)
pytest.xfail(reason="Ridge does not provide the minimum norm solution.")
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef)
|
Test that unpenalized Ridge = OLS converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
OLS fit on [X] with target [y] is the same as the fit on the vertically stacked [[X], [X]] with target [[y], [y]].
For wide X, [X', X'] is a singular matrix and we check against the minimum norm
solution:
min ||w||_2 subject to X w = y
|
test_ridge_regression_unpenalized_vstacked_X
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
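A quick numeric illustration of the vertical-stacking identity used above: duplicating the rows of X and y leaves the ordinary least squares solution unchanged, since every residual term is simply counted twice. This sketch uses arbitrary data, independent of the `ols_ridge_dataset` fixture:

```python
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(20, 5))
y = rng.normal(size=20)

w = np.linalg.lstsq(X, y, rcond=None)[0]
w_stacked = np.linalg.lstsq(np.vstack([X, X]), np.r_[y, y], rcond=None)[0]
assert np.allclose(w, w_stacked)
```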
def test_ridge_regression_sample_weights(
solver,
fit_intercept,
sparse_container,
alpha,
ols_ridge_dataset,
global_random_seed,
):
"""Test that Ridge with sample weights gives correct results.
We use the following trick:
||y - Xw||_2 = (z - Aw)' W (z - Aw)
for z=[y, y], A' = [X', X'] (vstacked), and W[:n/2] + W[n/2:] = 1, W=diag(W)
"""
if sparse_container is not None:
if fit_intercept and solver not in SPARSE_SOLVERS_WITH_INTERCEPT:
pytest.skip()
elif not fit_intercept and solver not in SPARSE_SOLVERS_WITHOUT_INTERCEPT:
pytest.skip()
X, y, _, coef = ols_ridge_dataset
n_samples, n_features = X.shape
sw = rng.uniform(low=0, high=1, size=n_samples)
model = Ridge(
alpha=alpha,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-15 if solver in ["sag", "saga"] else 1e-10,
max_iter=100_000,
random_state=global_random_seed,
)
X = X[:, :-1] # remove intercept
X = np.concatenate((X, X), axis=0)
y = np.r_[y, y]
sw = np.r_[sw, 1 - sw] * alpha
if fit_intercept:
intercept = coef[-1]
else:
X = X - X.mean(axis=0)
y = y - y.mean()
intercept = 0
if sparse_container is not None:
X = sparse_container(X)
model.fit(X, y, sample_weight=sw)
coef = coef[:-1]
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef)
|
Test that Ridge with sample weights gives correct results.
We use the following trick:
||y - Xw||_2 = (z - Aw)' W (z - Aw)
for z=[y, y], A' = [X', X'] (vstacked), and W[:n/2] + W[n/2:] = 1, W=diag(W)
|
test_ridge_regression_sample_weights
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
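The weighting trick stated in the docstring above can be verified numerically: for any coefficient vector, the plain squared residual on (X, y) equals the weighted squared residual on the row-duplicated data, provided the two weights attached to each duplicated sample sum to one. A minimal sketch with arbitrary data (unrelated to the fixture):

```python
import numpy as np

rng = np.random.default_rng(0)
n, p = 15, 4
X = rng.normal(size=(n, p))
y = rng.normal(size=n)
w = rng.normal(size=p)  # any coefficient vector

sw = rng.uniform(size=n)
z = np.r_[y, y]                 # z = [y, y]
A = np.vstack([X, X])           # A' = [X', X']
W = np.r_[sw, 1 - sw]           # W[:n] + W[n:] = 1

lhs = np.sum((y - X @ w) ** 2)          # ||y - X w||_2^2
rhs = np.sum(W * (z - A @ w) ** 2)      # (z - A w)' diag(W) (z - A w)
assert np.isclose(lhs, rhs)
```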
def test_ridgecv_alphas_zero(cv, Estimator):
"""Check alpha=0.0 raises error only when `cv=None`."""
rng = np.random.RandomState(0)
alphas = (0.0, 1.0, 10.0)
n_samples, n_features = 5, 5
if Estimator is RidgeCV:
y = rng.randn(n_samples)
else:
y = rng.randint(0, 2, n_samples)
X = rng.randn(n_samples, n_features)
ridge_est = Estimator(alphas=alphas, cv=cv)
if cv is None:
with pytest.raises(ValueError, match=r"alphas\[0\] == 0.0, must be > 0.0."):
ridge_est.fit(X, y)
else:
ridge_est.fit(X, y)
|
Check alpha=0.0 raises error only when `cv=None`.
|
test_ridgecv_alphas_zero
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
def test_ridgecv_alphas_validation(Estimator, params, err_type, err_msg):
"""Check the `alphas` validation in RidgeCV and RidgeClassifierCV."""
n_samples, n_features = 5, 5
X = rng.randn(n_samples, n_features)
y = rng.randint(0, 2, n_samples)
with pytest.raises(err_type, match=err_msg):
Estimator(**params).fit(X, y)
|
Check the `alphas` validation in RidgeCV and RidgeClassifierCV.
|
test_ridgecv_alphas_validation
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
def test_ridgecv_alphas_scalar(Estimator):
"""Check the case when `alphas` is a scalar.
This case was supported in the past when `alphas` were converted
into an array in `__init__`.
We add this test to ensure backward compatibility.
"""
n_samples, n_features = 5, 5
X = rng.randn(n_samples, n_features)
if Estimator is RidgeCV:
y = rng.randn(n_samples)
else:
y = rng.randint(0, 2, n_samples)
Estimator(alphas=1).fit(X, y)
|
Check the case when `alphas` is a scalar.
This case was supported in the past when `alphas` were converted
into an array in `__init__`.
We add this test to ensure backward compatibility.
|
test_ridgecv_alphas_scalar
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
def test_ridge_fit_intercept_sparse(
solver, with_sample_weight, global_random_seed, csr_container
):
"""Check that ridge finds the same coefs and intercept on dense and sparse input
in the presence of sample weights.
For now only sparse_cg and lbfgs can correctly fit an intercept
with sparse X with default tol and max_iter.
'sag' is tested separately in test_ridge_fit_intercept_sparse_sag because it
requires more iterations and should raise a warning if default max_iter is used.
Other solvers raise an exception, as checked in
test_ridge_fit_intercept_sparse_error
"""
positive = solver == "lbfgs"
X, y = _make_sparse_offset_regression(
n_features=20, random_state=global_random_seed, positive=positive
)
sample_weight = None
if with_sample_weight:
rng = np.random.RandomState(global_random_seed)
sample_weight = 1.0 + rng.uniform(size=X.shape[0])
# "auto" should switch to "sparse_cg" when X is sparse
# so the reference we use for both ("auto" and "sparse_cg") is
# Ridge(solver="sparse_cg"), fitted using the dense representation (note
# that "sparse_cg" can fit sparse or dense data)
dense_solver = "sparse_cg" if solver == "auto" else solver
dense_ridge = Ridge(solver=dense_solver, tol=1e-12, positive=positive)
sparse_ridge = Ridge(solver=solver, tol=1e-12, positive=positive)
dense_ridge.fit(X, y, sample_weight=sample_weight)
sparse_ridge.fit(csr_container(X), y, sample_weight=sample_weight)
assert_allclose(dense_ridge.intercept_, sparse_ridge.intercept_)
assert_allclose(dense_ridge.coef_, sparse_ridge.coef_, rtol=5e-7)
|
Check that ridge finds the same coefs and intercept on dense and sparse input
in the presence of sample weights.
For now only sparse_cg and lbfgs can correctly fit an intercept
with sparse X with default tol and max_iter.
'sag' is tested separately in test_ridge_fit_intercept_sparse_sag because it
requires more iterations and should raise a warning if default max_iter is used.
Other solvers raise an exception, as checked in
test_ridge_fit_intercept_sparse_error
|
test_ridge_fit_intercept_sparse
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
def test_ridge_regression_check_arguments_validity(
return_intercept, sample_weight, container, solver
):
"""check if all combinations of arguments give valid estimations"""
# test excludes 'svd' solver because it raises exception for sparse inputs
rng = check_random_state(42)
X = rng.rand(1000, 3)
true_coefs = [1, 2, 0.1]
y = np.dot(X, true_coefs)
true_intercept = 0.0
if return_intercept:
true_intercept = 10000.0
y += true_intercept
X_testing = container(X)
alpha, tol = 1e-3, 1e-6
atol = 1e-3 if _IS_32BIT else 1e-4
positive = solver == "lbfgs"
if solver not in ["sag", "auto"] and return_intercept:
with pytest.raises(ValueError, match="In Ridge, only 'sag' solver"):
ridge_regression(
X_testing,
y,
alpha=alpha,
solver=solver,
sample_weight=sample_weight,
return_intercept=return_intercept,
positive=positive,
tol=tol,
)
return
out = ridge_regression(
X_testing,
y,
alpha=alpha,
solver=solver,
sample_weight=sample_weight,
positive=positive,
return_intercept=return_intercept,
tol=tol,
)
if return_intercept:
coef, intercept = out
assert_allclose(coef, true_coefs, rtol=0, atol=atol)
assert_allclose(intercept, true_intercept, rtol=0, atol=atol)
else:
assert_allclose(out, true_coefs, rtol=0, atol=atol)
|
check if all combinations of arguments give valid estimations
|
test_ridge_regression_check_arguments_validity
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
def test_ridgeclassifier_multilabel(Classifier, params):
"""Check that multilabel classification is supported and give meaningful
results."""
X, y = make_multilabel_classification(n_classes=1, random_state=0)
y = y.reshape(-1, 1)
Y = np.concatenate([y, y], axis=1)
clf = Classifier(**params).fit(X, Y)
Y_pred = clf.predict(X)
assert Y_pred.shape == Y.shape
assert_array_equal(Y_pred[:, 0], Y_pred[:, 1])
Ridge(solver="sag").fit(X, y)
|
Check that multilabel classification is supported and gives meaningful
results.
|
test_ridgeclassifier_multilabel
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
def test_ridge_positive_regression_test(solver, fit_intercept, alpha):
"""Test that positive Ridge finds true positive coefficients."""
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
coef = np.array([1, -10])
if fit_intercept:
intercept = 20
y = X.dot(coef) + intercept
else:
y = X.dot(coef)
model = Ridge(
alpha=alpha, positive=True, solver=solver, fit_intercept=fit_intercept
)
model.fit(X, y)
assert np.all(model.coef_ >= 0)
|
Test that positive Ridge finds true positive coefficients.
|
test_ridge_positive_regression_test
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
def test_ridge_ground_truth_positive_test(fit_intercept, alpha):
"""Test that Ridge w/wo positive converges to the same solution.
Ridge with positive=True and positive=False must give the same solution
when the ground truth coefs are all positive.
"""
rng = np.random.RandomState(42)
X = rng.randn(300, 100)
coef = rng.uniform(0.1, 1.0, size=X.shape[1])
if fit_intercept:
intercept = 1
y = X @ coef + intercept
else:
y = X @ coef
y += rng.normal(size=X.shape[0]) * 0.01
results = []
for positive in [True, False]:
model = Ridge(
alpha=alpha, positive=positive, fit_intercept=fit_intercept, tol=1e-10
)
results.append(model.fit(X, y).coef_)
assert_allclose(*results, atol=1e-6, rtol=0)
|
Test that Ridge w/wo positive converges to the same solution.
Ridge with positive=True and positive=False must give the same solution
when the ground truth coefs are all positive.
|
test_ridge_ground_truth_positive_test
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
def test_ridge_positive_error_test(solver):
"""Test input validation for positive argument in Ridge."""
alpha = 0.1
X = np.array([[1, 2], [3, 4]])
coef = np.array([1, -1])
y = X @ coef
model = Ridge(alpha=alpha, positive=True, solver=solver, fit_intercept=False)
with pytest.raises(ValueError, match="does not support positive"):
model.fit(X, y)
with pytest.raises(ValueError, match="only 'lbfgs' solver can be used"):
_, _ = ridge_regression(
X, y, alpha, positive=True, solver=solver, return_intercept=False
)
|
Test input validation for positive argument in Ridge.
|
test_ridge_positive_error_test
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
def test_positive_ridge_loss(alpha):
"""Check ridge loss consistency when positive argument is enabled."""
X, y = make_regression(n_samples=300, n_features=300, random_state=42)
alpha = 0.10
n_checks = 100
def ridge_loss(model, random_state=None, noise_scale=1e-8):
intercept = model.intercept_
if random_state is not None:
rng = np.random.RandomState(random_state)
coef = model.coef_ + rng.uniform(0, noise_scale, size=model.coef_.shape)
else:
coef = model.coef_
return 0.5 * np.sum((y - X @ coef - intercept) ** 2) + 0.5 * alpha * np.sum(
coef**2
)
model = Ridge(alpha=alpha).fit(X, y)
model_positive = Ridge(alpha=alpha, positive=True).fit(X, y)
# Check 1:
# Loss for solution found by Ridge(positive=False)
# is lower than that for solution found by Ridge(positive=True)
loss = ridge_loss(model)
loss_positive = ridge_loss(model_positive)
assert loss <= loss_positive
# Check 2:
# Loss for solution found by Ridge(positive=True)
# is lower than that for small random positive perturbation
# of the positive solution.
for random_state in range(n_checks):
loss_perturbed = ridge_loss(model_positive, random_state=random_state)
assert loss_positive <= loss_perturbed
|
Check ridge loss consistency when positive argument is enabled.
|
test_positive_ridge_loss
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
def test_lbfgs_solver_consistency(alpha):
"""Test that LBGFS gets almost the same coef of svd when positive=False."""
X, y = make_regression(n_samples=300, n_features=300, random_state=42)
y = np.expand_dims(y, 1)
alpha = np.asarray([alpha])
config = {
"positive": False,
"tol": 1e-16,
"max_iter": 500000,
}
coef_lbfgs = _solve_lbfgs(X, y, alpha, **config)
coef_cholesky = _solve_svd(X, y, alpha)
assert_allclose(coef_lbfgs, coef_cholesky, atol=1e-4, rtol=0)
|
Test that LBFGS gets almost the same coef as svd when positive=False.
|
test_lbfgs_solver_consistency
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
def test_lbfgs_solver_error():
"""Test that LBFGS solver raises ConvergenceWarning."""
X = np.array([[1, -1], [1, 1]])
y = np.array([-1e10, 1e10])
model = Ridge(
alpha=0.01,
solver="lbfgs",
fit_intercept=False,
tol=1e-12,
positive=True,
max_iter=1,
)
with pytest.warns(ConvergenceWarning, match="lbfgs solver did not converge"):
model.fit(X, y)
|
Test that LBFGS solver raises ConvergenceWarning.
|
test_lbfgs_solver_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
def test_ridge_sample_weight_consistency(
fit_intercept, sparse_container, data, solver, global_random_seed
):
"""Test that the impact of sample_weight is consistent.
Note that this test is stricter than the common test
check_sample_weight_equivalence alone.
"""
# filter out solver that do not support sparse input
if sparse_container is not None:
if solver == "svd" or (solver in ("cholesky", "saga") and fit_intercept):
pytest.skip("unsupported configuration")
# XXX: this test is quite sensitive to the seed used to generate the data:
# ideally we would like the test to pass for any global_random_seed but this is not
# the case at the moment.
rng = np.random.RandomState(42)
n_samples = 12
if data == "tall":
n_features = n_samples // 2
else:
n_features = n_samples * 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
if sparse_container is not None:
X = sparse_container(X)
params = dict(
fit_intercept=fit_intercept,
alpha=1.0,
solver=solver,
positive=(solver == "lbfgs"),
random_state=global_random_seed, # for sag/saga
tol=1e-12,
)
# 1) sample_weight=np.ones(..) should be equivalent to sample_weight=None,
# a special case of check_sample_weight_equivalence(name, reg), but we also
# test with sparse input.
reg = Ridge(**params).fit(X, y, sample_weight=None)
coef = reg.coef_.copy()
if fit_intercept:
intercept = reg.intercept_
sample_weight = np.ones_like(y)
reg.fit(X, y, sample_weight=sample_weight)
assert_allclose(reg.coef_, coef, rtol=1e-6)
if fit_intercept:
assert_allclose(reg.intercept_, intercept)
# 2) setting elements of sample_weight to 0 is equivalent to removing these samples,
# another special case of check_sample_weight_equivalence(name, reg), but we
# also test with sparse input
sample_weight = rng.uniform(low=0.01, high=2, size=X.shape[0])
sample_weight[-5:] = 0
y[-5:] *= 1000 # to make excluding those samples important
reg.fit(X, y, sample_weight=sample_weight)
coef = reg.coef_.copy()
if fit_intercept:
intercept = reg.intercept_
reg.fit(X[:-5, :], y[:-5], sample_weight=sample_weight[:-5])
assert_allclose(reg.coef_, coef, rtol=1e-6)
if fit_intercept:
assert_allclose(reg.intercept_, intercept)
# 3) scaling of sample_weight should have no effect
# Note: For models with penalty, scaling the penalty term might work.
reg2 = Ridge(**params).set_params(alpha=np.pi * params["alpha"])
reg2.fit(X, y, sample_weight=np.pi * sample_weight)
if solver in ("sag", "saga") and not fit_intercept:
pytest.xfail(f"Solver {solver} does fail test for scaling of sample_weight.")
assert_allclose(reg2.coef_, coef, rtol=1e-6)
if fit_intercept:
assert_allclose(reg2.intercept_, intercept)
# 4) check that multiplying sample_weight by 2 is equivalent
# to repeating corresponding samples twice
if sparse_container is not None:
X = X.toarray()
X2 = np.concatenate([X, X[: n_samples // 2]], axis=0)
y2 = np.concatenate([y, y[: n_samples // 2]])
sample_weight_1 = sample_weight.copy()
sample_weight_1[: n_samples // 2] *= 2
sample_weight_2 = np.concatenate(
[sample_weight, sample_weight[: n_samples // 2]], axis=0
)
if sparse_container is not None:
X = sparse_container(X)
X2 = sparse_container(X2)
reg1 = Ridge(**params).fit(X, y, sample_weight=sample_weight_1)
reg2 = Ridge(**params).fit(X2, y2, sample_weight=sample_weight_2)
assert_allclose(reg1.coef_, reg2.coef_)
if fit_intercept:
assert_allclose(reg1.intercept_, reg2.intercept_)
|
Test that the impact of sample_weight is consistent.
Note that this test is stricter than the common test
check_sample_weight_equivalence alone.
|
test_ridge_sample_weight_consistency
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
def test_ridge_cv_results_predictions(with_sample_weight, fit_intercept, n_targets):
"""Check that the predictions stored in `cv_results_` are on the original scale.
The GCV approach works on scaled data: centered by an offset and scaled by the
square root of the sample weights. Thus, prior to computing scores, the
predictions need to be scaled back to the original scale. These predictions are
the ones stored in `cv_results_` in `RidgeCV`.
In this test, we check that the internal predictions stored in `cv_results_` are
equivalent to a naive LOO-CV grid search with a `Ridge` estimator.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/13998
"""
X, y = make_regression(
n_samples=100, n_features=10, n_targets=n_targets, random_state=0
)
sample_weight = np.ones(shape=(X.shape[0],))
if with_sample_weight:
sample_weight[::2] = 0.5
alphas = (0.1, 1.0, 10.0)
# scoring should be set to store predictions and not the squared error
ridge_cv = RidgeCV(
alphas=alphas,
scoring="neg_mean_squared_error",
fit_intercept=fit_intercept,
store_cv_results=True,
)
ridge_cv.fit(X, y, sample_weight=sample_weight)
# manual grid-search with a `Ridge` estimator
predictions = np.empty(shape=(*y.shape, len(alphas)))
cv = LeaveOneOut()
for alpha_idx, alpha in enumerate(alphas):
for idx, (train_idx, test_idx) in enumerate(cv.split(X, y)):
ridge = Ridge(alpha=alpha, fit_intercept=fit_intercept)
ridge.fit(X[train_idx], y[train_idx], sample_weight[train_idx])
predictions[idx, ..., alpha_idx] = ridge.predict(X[test_idx])
assert_allclose(ridge_cv.cv_results_, predictions)
|
Check that the predictions stored in `cv_results_` are on the original scale.
The GCV approach works on scaled data: centered by an offset and scaled by the
square root of the sample weights. Thus, prior to computing scores, the
predictions need to be scaled back to the original scale. These predictions are
the ones stored in `cv_results_` in `RidgeCV`.
In this test, we check that the internal predictions stored in `cv_results_` are
equivalent to a naive LOO-CV grid search with a `Ridge` estimator.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/13998
|
test_ridge_cv_results_predictions
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
def test_ridge_cv_multioutput_sample_weight(global_random_seed):
"""Check that `RidgeCV` works properly with multioutput and sample_weight
when `scoring != None`.
We check the error reported by the RidgeCV is close to a naive LOO-CV using a
Ridge estimator.
"""
X, y = make_regression(n_targets=2, random_state=global_random_seed)
sample_weight = np.ones(shape=(X.shape[0],))
ridge_cv = RidgeCV(scoring="neg_mean_squared_error", store_cv_results=True)
ridge_cv.fit(X, y, sample_weight=sample_weight)
cv = LeaveOneOut()
ridge = Ridge(alpha=ridge_cv.alpha_)
y_pred_loo = np.squeeze(
[
ridge.fit(X[train], y[train], sample_weight=sample_weight[train]).predict(
X[test]
)
for train, test in cv.split(X)
]
)
assert_allclose(ridge_cv.best_score_, -mean_squared_error(y, y_pred_loo))
|
Check that `RidgeCV` works properly with multioutput and sample_weight
when `scoring != None`.
We check the error reported by the RidgeCV is close to a naive LOO-CV using a
Ridge estimator.
|
test_ridge_cv_multioutput_sample_weight
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
def test_ridge_cv_custom_multioutput_scorer():
"""Check that `RidgeCV` works properly with a custom multioutput scorer."""
X, y = make_regression(n_targets=2, random_state=0)
def custom_error(y_true, y_pred):
errors = (y_true - y_pred) ** 2
mean_errors = np.mean(errors, axis=0)
if mean_errors.ndim == 1:
# case of multioutput
return -np.average(mean_errors, weights=[2, 1])
# single output - this part of the code should not be reached in the case of
# multioutput scoring
return -mean_errors # pragma: no cover
def custom_multioutput_scorer(estimator, X, y):
"""Multioutput score that give twice more importance to the second target."""
return -custom_error(y, estimator.predict(X))
ridge_cv = RidgeCV(scoring=custom_multioutput_scorer)
ridge_cv.fit(X, y)
cv = LeaveOneOut()
ridge = Ridge(alpha=ridge_cv.alpha_)
y_pred_loo = np.squeeze(
[ridge.fit(X[train], y[train]).predict(X[test]) for train, test in cv.split(X)]
)
assert_allclose(ridge_cv.best_score_, -custom_error(y, y_pred_loo))
|
Check that `RidgeCV` works properly with a custom multioutput scorer.
|
test_ridge_cv_custom_multioutput_scorer
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
def test_set_score_request_with_default_scoring(metaestimator, make_dataset):
"""Test that `set_score_request` is set within `RidgeCV.fit()` and
`RidgeClassifierCV.fit()` when using the default scoring and no
UnsetMetadataPassedError is raised. Regression test for the fix in PR #29634."""
X, y = make_dataset(n_samples=100, n_features=5, random_state=42)
metaestimator.fit(X, y, sample_weight=np.ones(X.shape[0]))
|
Test that `set_score_request` is set within `RidgeCV.fit()` and
`RidgeClassifierCV.fit()` when using the default scoring and no
UnsetMetadataPassedError is raised. Regression test for the fix in PR #29634.
|
test_set_score_request_with_default_scoring
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_ridge.py
|
BSD-3-Clause
|
def test_sag_pobj_matches_logistic_regression(csr_container):
"""tests if the sag pobj matches log reg"""
n_samples = 100
alpha = 1.0
max_iter = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0, cluster_std=0.1)
clf1 = LogisticRegression(
solver="sag",
fit_intercept=False,
tol=0.0000001,
C=1.0 / alpha / n_samples,
max_iter=max_iter,
random_state=10,
)
clf2 = clone(clf1)
clf3 = LogisticRegression(
fit_intercept=False,
tol=0.0000001,
C=1.0 / alpha / n_samples,
max_iter=max_iter,
random_state=10,
)
clf1.fit(X, y)
clf2.fit(csr_container(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, log_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, log_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, log_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj2, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj1, decimal=4)
|
tests if the sag pobj matches log reg
|
test_sag_pobj_matches_logistic_regression
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_sag.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_sag.py
|
BSD-3-Clause
|
def test_sag_pobj_matches_ridge_regression(csr_container):
"""tests if the sag pobj matches ridge reg"""
n_samples = 100
n_features = 10
alpha = 1.0
n_iter = 100
fit_intercept = False
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
clf1 = Ridge(
fit_intercept=fit_intercept,
tol=0.00000000001,
solver="sag",
alpha=alpha,
max_iter=n_iter,
random_state=42,
)
clf2 = clone(clf1)
clf3 = Ridge(
fit_intercept=fit_intercept,
tol=0.00001,
solver="lsqr",
alpha=alpha,
max_iter=n_iter,
random_state=42,
)
clf1.fit(X, y)
clf2.fit(csr_container(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, squared_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, squared_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, squared_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj1, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj2, decimal=4)
|
tests if the sag pobj matches ridge reg
|
test_sag_pobj_matches_ridge_regression
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_sag.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_sag.py
|
BSD-3-Clause
|
def test_sag_regressor_computed_correctly(csr_container):
"""tests if the sag regressor is computed correctly"""
alpha = 0.1
n_features = 10
n_samples = 40
max_iter = 100
tol = 0.000001
fit_intercept = True
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w) + 2.0
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf1 = Ridge(
fit_intercept=fit_intercept,
tol=tol,
solver="sag",
alpha=alpha * n_samples,
max_iter=max_iter,
random_state=rng,
)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(csr_container(X), y)
spweights1, spintercept1 = sag_sparse(
X,
y,
step_size,
alpha,
n_iter=max_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept,
random_state=rng,
)
spweights2, spintercept2 = sag_sparse(
X,
y,
step_size,
alpha,
n_iter=max_iter,
dloss=squared_dloss,
sparse=True,
fit_intercept=fit_intercept,
random_state=rng,
)
assert_array_almost_equal(clf1.coef_.ravel(), spweights1.ravel(), decimal=3)
assert_almost_equal(clf1.intercept_, spintercept1, decimal=1)
# TODO: uncomment when sparse Ridge with intercept will be fixed (#4710)
# assert_array_almost_equal(clf2.coef_.ravel(),
# spweights2.ravel(),
# decimal=3)
# assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
|
tests if the sag regressor is computed correctly
|
test_sag_regressor_computed_correctly
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_sag.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_sag.py
|
BSD-3-Clause
|
def test_sag_regressor(seed, csr_container):
"""tests if the sag regressor performs well"""
xmin, xmax = -5, 5
n_samples = 300
tol = 0.001
max_iter = 100
alpha = 0.1
rng = np.random.RandomState(seed)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf1 = Ridge(
tol=tol,
solver="sag",
max_iter=max_iter,
alpha=alpha * n_samples,
random_state=rng,
)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(csr_container(X), y)
score1 = clf1.score(X, y)
score2 = clf2.score(X, y)
assert score1 > 0.98
assert score2 > 0.98
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf1 = Ridge(tol=tol, solver="sag", max_iter=max_iter, alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(csr_container(X), y)
score1 = clf1.score(X, y)
score2 = clf2.score(X, y)
assert score1 > 0.45
assert score2 > 0.45
|
tests if the sag regressor performs well
|
test_sag_regressor
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_sag.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_sag.py
|
BSD-3-Clause
|
def test_sag_classifier_computed_correctly(csr_container):
"""tests if the binary classifier is computed correctly"""
alpha = 0.1
n_samples = 50
n_iter = 50
tol = 0.00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0, cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
clf1 = LogisticRegression(
solver="sag",
C=1.0 / alpha / n_samples,
max_iter=n_iter,
tol=tol,
random_state=77,
fit_intercept=fit_intercept,
)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(csr_container(X), y)
spweights, spintercept = sag_sparse(
X,
y,
step_size,
alpha,
n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept,
)
spweights2, spintercept2 = sag_sparse(
X,
y,
step_size,
alpha,
n_iter=n_iter,
dloss=log_dloss,
sparse=True,
fit_intercept=fit_intercept,
)
assert_array_almost_equal(clf1.coef_.ravel(), spweights.ravel(), decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(), spweights2.ravel(), decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
|
tests if the binary classifier is computed correctly
|
test_sag_classifier_computed_correctly
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_sag.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_sag.py
|
BSD-3-Clause
|
def test_sag_multiclass_computed_correctly(csr_container):
"""tests if the multiclass classifier is computed correctly"""
alpha = 0.1
n_samples = 20
tol = 1e-5
max_iter = 70
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0, cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = OneVsRestClassifier(
LogisticRegression(
solver="sag",
C=1.0 / alpha / n_samples,
max_iter=max_iter,
tol=tol,
random_state=77,
fit_intercept=fit_intercept,
)
)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(csr_container(X), y)
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(
X,
y_encoded,
step_size,
alpha,
dloss=log_dloss,
n_iter=max_iter,
fit_intercept=fit_intercept,
)
spweights2, spintercept2 = sag_sparse(
X,
y_encoded,
step_size,
alpha,
dloss=log_dloss,
n_iter=max_iter,
sparse=True,
fit_intercept=fit_intercept,
)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_allclose(clf1.estimators_[i].coef_.ravel(), coef1[i], rtol=1e-2)
assert_allclose(clf1.estimators_[i].intercept_, intercept1[i], rtol=1e-1)
assert_allclose(clf2.estimators_[i].coef_.ravel(), coef2[i], rtol=1e-2)
# Note the very crude accuracy, i.e. high rtol.
assert_allclose(clf2.estimators_[i].intercept_, intercept2[i], rtol=5e-1)
|
tests if the multiclass classifier is computed correctly
|
test_sag_multiclass_computed_correctly
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_sag.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_sag.py
|
BSD-3-Clause
|
def test_classifier_results(csr_container):
"""tests if classifier results match target"""
alpha = 0.1
n_features = 20
n_samples = 10
tol = 0.01
max_iter = 200
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w)
y = np.sign(y)
clf1 = LogisticRegression(
solver="sag",
C=1.0 / alpha / n_samples,
max_iter=max_iter,
tol=tol,
random_state=77,
)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(csr_container(X), y)
pred1 = clf1.predict(X)
pred2 = clf2.predict(X)
assert_almost_equal(pred1, y, decimal=12)
assert_almost_equal(pred2, y, decimal=12)
|
tests if classifier results match target
|
test_classifier_results
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_sag.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_sag.py
|
BSD-3-Clause
|
def test_binary_classifier_class_weight(csr_container):
"""tests binary classifier with classweights for each class"""
alpha = 0.1
n_samples = 50
n_iter = 20
tol = 0.00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=10, cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
class_weight = {1: 0.45, -1: 0.55}
clf1 = LogisticRegression(
solver="sag",
C=1.0 / alpha / n_samples,
max_iter=n_iter,
tol=tol,
random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight,
)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(csr_container(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, classes=np.unique(y), y=y)
sample_weight = class_weight_[le.fit_transform(y)]
spweights, spintercept = sag_sparse(
X,
y,
step_size,
alpha,
n_iter=n_iter,
dloss=log_dloss,
sample_weight=sample_weight,
fit_intercept=fit_intercept,
)
spweights2, spintercept2 = sag_sparse(
X,
y,
step_size,
alpha,
n_iter=n_iter,
dloss=log_dloss,
sparse=True,
sample_weight=sample_weight,
fit_intercept=fit_intercept,
)
assert_array_almost_equal(clf1.coef_.ravel(), spweights.ravel(), decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(), spweights2.ravel(), decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
|
tests binary classifier with classweights for each class
|
test_binary_classifier_class_weight
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_sag.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_sag.py
|
BSD-3-Clause
|
def test_classifier_single_class():
"""tests if ValueError is thrown with only one class"""
X = [[1, 2], [3, 4]]
y = [1, 1]
msg = "This solver needs samples of at least 2 classes in the data"
with pytest.raises(ValueError, match=msg):
LogisticRegression(solver="sag").fit(X, y)
|
tests if ValueError is thrown with only one class
|
test_classifier_single_class
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_sag.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_sag.py
|
BSD-3-Clause
|
def test_sgd_l1_ratio_not_used(Estimator, l1_ratio):
"""Check that l1_ratio is not used when penalty is not 'elasticnet'"""
clf1 = Estimator(penalty="l1", l1_ratio=None, random_state=0).fit(X, Y)
clf2 = Estimator(penalty="l1", l1_ratio=l1_ratio, random_state=0).fit(X, Y)
assert_allclose(clf1.coef_, clf2.coef_)
|
Check that l1_ratio is not used when penalty is not 'elasticnet'
|
test_sgd_l1_ratio_not_used
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_sgd.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_sgd.py
|
BSD-3-Clause
|
def test_power_t_limits(klass):
"""Check that a warning is raised when `power_t` is negative."""
# Check that negative values of `power_t` raise a warning
clf = klass(power_t=-1.0)
with pytest.warns(
FutureWarning, match="Negative values for `power_t` are deprecated"
):
clf.fit(X, Y)
# Check that values of `power_t` in range [0, inf) do not raise a warning
with warnings.catch_warnings(record=True) as w:
clf = klass(power_t=0.5)
clf.fit(X, Y)
assert len(w) == 0
|
Check that a warning is raised when `power_t` is negative.
|
test_power_t_limits
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_sgd.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_sgd.py
|
BSD-3-Clause
|
def test_provide_coef(klass):
"""Check that the shape of `coef_init` is validated."""
with pytest.raises(ValueError, match="Provided coef_init does not match dataset"):
klass().fit(X, Y, coef_init=np.zeros((3,)))
|
Check that the shape of `coef_init` is validated.
|
test_provide_coef
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_sgd.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_sgd.py
|
BSD-3-Clause
|
def test_sgd_early_stopping_with_partial_fit(klass):
"""Check that we raise an error for `early_stopping` used with
`partial_fit`.
"""
err_msg = "early_stopping should be False with partial_fit"
with pytest.raises(ValueError, match=err_msg):
klass(early_stopping=True).partial_fit(X, Y)
|
Check that we raise an error for `early_stopping` used with
`partial_fit`.
|
test_sgd_early_stopping_with_partial_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_sgd.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_sgd.py
|
BSD-3-Clause
|
def test_validation_mask_correctly_subsets(monkeypatch):
"""Test that data passed to validation callback correctly subsets.
Non-regression test for #23255.
"""
X, Y = iris.data, iris.target
n_samples = X.shape[0]
validation_fraction = 0.2
clf = linear_model.SGDClassifier(
early_stopping=True,
tol=1e-3,
max_iter=1000,
validation_fraction=validation_fraction,
)
mock = Mock(side_effect=_stochastic_gradient._ValidationScoreCallback)
monkeypatch.setattr(_stochastic_gradient, "_ValidationScoreCallback", mock)
clf.fit(X, Y)
X_val, y_val = mock.call_args[0][1:3]
assert X_val.shape[0] == int(n_samples * validation_fraction)
assert y_val.shape[0] == int(n_samples * validation_fraction)
|
Test that data passed to validation callback correctly subsets.
Non-regression test for #23255.
|
test_validation_mask_correctly_subsets
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_sgd.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_sgd.py
|
BSD-3-Clause
|
def test_sgd_one_class_svm_estimator_type():
"""Check that SGDOneClassSVM has the correct estimator type.
Non-regression test for the case where the mixin was not placed first (on the left) in the class definition.
"""
sgd_ocsvm = SGDOneClassSVM()
assert get_tags(sgd_ocsvm).estimator_type == "outlier_detector"
|
Check that SGDOneClassSVM has the correct estimator type.
Non-regression test for the case where the mixin was not placed first (on the left) in the class definition.
|
test_sgd_one_class_svm_estimator_type
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_sgd.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_sgd.py
|
BSD-3-Clause
|
def test_sparse_enet_coordinate_descent(csc_container):
"""Test that a warning is issued if model does not converge"""
clf = Lasso(max_iter=2)
n_samples = 5
n_features = 2
X = csc_container((n_samples, n_features)) * 1e50
y = np.ones(n_samples)
warning_message = (
"Objective did not converge. You might want "
"to increase the number of iterations."
)
with pytest.warns(ConvergenceWarning, match=warning_message):
clf.fit(X, y)
|
Test that a warning is issued if model does not converge
|
test_sparse_enet_coordinate_descent
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_sparse_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_sparse_coordinate_descent.py
|
BSD-3-Clause
|
def test_sparse_read_only_buffer(copy_X):
"""Test that sparse coordinate descent works for read-only buffers"""
rng = np.random.RandomState(0)
clf = ElasticNet(alpha=0.1, copy_X=copy_X, random_state=rng)
X = sp.random(100, 20, format="csc", random_state=rng)
# Make X.data read-only
X.data = create_memmap_backed_data(X.data)
y = rng.rand(100)
clf.fit(X, y)
|
Test that sparse coordinate descent works for read-only buffers
|
test_sparse_read_only_buffer
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/tests/test_sparse_coordinate_descent.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_sparse_coordinate_descent.py
|
BSD-3-Clause
|
def fit(self, X, y, sample_weight=None):
"""Fit a Generalized Linear Model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
Fitted model.
"""
X, y = validate_data(
self,
X,
y,
accept_sparse=["csc", "csr"],
dtype=[np.float64, np.float32],
y_numeric=True,
multi_output=False,
)
# required by losses
if self.solver == "lbfgs":
# lbfgs will force coef and therefore raw_prediction to be float64. The
# base_loss needs y, X @ coef and sample_weight all of same dtype
# (and contiguous).
loss_dtype = np.float64
else:
loss_dtype = min(max(y.dtype, X.dtype), np.float64)
y = check_array(y, dtype=loss_dtype, order="C", ensure_2d=False)
if sample_weight is not None:
# Note that _check_sample_weight calls check_array(order="C") required by
# losses.
sample_weight = _check_sample_weight(sample_weight, X, dtype=loss_dtype)
n_samples, n_features = X.shape
self._base_loss = self._get_loss()
linear_loss = LinearModelLoss(
base_loss=self._base_loss,
fit_intercept=self.fit_intercept,
)
if not linear_loss.base_loss.in_y_true_range(y):
raise ValueError(
"Some value(s) of y are out of the valid range of the loss"
f" {self._base_loss.__class__.__name__!r}."
)
# TODO: if alpha=0 check that X is not rank deficient
# NOTE: Rescaling of sample_weight:
# We want to minimize
# obj = 1/(2 * sum(sample_weight)) * sum(sample_weight * deviance)
# + 1/2 * alpha * L2,
# with
# deviance = 2 * loss.
# The objective is invariant to multiplying sample_weight by a constant. We
# could choose this constant such that sum(sample_weight) = 1 in order to end
# up with
# obj = sum(sample_weight * loss) + 1/2 * alpha * L2.
# But LinearModelLoss.loss() already computes
# average(loss, weights=sample_weight)
# Thus, without rescaling, we have
# obj = LinearModelLoss.loss(...)
if self.warm_start and hasattr(self, "coef_"):
if self.fit_intercept:
# LinearModelLoss needs intercept at the end of coefficient array.
coef = np.concatenate((self.coef_, np.array([self.intercept_])))
else:
coef = self.coef_
coef = coef.astype(loss_dtype, copy=False)
else:
coef = linear_loss.init_zero_coef(X, dtype=loss_dtype)
if self.fit_intercept:
coef[-1] = linear_loss.base_loss.link.link(
np.average(y, weights=sample_weight)
)
l2_reg_strength = self.alpha
n_threads = _openmp_effective_n_threads()
# Algorithms for optimization:
# Note again that our losses implement 1/2 * deviance.
if self.solver == "lbfgs":
func = linear_loss.loss_gradient
opt_res = scipy.optimize.minimize(
func,
coef,
method="L-BFGS-B",
jac=True,
options={
"maxiter": self.max_iter,
"maxls": 50, # default is 20
"iprint": self.verbose - 1,
"gtol": self.tol,
# The constant 64 was found empirically to pass the test suite.
# The point is that ftol is very small, but a bit larger than
# machine precision for float64, which is the dtype used by lbfgs.
"ftol": 64 * np.finfo(float).eps,
},
args=(X, y, sample_weight, l2_reg_strength, n_threads),
)
self.n_iter_ = _check_optimize_result(
"lbfgs", opt_res, max_iter=self.max_iter
)
coef = opt_res.x
elif self.solver == "newton-cholesky":
sol = NewtonCholeskySolver(
coef=coef,
linear_loss=linear_loss,
l2_reg_strength=l2_reg_strength,
tol=self.tol,
max_iter=self.max_iter,
n_threads=n_threads,
verbose=self.verbose,
)
coef = sol.solve(X, y, sample_weight)
self.n_iter_ = sol.iteration
elif issubclass(self.solver, NewtonSolver):
sol = self.solver(
coef=coef,
linear_loss=linear_loss,
l2_reg_strength=l2_reg_strength,
tol=self.tol,
max_iter=self.max_iter,
n_threads=n_threads,
)
coef = sol.solve(X, y, sample_weight)
self.n_iter_ = sol.iteration
else:
raise ValueError(f"Invalid solver={self.solver}.")
if self.fit_intercept:
self.intercept_ = coef[-1]
self.coef_ = coef[:-1]
else:
# set intercept to zero as the other linear models do
self.intercept_ = 0.0
self.coef_ = coef
return self
|
Fit a Generalized Linear Model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
Fitted model.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/glm.py
|
BSD-3-Clause
|
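The rescaling note inside `fit` above states that the objective is invariant to multiplying `sample_weight` by a constant, because the weighted deviance enters as an average. A small numeric sketch of that property, using squared error as a stand-in for the deviance (names and data here are purely illustrative):

```python
import numpy as np

def penalized_objective(coef, X, y, sample_weight, alpha):
    # obj = 1/(2 * sum(sw)) * sum(sw * deviance) + 1/2 * alpha * ||coef||_2^2,
    # with deviance = (y - X @ coef)**2 as a simple stand-in.
    deviance = (y - X @ coef) ** 2
    penalty = 0.5 * alpha * np.sum(coef**2)
    return 0.5 * np.sum(sample_weight * deviance) / np.sum(sample_weight) + penalty

rng = np.random.default_rng(0)
X = rng.normal(size=(30, 3))
y = rng.normal(size=30)
coef = rng.normal(size=3)
sw = rng.uniform(size=30)

obj = penalized_objective(coef, X, y, sw, alpha=0.5)
obj_scaled = penalized_objective(coef, X, y, 7.0 * sw, alpha=0.5)
assert np.isclose(obj, obj_scaled)  # scaling sample_weight does not change the objective
```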
def _linear_predictor(self, X):
"""Compute the linear_predictor = `X @ coef_ + intercept_`.
Note that we often use the term raw_prediction instead of linear predictor.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Samples.
Returns
-------
y_pred : array of shape (n_samples,)
Returns predicted values of linear predictor.
"""
check_is_fitted(self)
X = validate_data(
self,
X,
accept_sparse=["csr", "csc", "coo"],
dtype=[np.float64, np.float32],
ensure_2d=True,
allow_nd=False,
reset=False,
)
return X @ self.coef_ + self.intercept_
|
Compute the linear_predictor = `X @ coef_ + intercept_`.
Note that we often use the term raw_prediction instead of linear predictor.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Samples.
Returns
-------
y_pred : array of shape (n_samples,)
Returns predicted values of linear predictor.
|
_linear_predictor
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/glm.py
|
BSD-3-Clause
|
def predict(self, X):
"""Predict using GLM with feature matrix X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Samples.
Returns
-------
y_pred : array of shape (n_samples,)
Returns predicted values.
"""
# check_array is done in _linear_predictor
raw_prediction = self._linear_predictor(X)
y_pred = self._base_loss.link.inverse(raw_prediction)
return y_pred
|
Predict using GLM with feature matrix X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Samples.
Returns
-------
y_pred : array of shape (n_samples,)
Returns predicted values.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/glm.py
|
BSD-3-Clause
|
def score(self, X, y, sample_weight=None):
"""Compute D^2, the percentage of deviance explained.
D^2 is a generalization of the coefficient of determination R^2.
R^2 uses squared error and D^2 uses the deviance of this GLM, see the
:ref:`User Guide <regression_metrics>`.
D^2 is defined as
:math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`,
:math:`D_{null}` is the null deviance, i.e. the deviance of a model
with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`.
The mean :math:`\\bar{y}` is averaged by sample_weight.
Best possible score is 1.0 and it can be negative (because the model
can be arbitrarily worse).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,)
True values of target.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
D^2 of self.predict(X) w.r.t. y.
"""
# TODO: Adapt link to User Guide in the docstring, once
# https://github.com/scikit-learn/scikit-learn/pull/22118 is merged.
#
# Note, default score defined in RegressorMixin is R^2 score.
# TODO: make D^2 a score function in module metrics (and thereby get
# input validation and so on)
raw_prediction = self._linear_predictor(X) # validates X
# required by losses
y = check_array(y, dtype=raw_prediction.dtype, order="C", ensure_2d=False)
if sample_weight is not None:
# Note that _check_sample_weight calls check_array(order="C") required by
# losses.
sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype)
base_loss = self._base_loss
if not base_loss.in_y_true_range(y):
raise ValueError(
"Some value(s) of y are out of the valid range of the loss"
f" {base_loss.__name__}."
)
constant = np.average(
base_loss.constant_to_optimal_zero(y_true=y, sample_weight=None),
weights=sample_weight,
)
# Missing factor of 2 in deviance cancels out.
deviance = base_loss(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
n_threads=1,
)
y_mean = base_loss.link.link(np.average(y, weights=sample_weight))
deviance_null = base_loss(
y_true=y,
raw_prediction=np.tile(y_mean, y.shape[0]),
sample_weight=sample_weight,
n_threads=1,
)
return 1 - (deviance + constant) / (deviance_null + constant)
|
Compute D^2, the percentage of deviance explained.
D^2 is a generalization of the coefficient of determination R^2.
R^2 uses squared error and D^2 uses the deviance of this GLM, see the
:ref:`User Guide <regression_metrics>`.
D^2 is defined as
:math:`D^2 = 1-\frac{D(y_{true},y_{pred})}{D_{null}}`,
:math:`D_{null}` is the null deviance, i.e. the deviance of a model
with intercept alone, which corresponds to :math:`y_{pred} = \bar{y}`.
The mean :math:`\bar{y}` is averaged by sample_weight.
Best possible score is 1.0 and it can be negative (because the model
can be arbitrarily worse).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,)
True values of target.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
D^2 of self.predict(X) w.r.t. y.
|
score
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/glm.py
|
BSD-3-Clause
|
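For the squared-error deviance, the D^2 defined in `score` above reduces to the familiar R^2, which gives a quick sanity check on the formula. A short illustrative sketch (synthetic data; `r2_score` is used only as a reference value):

```python
import numpy as np
from sklearn.metrics import r2_score

rng = np.random.default_rng(0)
y_true = rng.normal(loc=3.0, size=50)
y_pred = y_true + rng.normal(scale=0.5, size=50)

# With deviance D(y, mu) = sum((y - mu)^2), D^2 = 1 - D(y, y_pred) / D_null,
# where the null model predicts the mean of y.
deviance = np.sum((y_true - y_pred) ** 2)
deviance_null = np.sum((y_true - y_true.mean()) ** 2)
d2 = 1 - deviance / deviance_null

assert np.isclose(d2, r2_score(y_true, y_pred))
```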
def setup(self, X, y, sample_weight):
"""Precomputations
If None, initializes:
- self.coef
Sets:
- self.raw_prediction
- self.loss_value
"""
_, _, self.raw_prediction = self.linear_loss.weight_intercept_raw(self.coef, X)
self.loss_value = self.linear_loss.loss(
coef=self.coef,
X=X,
y=y,
sample_weight=sample_weight,
l2_reg_strength=self.l2_reg_strength,
n_threads=self.n_threads,
raw_prediction=self.raw_prediction,
)
|
Precomputations
If None, initializes:
- self.coef
Sets:
- self.raw_prediction
- self.loss_value
|
setup
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/_newton_solver.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/_newton_solver.py
|
BSD-3-Clause
|
def inner_solve(self, X, y, sample_weight):
"""Compute Newton step.
Sets:
- self.coef_newton
- self.gradient_times_newton
"""
|
Compute Newton step.
Sets:
- self.coef_newton
- self.gradient_times_newton
|
inner_solve
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/_newton_solver.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/_newton_solver.py
|
BSD-3-Clause
|
def fallback_lbfgs_solve(self, X, y, sample_weight):
"""Fallback solver in case of emergency.
If a solver detects convergence problems, it may fall back to this method in
the hope of exiting with success instead of raising an error.
Sets:
- self.coef
- self.converged
"""
max_iter = self.max_iter - self.iteration
opt_res = scipy.optimize.minimize(
self.linear_loss.loss_gradient,
self.coef,
method="L-BFGS-B",
jac=True,
options={
"maxiter": max_iter,
"maxls": 50, # default is 20
"iprint": self.verbose - 1,
"gtol": self.tol,
"ftol": 64 * np.finfo(np.float64).eps,
},
args=(X, y, sample_weight, self.l2_reg_strength, self.n_threads),
)
self.iteration += _check_optimize_result("lbfgs", opt_res, max_iter=max_iter)
self.coef = opt_res.x
self.converged = opt_res.status == 0
|
Fallback solver in case of emergency.
If a solver detects convergence problems, it may fall back to this method in
the hope of exiting with success instead of raising an error.
Sets:
- self.coef
- self.converged
|
fallback_lbfgs_solve
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/_newton_solver.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/_newton_solver.py
|
BSD-3-Clause
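The fallback above follows the usual scipy.optimize.minimize pattern in which the objective returns both the loss and its gradient and jac=True is passed. A minimal self-contained sketch of that calling convention on a toy least-squares objective; the quadratic and its names are assumptions for illustration, not the solver's actual loss.

import numpy as np
import scipy.optimize

def loss_and_grad(w, A, b):
    # Toy objective 0.5 * ||A w - b||^2 returning (loss, gradient).
    residual = A @ w - b
    return 0.5 * residual @ residual, A.T @ residual

rng = np.random.RandomState(0)
A = rng.normal(size=(20, 5))
b = rng.normal(size=20)

opt_res = scipy.optimize.minimize(
    loss_and_grad,
    np.zeros(5),
    method="L-BFGS-B",
    jac=True,  # loss_and_grad returns (value, gradient)
    args=(A, b),
    options={"maxiter": 100, "gtol": 1e-10},
)

# For this convex problem, L-BFGS-B matches the least-squares solution.
w_exact, *_ = np.linalg.lstsq(A, b, rcond=None)
assert np.allclose(opt_res.x, w_exact, atol=1e-5)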
|
def line_search(self, X, y, sample_weight):
"""Backtracking line search.
Sets:
- self.coef_old
- self.coef
- self.loss_value_old
- self.loss_value
- self.gradient_old
- self.gradient
- self.raw_prediction
"""
# line search parameters
beta, sigma = 0.5, 0.00048828125 # 1/2, 1/2**11
eps = 16 * np.finfo(self.loss_value.dtype).eps
t = 1 # step size
# gradient_times_newton = self.gradient @ self.coef_newton
# was computed in inner_solve.
armijo_term = sigma * self.gradient_times_newton
_, _, raw_prediction_newton = self.linear_loss.weight_intercept_raw(
self.coef_newton, X
)
self.coef_old = self.coef
self.loss_value_old = self.loss_value
self.gradient_old = self.gradient
# np.sum(np.abs(self.gradient_old))
sum_abs_grad_old = -1
is_verbose = self.verbose >= 2
if is_verbose:
print(" Backtracking Line Search")
print(f" eps=16 * finfo.eps={eps}")
for i in range(21): # until and including t = beta**20 ~ 1e-6
self.coef = self.coef_old + t * self.coef_newton
raw = self.raw_prediction + t * raw_prediction_newton
self.loss_value, self.gradient = self.linear_loss.loss_gradient(
coef=self.coef,
X=X,
y=y,
sample_weight=sample_weight,
l2_reg_strength=self.l2_reg_strength,
n_threads=self.n_threads,
raw_prediction=raw,
)
# Note: If coef_newton is too large, loss_gradient may produce inf values,
# potentially accompanied by a RuntimeWarning.
# This case will be captured by the Armijo condition.
# 1. Check Armijo / sufficient decrease condition.
# The smaller (more negative) the better.
loss_improvement = self.loss_value - self.loss_value_old
check = loss_improvement <= t * armijo_term
if is_verbose:
print(
f" line search iteration={i + 1}, step size={t}\n"
f" check loss improvement <= armijo term: {loss_improvement} "
f"<= {t * armijo_term} {check}"
)
if check:
break
# 2. Deal with relative loss differences around machine precision.
tiny_loss = np.abs(self.loss_value_old * eps)
check = np.abs(loss_improvement) <= tiny_loss
if is_verbose:
print(
" check loss |improvement| <= eps * |loss_old|:"
f" {np.abs(loss_improvement)} <= {tiny_loss} {check}"
)
if check:
if sum_abs_grad_old < 0:
sum_abs_grad_old = scipy.linalg.norm(self.gradient_old, ord=1)
# 2.1 Check sum of absolute gradients as alternative condition.
sum_abs_grad = scipy.linalg.norm(self.gradient, ord=1)
check = sum_abs_grad < sum_abs_grad_old
if is_verbose:
print(
" check sum(|gradient|) < sum(|gradient_old|): "
f"{sum_abs_grad} < {sum_abs_grad_old} {check}"
)
if check:
break
t *= beta
else:
warnings.warn(
(
f"Line search of Newton solver {self.__class__.__name__} at"
f" iteration #{self.iteration} did no converge after 21 line search"
" refinement iterations. It will now resort to lbfgs instead."
),
ConvergenceWarning,
)
if self.verbose:
print(" Line search did not converge and resorts to lbfgs instead.")
self.use_fallback_lbfgs_solve = True
return
self.raw_prediction = raw
if is_verbose:
print(
f" line search successful after {i + 1} iterations with "
f"loss={self.loss_value}."
)
|
Backtracking line search.
Sets:
- self.coef_old
- self.coef
- self.loss_value_old
- self.loss_value
- self.gradient_old
- self.gradient
- self.raw_prediction
|
line_search
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/_newton_solver.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/_newton_solver.py
|
BSD-3-Clause
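A stripped-down sketch of the backtracking idea implemented above: halve the step size until the Armijo sufficient-decrease condition f(x + t d) - f(x) <= sigma * t * grad(x) @ d holds. The function, constants and names below are illustrative assumptions and omit the solver's additional checks around machine precision.

import numpy as np

def backtracking_line_search(f, grad_f, x, direction, beta=0.5, sigma=1e-4, max_halvings=21):
    # Shrink the step size t by beta until the Armijo condition holds.
    t = 1.0
    fx = f(x)
    armijo_slope = sigma * (grad_f(x) @ direction)
    for _ in range(max_halvings):
        if f(x + t * direction) - fx <= t * armijo_slope:
            return t
        t *= beta
    return t  # no sufficient decrease found; a real solver would fall back

# Toy strictly convex function f(x) = 0.5 * ||x||^2 with Newton direction -x.
f = lambda x: 0.5 * x @ x
grad_f = lambda x: x
x = np.array([3.0, -4.0])
t = backtracking_line_search(f, grad_f, x, direction=-x)
print(t)  # 1.0: the full Newton step already gives sufficient decrease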
|
def check_convergence(self, X, y, sample_weight):
"""Check for convergence.
Sets self.converged.
"""
if self.verbose:
print(" Check Convergence")
# Note: Checking maximum relative change of coefficient <= tol is a bad
# convergence criterion because even a large step could have brought us close
# to the true minimum.
# coef_step = self.coef - self.coef_old
# change = np.max(np.abs(coef_step) / np.maximum(1, np.abs(self.coef_old)))
# check = change <= tol
# 1. Criterion: maximum |gradient| <= tol
# The gradient was already updated in line_search()
g_max_abs = np.max(np.abs(self.gradient))
check = g_max_abs <= self.tol
if self.verbose:
print(f" 1. max |gradient| {g_max_abs} <= {self.tol} {check}")
if not check:
return
# 2. Criterion: For Newton decrement d, check 1/2 * d^2 <= tol
# d = sqrt(grad @ hessian^-1 @ grad)
# = sqrt(coef_newton @ hessian @ coef_newton)
# See Boyd, Vandenberghe (2009) "Convex Optimization" Chapter 9.5.1.
d2 = self.coef_newton @ self.hessian @ self.coef_newton
check = 0.5 * d2 <= self.tol
if self.verbose:
print(f" 2. Newton decrement {0.5 * d2} <= {self.tol} {check}")
if not check:
return
if self.verbose:
loss_value = self.linear_loss.loss(
coef=self.coef,
X=X,
y=y,
sample_weight=sample_weight,
l2_reg_strength=self.l2_reg_strength,
n_threads=self.n_threads,
)
print(f" Solver did converge at loss = {loss_value}.")
self.converged = True
|
Check for convergence.
Sets self.converged.
|
check_convergence
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/_newton_solver.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/_newton_solver.py
|
BSD-3-Clause
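A small numerical sketch of the Newton decrement used in criterion 2 above: d^2 = coef_newton' H coef_newton equals grad' H^{-1} grad when the step solves H d = -grad. The toy quadratic below is an assumption for illustration only, not the solver's state.

import numpy as np

# Strictly convex quadratic f(w) = 0.5 * w' H w - b' w.
H = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
w = np.array([0.5, -0.5])

gradient = H @ w - b
coef_newton = np.linalg.solve(H, -gradient)  # Newton step solves H d = -grad

# Both expressions for the squared Newton decrement coincide.
d2_from_step = coef_newton @ H @ coef_newton
d2_from_grad = gradient @ np.linalg.solve(H, gradient)
assert np.isclose(d2_from_step, d2_from_grad)

# Convergence is declared once 0.5 * d2 <= tol (after the gradient check).
print(0.5 * d2_from_step)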
|
def solve(self, X, y, sample_weight):
"""Solve the optimization problem.
This is the main routine.
Order of calls:
self.setup()
while iteration:
self.update_gradient_hessian()
self.inner_solve()
self.line_search()
self.check_convergence()
self.finalize()
Returns
-------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Solution of the optimization problem.
"""
# setup usually:
# - initializes self.coef if needed
# - initializes and calculates self.raw_prediction, self.loss_value
self.setup(X=X, y=y, sample_weight=sample_weight)
self.iteration = 1
self.converged = False
self.use_fallback_lbfgs_solve = False
while self.iteration <= self.max_iter and not self.converged:
if self.verbose:
print(f"Newton iter={self.iteration}")
self.use_fallback_lbfgs_solve = False # Fallback solver.
# 1. Update Hessian and gradient
self.update_gradient_hessian(X=X, y=y, sample_weight=sample_weight)
# TODO:
# if iteration == 1:
# We might stop early, e.g. we already are close to the optimum,
# usually detected by zero gradients at this stage.
# 2. Inner solver
# Calculate Newton step/direction
# This usually sets self.coef_newton and self.gradient_times_newton.
self.inner_solve(X=X, y=y, sample_weight=sample_weight)
if self.use_fallback_lbfgs_solve:
break
# 3. Backtracking line search
# This usually sets self.coef_old, self.coef, self.loss_value_old
# self.loss_value, self.gradient_old, self.gradient,
# self.raw_prediction.
self.line_search(X=X, y=y, sample_weight=sample_weight)
if self.use_fallback_lbfgs_solve:
break
# 4. Check convergence
# Sets self.converged.
self.check_convergence(X=X, y=y, sample_weight=sample_weight)
# 5. Next iteration
self.iteration += 1
if not self.converged:
if self.use_fallback_lbfgs_solve:
# Note: The fallback solver circumvents check_convergence and relies on
# the convergence checks of lbfgs instead. Enough warnings have been
# raised on the way.
self.fallback_lbfgs_solve(X=X, y=y, sample_weight=sample_weight)
else:
warnings.warn(
(
f"Newton solver did not converge after {self.iteration - 1} "
"iterations."
),
ConvergenceWarning,
)
self.iteration -= 1
self.finalize(X=X, y=y, sample_weight=sample_weight)
return self.coef
|
Solve the optimization problem.
This is the main routine.
Order of calls:
self.setup()
while iteration:
self.update_gradient_hessian()
self.inner_solve()
self.line_search()
self.check_convergence()
self.finalize()
Returns
-------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Solution of the optimization problem.
|
solve
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/_newton_solver.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/_newton_solver.py
|
BSD-3-Clause
|
def glm_dataset(global_random_seed, request):
"""Dataset with GLM solutions, well conditioned X.
This is inspired by ols_ridge_dataset in test_ridge.py.
The construction is based on the SVD decomposition of X = U S V'.
Parameters
----------
type : {"long", "wide"}
If "long", then n_samples > n_features.
If "wide", then n_features > n_samples.
model : a GLM model
For "wide", we return the minimum norm solution:
min ||w||_2 subject to w = argmin deviance(X, y, w)
Note that the deviance is always minimized if y = inverse_link(X w) is possible to
achieve, which it is in the wide data case. Therefore, we can construct the
solution with minimum norm like (wide) OLS:
min ||w||_2 subject to link(y) = raw_prediction = X w
Returns
-------
model : GLM model
X : ndarray
Last column of 1, i.e. intercept.
y : ndarray
coef_unpenalized : ndarray
Minimum norm solutions, i.e. min sum(loss(w)) (with minimum ||w||_2 in
case of ambiguity)
Last coefficient is intercept.
coef_penalized : ndarray
GLM solution with alpha=l2_reg_strength=1, i.e.
min 1/n * sum(loss) + ||w[:-1]||_2^2.
Last coefficient is intercept.
l2_reg_strength : float
Always equal to 1.
"""
data_type, model = request.param
# Make the larger dimension more than twice as big as the smaller one.
# This helps when constructing singular matrices like (X, X).
if data_type == "long":
n_samples, n_features = 12, 4
else:
n_samples, n_features = 4, 12
k = min(n_samples, n_features)
rng = np.random.RandomState(global_random_seed)
X = make_low_rank_matrix(
n_samples=n_samples,
n_features=n_features,
effective_rank=k,
tail_strength=0.1,
random_state=rng,
)
X[:, -1] = 1  # last column acts as intercept
U, s, Vt = linalg.svd(X, full_matrices=False)
assert np.all(s > 1e-3) # to be sure
assert np.max(s) / np.min(s) < 100 # condition number of X
if data_type == "long":
coef_unpenalized = rng.uniform(low=1, high=3, size=n_features)
coef_unpenalized *= rng.choice([-1, 1], size=n_features)
raw_prediction = X @ coef_unpenalized
else:
raw_prediction = rng.uniform(low=-3, high=3, size=n_samples)
# minimum norm solution min ||w||_2 such that raw_prediction = X w:
# w = X'(XX')^-1 raw_prediction = V s^-1 U' raw_prediction
coef_unpenalized = Vt.T @ np.diag(1 / s) @ U.T @ raw_prediction
linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=True)
sw = np.full(shape=n_samples, fill_value=1 / n_samples)
y = linear_loss.base_loss.link.inverse(raw_prediction)
# Add penalty l2_reg_strength * ||coef||_2^2 for l2_reg_strength=1 and solve with
# optimizer. Note that the problem is well conditioned such that we get accurate
# results.
l2_reg_strength = 1
fun = partial(
linear_loss.loss,
X=X[:, :-1],
y=y,
sample_weight=sw,
l2_reg_strength=l2_reg_strength,
)
grad = partial(
linear_loss.gradient,
X=X[:, :-1],
y=y,
sample_weight=sw,
l2_reg_strength=l2_reg_strength,
)
coef_penalized_with_intercept = _special_minimize(
fun, grad, coef_unpenalized, tol_NM=1e-6, tol=1e-14
)
linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=False)
fun = partial(
linear_loss.loss,
X=X[:, :-1],
y=y,
sample_weight=sw,
l2_reg_strength=l2_reg_strength,
)
grad = partial(
linear_loss.gradient,
X=X[:, :-1],
y=y,
sample_weight=sw,
l2_reg_strength=l2_reg_strength,
)
coef_penalized_without_intercept = _special_minimize(
fun, grad, coef_unpenalized[:-1], tol_NM=1e-6, tol=1e-14
)
# To be sure
assert np.linalg.norm(coef_penalized_with_intercept) < np.linalg.norm(
coef_unpenalized
)
return (
model,
X,
y,
coef_unpenalized,
coef_penalized_with_intercept,
coef_penalized_without_intercept,
l2_reg_strength,
)
|
Dataset with GLM solutions, well conditioned X.
This is inspired by ols_ridge_dataset in test_ridge.py.
The construction is based on the SVD decomposition of X = U S V'.
Parameters
----------
type : {"long", "wide"}
If "long", then n_samples > n_features.
If "wide", then n_features > n_samples.
model : a GLM model
For "wide", we return the minimum norm solution:
min ||w||_2 subject to w = argmin deviance(X, y, w)
Note that the deviance is always minimized if y = inverse_link(X w) is possible to
achieve, which it is in the wide data case. Therefore, we can construct the
solution with minimum norm like (wide) OLS:
min ||w||_2 subject to link(y) = raw_prediction = X w
Returns
-------
model : GLM model
X : ndarray
Last column of 1, i.e. intercept.
y : ndarray
coef_unpenalized : ndarray
Minimum norm solutions, i.e. min sum(loss(w)) (with minimum ||w||_2 in
case of ambiguity)
Last coefficient is intercept.
coef_penalized : ndarray
GLM solution with alpha=l2_reg_strength=1, i.e.
min 1/n * sum(loss) + ||w[:-1]||_2^2.
Last coefficient is intercept.
l2_reg_strength : float
Always equal to 1.
|
glm_dataset
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/tests/test_glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/tests/test_glm.py
|
BSD-3-Clause
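The wide-data branch of the fixture above builds the minimum norm coefficients through the SVD pseudo-inverse, w = V s^{-1} U' raw_prediction. A short sketch of that identity on synthetic data; np.linalg.pinv only serves as a cross-check and the shapes are assumptions mirroring the fixture.

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
n_samples, n_features = 4, 12  # wide case: n_features > n_samples
X = rng.normal(size=(n_samples, n_features))
raw_prediction = rng.uniform(low=-3, high=3, size=n_samples)

# Minimum norm w with X w = raw_prediction, via the thin SVD X = U S V'.
U, s, Vt = linalg.svd(X, full_matrices=False)
coef_min_norm = Vt.T @ np.diag(1 / s) @ U.T @ raw_prediction

# It solves the linear system and agrees with the pseudo-inverse solution.
assert np.allclose(X @ coef_min_norm, raw_prediction)
assert np.allclose(coef_min_norm, np.linalg.pinv(X) @ raw_prediction)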
|
def test_glm_regression(solver, fit_intercept, glm_dataset):
"""Test that GLM converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
"""
model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-12,
max_iter=1000,
)
model = clone(model).set_params(**params)
X = X[:, :-1] # remove intercept
if fit_intercept:
coef = coef_with_intercept
intercept = coef[-1]
coef = coef[:-1]
else:
coef = coef_without_intercept
intercept = 0
model.fit(X, y)
rtol = 5e-5 if solver == "lbfgs" else 1e-9
assert model.intercept_ == pytest.approx(intercept, rel=rtol)
assert_allclose(model.coef_, coef, rtol=rtol)
# Same with sample_weight.
model = (
clone(model).set_params(**params).fit(X, y, sample_weight=np.ones(X.shape[0]))
)
assert model.intercept_ == pytest.approx(intercept, rel=rtol)
assert_allclose(model.coef_, coef, rtol=rtol)
|
Test that GLM converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
|
test_glm_regression
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/tests/test_glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/tests/test_glm.py
|
BSD-3-Clause
|
def test_glm_regression_hstacked_X(solver, fit_intercept, glm_dataset):
"""Test that GLM converges for all solvers to correct solution on hstacked data.
We work with a simple constructed data set with known solution.
Fit on [X] with alpha is the same as fit on [X, X]/2 with alpha/2.
For long X, [X, X] is still a long but singular matrix.
"""
model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
n_samples, n_features = X.shape
params = dict(
alpha=alpha / 2,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-12,
max_iter=1000,
)
model = clone(model).set_params(**params)
X = X[:, :-1] # remove intercept
X = 0.5 * np.concatenate((X, X), axis=1)
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features - 1)
if fit_intercept:
coef = coef_with_intercept
intercept = coef[-1]
coef = coef[:-1]
else:
coef = coef_without_intercept
intercept = 0
with warnings.catch_warnings():
# XXX: Investigate if the ConvergenceWarning that can appear in some
# cases should be considered a bug or not. In the meantime we don't
# fail when the assertions below pass irrespective of the presence of
# the warning.
warnings.simplefilter("ignore", ConvergenceWarning)
model.fit(X, y)
rtol = 2e-4 if solver == "lbfgs" else 5e-9
assert model.intercept_ == pytest.approx(intercept, rel=rtol)
assert_allclose(model.coef_, np.r_[coef, coef], rtol=rtol)
|
Test that GLM converges for all solvers to correct solution on hstacked data.
We work with a simple constructed data set with known solution.
Fit on [X] with alpha is the same as fit on [X, X]/2 with alpha/2.
For long X, [X, X] is still a long but singular matrix.
|
test_glm_regression_hstacked_X
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/tests/test_glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/tests/test_glm.py
|
BSD-3-Clause
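The equivalence this test exercises, fit on [X] with alpha equals fit on [X, X]/2 with alpha/2, can be sketched directly: duplicating the columns while halving both the design and the penalty leaves the prediction X w and the total penalty alpha * ||w||^2 unchanged. The data, solver choice and tolerances below are assumptions for illustration.

import numpy as np
from sklearn.linear_model import PoissonRegressor

rng = np.random.RandomState(0)
X = rng.normal(size=(30, 4))
y = rng.poisson(lam=np.exp(X @ np.array([0.2, -0.1, 0.3, 0.0]))).astype(float)

alpha = 1.0
glm = PoissonRegressor(alpha=alpha, solver="newton-cholesky", tol=1e-10).fit(X, y)

X_stacked = 0.5 * np.hstack([X, X])
glm_stacked = PoissonRegressor(
    alpha=alpha / 2, solver="newton-cholesky", tol=1e-10
).fit(X_stacked, y)

# The stacked fit recovers the original coefficients, duplicated.
assert np.allclose(glm_stacked.coef_, np.r_[glm.coef_, glm.coef_], rtol=1e-4)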
|
def test_glm_regression_vstacked_X(solver, fit_intercept, glm_dataset):
"""Test that GLM converges for all solvers to correct solution on vstacked data.
We work with a simple constructed data set with known solution.
Fit on X, y with alpha is the same as fit on the vertically stacked data
np.r_[X, X], np.r_[y, y] with the same alpha,
because the average loss stays the same.
For wide X, [X', X'] is a singular matrix.
"""
model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
n_samples, n_features = X.shape
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-12,
max_iter=1000,
)
model = clone(model).set_params(**params)
X = X[:, :-1] # remove intercept
X = np.concatenate((X, X), axis=0)
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
y = np.r_[y, y]
if fit_intercept:
coef = coef_with_intercept
intercept = coef[-1]
coef = coef[:-1]
else:
coef = coef_without_intercept
intercept = 0
model.fit(X, y)
rtol = 3e-5 if solver == "lbfgs" else 5e-9
assert model.intercept_ == pytest.approx(intercept, rel=rtol)
assert_allclose(model.coef_, coef, rtol=rtol)
|
Test that GLM converges for all solvers to correct solution on vstacked data.
We work with a simple constructed data set with known solution.
Fit on X, y with alpha is the same as fit on the vertically stacked data
np.r_[X, X], np.r_[y, y] with the same alpha,
because the average loss stays the same.
For wide X, [X', X'] is a singular matrix.
|
test_glm_regression_vstacked_X
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/tests/test_glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/tests/test_glm.py
|
BSD-3-Clause
|
def test_glm_regression_unpenalized(solver, fit_intercept, glm_dataset):
"""Test that unpenalized GLM converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
Note: This checks the minimum norm solution for wide X, i.e.
n_samples < n_features:
min ||w||_2 subject to w = argmin deviance(X, y, w)
"""
model, X, y, coef, _, _, _ = glm_dataset
n_samples, n_features = X.shape
alpha = 0 # unpenalized
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-12,
max_iter=1000,
)
model = clone(model).set_params(**params)
if fit_intercept:
X = X[:, :-1] # remove intercept
intercept = coef[-1]
coef = coef[:-1]
else:
intercept = 0
with warnings.catch_warnings():
if solver.startswith("newton") and n_samples < n_features:
# The newton solvers should warn and automatically fall back to LBFGS
# in this case. The model should still converge.
warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning)
# XXX: Investigate if the ConvergenceWarning that can appear in some
# cases should be considered a bug or not. In the meantime we don't
# fail when the assertions below pass irrespective of the presence of
# the warning.
warnings.filterwarnings("ignore", category=ConvergenceWarning)
model.fit(X, y)
# FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails
# for the wide/fat case with n_features > n_samples. Most current GLM solvers do
# NOT return the minimum norm solution with fit_intercept=True.
if n_samples > n_features:
rtol = 5e-5 if solver == "lbfgs" else 1e-7
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef, rtol=rtol)
else:
# As it is an underdetermined problem, prediction = y. The following shows that
# we get a solution, i.e. a (non-unique) minimum of the objective function ...
rtol = 5e-5
if solver == "newton-cholesky":
rtol = 5e-4
assert_allclose(model.predict(X), y, rtol=rtol)
norm_solution = np.linalg.norm(np.r_[intercept, coef])
norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
if solver == "newton-cholesky":
# XXX: This solver shows random behaviour. Sometimes it finds solutions
# with norm_model <= norm_solution! So we check conditionally.
if norm_model < (1 + 1e-12) * norm_solution:
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef, rtol=rtol)
elif solver == "lbfgs" and fit_intercept:
# But it is not the minimum norm solution. Otherwise the norms would be
# equal.
assert norm_model > (1 + 1e-12) * norm_solution
# See https://github.com/scikit-learn/scikit-learn/issues/23670.
# Note: Even adding a tiny penalty does not give the minimal norm solution.
# XXX: We could have naively expected LBFGS to find the minimal norm
# solution by adding a very small penalty. Even that fails for a reason we
# do not properly understand at this point.
else:
# When `fit_intercept=False`, LBFGS naturally converges to the minimum norm
# solution on this problem.
# XXX: Do we have any theoretical guarantees why this should be the case?
assert model.intercept_ == pytest.approx(intercept, rel=rtol)
assert_allclose(model.coef_, coef, rtol=rtol)
|
Test that unpenalized GLM converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
Note: This checks the minimum norm solution for wide X, i.e.
n_samples < n_features:
min ||w||_2 subject to w = argmin deviance(X, y, w)
|
test_glm_regression_unpenalized
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/tests/test_glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/tests/test_glm.py
|
BSD-3-Clause
|
def test_glm_regression_unpenalized_hstacked_X(solver, fit_intercept, glm_dataset):
"""Test that unpenalized GLM converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
GLM fit on [X] is the same as fit on [X, X]/2.
For long X, [X, X] is a singular matrix and we check against the minimum norm
solution:
min ||w||_2 subject to w = argmin deviance(X, y, w)
"""
model, X, y, coef, _, _, _ = glm_dataset
n_samples, n_features = X.shape
alpha = 0 # unpenalized
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-12,
max_iter=1000,
)
model = clone(model).set_params(**params)
if fit_intercept:
intercept = coef[-1]
coef = coef[:-1]
if n_samples > n_features:
X = X[:, :-1] # remove intercept
X = 0.5 * np.concatenate((X, X), axis=1)
else:
# To know the minimum norm solution, we keep one intercept column and do
# not divide by 2. Later on, we must take special care.
X = np.c_[X[:, :-1], X[:, :-1], X[:, -1]]
else:
intercept = 0
X = 0.5 * np.concatenate((X, X), axis=1)
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
with warnings.catch_warnings():
if solver.startswith("newton"):
# The newton solvers should warn and automatically fall back to LBFGS
# in this case. The model should still converge.
warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning)
# XXX: Investigate if the ConvergenceWarning that can appear in some
# cases should be considered a bug or not. In the meantime we don't
# fail when the assertions below pass irrespective of the presence of
# the warning.
warnings.filterwarnings("ignore", category=ConvergenceWarning)
model.fit(X, y)
if fit_intercept and n_samples < n_features:
# Here we take special care.
model_intercept = 2 * model.intercept_
model_coef = 2 * model.coef_[:-1] # exclude the other intercept term.
# For minimum norm solution, we would have
# assert model.intercept_ == pytest.approx(model.coef_[-1])
else:
model_intercept = model.intercept_
model_coef = model.coef_
if n_samples > n_features:
assert model_intercept == pytest.approx(intercept)
rtol = 1e-4
assert_allclose(model_coef, np.r_[coef, coef], rtol=rtol)
else:
# As it is an underdetermined problem, prediction = y. The following shows that
# we get a solution, i.e. a (non-unique) minimum of the objective function ...
rtol = 1e-6 if solver == "lbfgs" else 5e-6
assert_allclose(model.predict(X), y, rtol=rtol)
if (solver == "lbfgs" and fit_intercept) or solver == "newton-cholesky":
# Same as in test_glm_regression_unpenalized.
# But it is not the minimum norm solution. Otherwise the norms would be
# equal.
norm_solution = np.linalg.norm(
0.5 * np.r_[intercept, intercept, coef, coef]
)
norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
assert norm_model > (1 + 1e-12) * norm_solution
# For minimum norm solution, we would have
# assert model.intercept_ == pytest.approx(model.coef_[-1])
else:
assert model_intercept == pytest.approx(intercept, rel=5e-6)
assert_allclose(model_coef, np.r_[coef, coef], rtol=1e-4)
|
Test that unpenalized GLM converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
GLM fit on [X] is the same as fit on [X, X]/2.
For long X, [X, X] is a singular matrix and we check against the minimum norm
solution:
min ||w||_2 subject to w = argmin deviance(X, y, w)
|
test_glm_regression_unpenalized_hstacked_X
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/tests/test_glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/tests/test_glm.py
|
BSD-3-Clause
|
def test_glm_regression_unpenalized_vstacked_X(solver, fit_intercept, glm_dataset):
"""Test that unpenalized GLM converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
GLM fit on X, y is the same as fit on the vertically stacked data
np.r_[X, X], np.r_[y, y].
For wide X, [X', X'] is a singular matrix and we check against the minimum norm
solution:
min ||w||_2 subject to w = argmin deviance(X, y, w)
"""
model, X, y, coef, _, _, _ = glm_dataset
n_samples, n_features = X.shape
alpha = 0 # unpenalized
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-12,
max_iter=1000,
)
model = clone(model).set_params(**params)
if fit_intercept:
X = X[:, :-1] # remove intercept
intercept = coef[-1]
coef = coef[:-1]
else:
intercept = 0
X = np.concatenate((X, X), axis=0)
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
y = np.r_[y, y]
with warnings.catch_warnings():
if solver.startswith("newton") and n_samples < n_features:
# The newton solvers should warn and automatically fall back to LBFGS
# in this case. The model should still converge.
warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning)
# XXX: Investigate if the ConvergenceWarning that can appear in some
# cases should be considered a bug or not. In the meantime we don't
# fail when the assertions below pass irrespective of the presence of
# the warning.
warnings.filterwarnings("ignore", category=ConvergenceWarning)
model.fit(X, y)
if n_samples > n_features:
rtol = 5e-5 if solver == "lbfgs" else 1e-6
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef, rtol=rtol)
else:
# As it is an underdetermined problem, prediction = y. The following shows that
# we get a solution, i.e. a (non-unique) minimum of the objective function ...
rtol = 1e-6 if solver == "lbfgs" else 5e-6
assert_allclose(model.predict(X), y, rtol=rtol)
norm_solution = np.linalg.norm(np.r_[intercept, coef])
norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
if solver == "newton-cholesky":
# XXX: This solver shows random behaviour. Sometimes it finds solutions
# with norm_model <= norm_solution! So we check conditionally.
if not (norm_model > (1 + 1e-12) * norm_solution):
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef, rtol=1e-4)
elif solver == "lbfgs" and fit_intercept:
# Same as in test_glm_regression_unpenalized.
# But it is not the minimum norm solution. Otherwise the norms would be
# equal.
assert norm_model > (1 + 1e-12) * norm_solution
else:
rtol = 1e-5 if solver == "newton-cholesky" else 1e-4
assert model.intercept_ == pytest.approx(intercept, rel=rtol)
assert_allclose(model.coef_, coef, rtol=rtol)
|
Test that unpenalized GLM converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
GLM fit on X, y is the same as fit on the vertically stacked data
np.r_[X, X], np.r_[y, y].
For wide X, [X', X'] is a singular matrix and we check against the minimum norm
solution:
min ||w||_2 subject to w = argmin deviance(X, y, w)
|
test_glm_regression_unpenalized_vstacked_X
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/tests/test_glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/tests/test_glm.py
|
BSD-3-Clause
|
def test_sample_weights_validation():
"""Test the raised errors in the validation of sample_weight."""
# scalar value but not positive
X = [[1]]
y = [1]
weights = 0
glm = _GeneralizedLinearRegressor()
# Positive weights are accepted
glm.fit(X, y, sample_weight=1)
# 2d array
weights = [[0]]
with pytest.raises(ValueError, match="must be 1D array or scalar"):
glm.fit(X, y, weights)
# 1d but wrong length
weights = [1, 0]
msg = r"sample_weight.shape == \(2,\), expected \(1,\)!"
with pytest.raises(ValueError, match=msg):
glm.fit(X, y, weights)
|
Test the raised errors in the validation of sample_weight.
|
test_sample_weights_validation
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/tests/test_glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/tests/test_glm.py
|
BSD-3-Clause
|
def test_glm_wrong_y_range(glm):
"""
Test that fitting a GLM model raises a ValueError when `y` contains
values outside the valid range for the given distribution.
Generalized Linear Models (GLMs) with certain distributions, such as
Poisson, Gamma, and Tweedie (with power > 1), require `y` to be
non-negative. This test ensures that passing a `y` array containing
negative values triggers the expected ValueError with the correct message.
"""
y = np.array([-1, 2])
X = np.array([[1], [1]])
msg = r"Some value\(s\) of y are out of the valid range of the loss"
with pytest.raises(ValueError, match=msg):
glm.fit(X, y)
|
Test that fitting a GLM model raises a ValueError when `y` contains
values outside the valid range for the given distribution.
Generalized Linear Models (GLMs) with certain distributions, such as
Poisson, Gamma, and Tweedie (with power > 1), require `y` to be
non-negative. This test ensures that passing a `y` array containing
negative values triggers the expected ValueError with the correct message.
|
test_glm_wrong_y_range
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/tests/test_glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/tests/test_glm.py
|
BSD-3-Clause
|
def test_glm_identity_regression(fit_intercept):
"""Test GLM regression with identity link on a simple dataset."""
coef = [1.0, 2.0]
X = np.array([[1, 1, 1, 1, 1], [0, 1, 2, 3, 4]]).T
y = np.dot(X, coef)
glm = _GeneralizedLinearRegressor(
alpha=0,
fit_intercept=fit_intercept,
tol=1e-12,
)
if fit_intercept:
glm.fit(X[:, 1:], y)
assert_allclose(glm.coef_, coef[1:], rtol=1e-10)
assert_allclose(glm.intercept_, coef[0], rtol=1e-10)
else:
glm.fit(X, y)
assert_allclose(glm.coef_, coef, rtol=1e-12)
|
Test GLM regression with identity link on a simple dataset.
|
test_glm_identity_regression
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/tests/test_glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/tests/test_glm.py
|
BSD-3-Clause
|
def test_glm_sample_weight_consistency(fit_intercept, alpha, GLMEstimator):
"""Test that the impact of sample_weight is consistent"""
rng = np.random.RandomState(0)
n_samples, n_features = 10, 5
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
glm_params = dict(alpha=alpha, fit_intercept=fit_intercept)
glm = GLMEstimator(**glm_params).fit(X, y)
coef = glm.coef_.copy()
# sample_weight=np.ones(..) should be equivalent to sample_weight=None
sample_weight = np.ones(y.shape)
glm.fit(X, y, sample_weight=sample_weight)
assert_allclose(glm.coef_, coef, rtol=1e-12)
# sample_weight is normalized to sum to 1, so scaling it has no effect
sample_weight = 2 * np.ones(y.shape)
glm.fit(X, y, sample_weight=sample_weight)
assert_allclose(glm.coef_, coef, rtol=1e-12)
# setting one element of sample_weight to 0 is equivalent to removing
# the corresponding sample
sample_weight = np.ones(y.shape)
sample_weight[-1] = 0
glm.fit(X, y, sample_weight=sample_weight)
coef1 = glm.coef_.copy()
glm.fit(X[:-1], y[:-1])
assert_allclose(glm.coef_, coef1, rtol=1e-12)
# check that multiplying sample_weight by 2 is equivalent
# to repeating corresponding samples twice
X2 = np.concatenate([X, X[: n_samples // 2]], axis=0)
y2 = np.concatenate([y, y[: n_samples // 2]])
sample_weight_1 = np.ones(len(y))
sample_weight_1[: n_samples // 2] = 2
glm1 = GLMEstimator(**glm_params).fit(X, y, sample_weight=sample_weight_1)
glm2 = GLMEstimator(**glm_params).fit(X2, y2, sample_weight=None)
assert_allclose(glm1.coef_, glm2.coef_)
|
Test that the impact of sample_weight is consistent
|
test_glm_sample_weight_consistency
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/tests/test_glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/tests/test_glm.py
|
BSD-3-Clause
|
def test_glm_log_regression(solver, fit_intercept, estimator):
"""Test GLM regression with log link on a simple dataset."""
coef = [0.2, -0.1]
X = np.array([[0, 1, 2, 3, 4], [1, 1, 1, 1, 1]]).T
y = np.exp(np.dot(X, coef))
glm = clone(estimator).set_params(
alpha=0,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-8,
)
if fit_intercept:
res = glm.fit(X[:, :-1], y)
assert_allclose(res.coef_, coef[:-1], rtol=1e-6)
assert_allclose(res.intercept_, coef[-1], rtol=1e-6)
else:
res = glm.fit(X, y)
assert_allclose(res.coef_, coef, rtol=2e-6)
|
Test GLM regression with log link on a simple dataset.
|
test_glm_log_regression
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/tests/test_glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/tests/test_glm.py
|
BSD-3-Clause
|
def test_warm_start(solver, fit_intercept, global_random_seed):
"""
Test that `warm_start=True` enables incremental fitting in PoissonRegressor.
This test verifies that when using `warm_start=True`, the model continues
optimizing from previous coefficients instead of restarting from scratch.
It ensures that after an initial fit with `max_iter=1`, the model has a
higher objective function value (indicating incomplete optimization).
The test then checks whether allowing additional iterations enables
convergence to a solution comparable to a fresh training run (`warm_start=False`).
"""
n_samples, n_features = 100, 10
X, y = make_regression(
n_samples=n_samples,
n_features=n_features,
n_informative=n_features - 2,
bias=fit_intercept * 1.0,
noise=1.0,
random_state=global_random_seed,
)
y = np.abs(y) # Poisson requires non-negative targets.
alpha = 1
params = {
"solver": solver,
"fit_intercept": fit_intercept,
"tol": 1e-10,
}
glm1 = PoissonRegressor(warm_start=False, max_iter=1000, alpha=alpha, **params)
glm1.fit(X, y)
glm2 = PoissonRegressor(warm_start=True, max_iter=1, alpha=alpha, **params)
# We intentionally set max_iter=1 so that the solver raises a
# ConvergenceWarning.
with pytest.warns(ConvergenceWarning):
glm2.fit(X, y)
linear_loss = LinearModelLoss(
base_loss=glm1._get_loss(),
fit_intercept=fit_intercept,
)
sw = np.full_like(y, fill_value=1 / n_samples)
objective_glm1 = linear_loss.loss(
coef=np.r_[glm1.coef_, glm1.intercept_] if fit_intercept else glm1.coef_,
X=X,
y=y,
sample_weight=sw,
l2_reg_strength=alpha,
)
objective_glm2 = linear_loss.loss(
coef=np.r_[glm2.coef_, glm2.intercept_] if fit_intercept else glm2.coef_,
X=X,
y=y,
sample_weight=sw,
l2_reg_strength=alpha,
)
assert objective_glm1 < objective_glm2
glm2.set_params(max_iter=1000)
glm2.fit(X, y)
# The two models are not exactly identical since the lbfgs solver
# computes the approximate hessian from previous iterations, which
# will not be strictly identical in the case of a warm start.
rtol = 2e-4 if solver == "lbfgs" else 1e-9
assert_allclose(glm1.coef_, glm2.coef_, rtol=rtol)
assert_allclose(glm1.score(X, y), glm2.score(X, y), rtol=1e-5)
|
Test that `warm_start=True` enables incremental fitting in PoissonRegressor.
This test verifies that when using `warm_start=True`, the model continues
optimizing from previous coefficients instead of restarting from scratch.
It ensures that after an initial fit with `max_iter=1`, the model has a
higher objective function value (indicating incomplete optimization).
The test then checks whether allowing additional iterations enables
convergence to a solution comparable to a fresh training run (`warm_start=False`).
|
test_warm_start
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/tests/test_glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/tests/test_glm.py
|
BSD-3-Clause
|
def test_normal_ridge_comparison(
n_samples, n_features, fit_intercept, sample_weight, request
):
"""Compare with Ridge regression for Normal distributions."""
test_size = 10
X, y = make_regression(
n_samples=n_samples + test_size,
n_features=n_features,
n_informative=n_features - 2,
noise=0.5,
random_state=42,
)
if n_samples > n_features:
ridge_params = {"solver": "svd"}
else:
ridge_params = {"solver": "saga", "max_iter": 1000000, "tol": 1e-7}
(
X_train,
X_test,
y_train,
y_test,
) = train_test_split(X, y, test_size=test_size, random_state=0)
alpha = 1.0
if sample_weight is None:
sw_train = None
alpha_ridge = alpha * n_samples
else:
sw_train = np.random.RandomState(0).rand(len(y_train))
alpha_ridge = alpha * sw_train.sum()
# GLM has 1/(2*n) * Loss + 1/2*L2, Ridge has Loss + L2
ridge = Ridge(
alpha=alpha_ridge,
random_state=42,
fit_intercept=fit_intercept,
**ridge_params,
)
ridge.fit(X_train, y_train, sample_weight=sw_train)
glm = _GeneralizedLinearRegressor(
alpha=alpha,
fit_intercept=fit_intercept,
max_iter=300,
tol=1e-5,
)
glm.fit(X_train, y_train, sample_weight=sw_train)
assert glm.coef_.shape == (X.shape[1],)
assert_allclose(glm.coef_, ridge.coef_, atol=5e-5)
assert_allclose(glm.intercept_, ridge.intercept_, rtol=1e-5)
assert_allclose(glm.predict(X_train), ridge.predict(X_train), rtol=2e-4)
assert_allclose(glm.predict(X_test), ridge.predict(X_test), rtol=2e-4)
|
Compare with Ridge regression for Normal distributions.
|
test_normal_ridge_comparison
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/tests/test_glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/tests/test_glm.py
|
BSD-3-Clause
|
def test_poisson_glmnet(solver):
"""Compare Poisson regression with L2 regularization and LogLink to glmnet"""
# library("glmnet")
# options(digits=10)
# df <- data.frame(a=c(-2,-1,1,2), b=c(0,0,1,1), y=c(0,1,1,2))
# x <- data.matrix(df[,c("a", "b")])
# y <- df$y
# fit <- glmnet(x=x, y=y, alpha=0, intercept=T, family="poisson",
# standardize=F, thresh=1e-10, nlambda=10000)
# coef(fit, s=1)
# (Intercept) -0.12889386979
# a 0.29019207995
# b 0.03741173122
X = np.array([[-2, -1, 1, 2], [0, 0, 1, 1]]).T
y = np.array([0, 1, 1, 2])
glm = PoissonRegressor(
alpha=1,
fit_intercept=True,
tol=1e-7,
max_iter=300,
solver=solver,
)
glm.fit(X, y)
assert_allclose(glm.intercept_, -0.12889386979, rtol=1e-5)
assert_allclose(glm.coef_, [0.29019207995, 0.03741173122], rtol=1e-5)
|
Compare Poisson regression with L2 regularization and LogLink to glmnet
|
test_poisson_glmnet
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/tests/test_glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/tests/test_glm.py
|
BSD-3-Clause
|
def test_tweedie_link_argument(name, link_class):
"""Test GLM link argument set as string."""
y = np.array([0.1, 0.5]) # in range of all distributions
X = np.array([[1], [2]])
glm = TweedieRegressor(power=1, link=name).fit(X, y)
assert isinstance(glm._base_loss.link, link_class)
|
Test GLM link argument set as string.
|
test_tweedie_link_argument
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/tests/test_glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/tests/test_glm.py
|
BSD-3-Clause
|
def test_tweedie_link_auto(power, expected_link_class):
"""Test that link='auto' delivers the expected link function"""
y = np.array([0.1, 0.5]) # in range of all distributions
X = np.array([[1], [2]])
glm = TweedieRegressor(link="auto", power=power).fit(X, y)
assert isinstance(glm._base_loss.link, expected_link_class)
|
Test that link='auto' delivers the expected link function
|
test_tweedie_link_auto
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/tests/test_glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/tests/test_glm.py
|
BSD-3-Clause
|
def test_tweedie_score(regression_data, power, link):
"""Test that GLM score equals d2_tweedie_score for Tweedie losses."""
X, y = regression_data
# make y positive
y = np.abs(y) + 1.0
glm = TweedieRegressor(power=power, link=link).fit(X, y)
assert glm.score(X, y) == pytest.approx(
d2_tweedie_score(y, glm.predict(X), power=power)
)
|
Test that GLM score equals d2_tweedie_score for Tweedie losses.
|
test_tweedie_score
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/tests/test_glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/tests/test_glm.py
|
BSD-3-Clause
|
def test_linalg_warning_with_newton_solver(global_random_seed):
"""
Test that the Newton solver raises a warning and falls back to LBFGS when
encountering a singular or ill-conditioned Hessian matrix.
This test assesses the behavior of `PoissonRegressor` with the "newton-cholesky"
solver.
It verifies the following:
- The model significantly improves upon the constant baseline deviance.
- LBFGS remains robust on collinear data.
- The Newton solver raises a `LinAlgWarning` on collinear data and falls
back to LBFGS.
"""
newton_solver = "newton-cholesky"
rng = np.random.RandomState(global_random_seed)
# Use at least 20 samples to reduce the likelihood of getting a degenerate
# dataset for any global_random_seed.
X_orig = rng.normal(size=(20, 3))
y = rng.poisson(
np.exp(X_orig @ np.ones(X_orig.shape[1])), size=X_orig.shape[0]
).astype(np.float64)
# Collinear variation of the same input features.
X_collinear = np.hstack([X_orig] * 10)
# Let's consider the deviance of a constant baseline on this problem.
baseline_pred = np.full_like(y, y.mean())
constant_model_deviance = mean_poisson_deviance(y, baseline_pred)
assert constant_model_deviance > 1.0
# No warning raised on well-conditioned design, even without regularization.
tol = 1e-10
with warnings.catch_warnings():
warnings.simplefilter("error")
reg = PoissonRegressor(solver=newton_solver, alpha=0.0, tol=tol).fit(X_orig, y)
original_newton_deviance = mean_poisson_deviance(y, reg.predict(X_orig))
# On this dataset, we should have enough data points to make it
# impossible to get a near zero deviance (for any of the admissible
# random seeds). This will make it easier to interpret the meaning of rtol in
# the subsequent assertions:
assert original_newton_deviance > 0.2
# We check that the model could successfully fit information in X_orig to
# improve upon the constant baseline by a large margin (when evaluated on
# the training set).
assert constant_model_deviance - original_newton_deviance > 0.1
# LBFGS is robust to a collinear design because its approximation of the
# Hessian is Symmetric Positive Definite by construction. Let's record its
# solution
with warnings.catch_warnings():
warnings.simplefilter("error")
reg = PoissonRegressor(solver="lbfgs", alpha=0.0, tol=tol).fit(X_collinear, y)
collinear_lbfgs_deviance = mean_poisson_deviance(y, reg.predict(X_collinear))
# The LBFGS solution on the collinear data is expected to reach a comparable
# solution to the Newton solution on the original data.
rtol = 1e-6
assert collinear_lbfgs_deviance == pytest.approx(original_newton_deviance, rel=rtol)
# Fitting a Newton solver on the collinear version of the training data
# without regularization should raise an informative warning and fall back
# to the LBFGS solver.
msg = (
"The inner solver of .*Newton.*Solver stumbled upon a singular or very "
"ill-conditioned Hessian matrix"
)
with pytest.warns(scipy.linalg.LinAlgWarning, match=msg):
reg = PoissonRegressor(solver=newton_solver, alpha=0.0, tol=tol).fit(
X_collinear, y
)
# As a result we should still automatically converge to a good solution.
collinear_newton_deviance = mean_poisson_deviance(y, reg.predict(X_collinear))
assert collinear_newton_deviance == pytest.approx(
original_newton_deviance, rel=rtol
)
# Increasing the regularization slightly should make the problem go away:
with warnings.catch_warnings():
warnings.simplefilter("error", scipy.linalg.LinAlgWarning)
reg = PoissonRegressor(solver=newton_solver, alpha=1e-10).fit(X_collinear, y)
# The slightly penalized model on the collinear data should be close enough
# to the unpenalized model on the original data.
penalized_collinear_newton_deviance = mean_poisson_deviance(
y, reg.predict(X_collinear)
)
assert penalized_collinear_newton_deviance == pytest.approx(
original_newton_deviance, rel=rtol
)
|
Test that the Newton solver raises a warning and falls back to LBFGS when
encountering a singular or ill-conditioned Hessian matrix.
This test assesses the behavior of `PoissonRegressor` with the "newton-cholesky"
solver.
It verifies the following:
- The model significantly improves upon the constant baseline deviance.
- LBFGS remains robust on collinear data.
- The Newton solver raises a `LinAlgWarning` on collinear data and falls
back to LBFGS.
|
test_linalg_warning_with_newton_solver
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/tests/test_glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/tests/test_glm.py
|
BSD-3-Clause
|
def test_newton_solver_verbosity(capsys, verbose):
"""Test the std output of verbose newton solvers."""
y = np.array([1, 2], dtype=float)
X = np.array([[1.0, 0], [0, 1]], dtype=float)
linear_loss = LinearModelLoss(base_loss=HalfPoissonLoss(), fit_intercept=False)
sol = NewtonCholeskySolver(
coef=linear_loss.init_zero_coef(X),
linear_loss=linear_loss,
l2_reg_strength=0,
verbose=verbose,
)
sol.solve(X, y, None) # returns array([0., 0.69314758])
captured = capsys.readouterr()
if verbose == 0:
assert captured.out == ""
else:
msg = [
"Newton iter=1",
"Check Convergence",
"1. max |gradient|",
"2. Newton decrement",
"Solver did converge at loss = ",
]
for m in msg:
assert m in captured.out
if verbose >= 2:
msg = ["Backtracking Line Search", "line search iteration="]
for m in msg:
assert m in captured.out
# Set the Newton solver to a state with a completely wrong Newton step.
sol = NewtonCholeskySolver(
coef=linear_loss.init_zero_coef(X),
linear_loss=linear_loss,
l2_reg_strength=0,
verbose=verbose,
)
sol.setup(X=X, y=y, sample_weight=None)
sol.iteration = 1
sol.update_gradient_hessian(X=X, y=y, sample_weight=None)
sol.coef_newton = np.array([1.0, 0])
sol.gradient_times_newton = sol.gradient @ sol.coef_newton
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
sol.line_search(X=X, y=y, sample_weight=None)
captured = capsys.readouterr()
if verbose >= 1:
assert (
"Line search did not converge and resorts to lbfgs instead." in captured.out
)
# Set the Newton solver to a state with bad Newton step such that the loss
# improvement in line search is tiny.
sol = NewtonCholeskySolver(
coef=np.array([1e-12, 0.69314758]),
linear_loss=linear_loss,
l2_reg_strength=0,
verbose=verbose,
)
sol.setup(X=X, y=y, sample_weight=None)
sol.iteration = 1
sol.update_gradient_hessian(X=X, y=y, sample_weight=None)
sol.coef_newton = np.array([1e-6, 0])
sol.gradient_times_newton = sol.gradient @ sol.coef_newton
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
sol.line_search(X=X, y=y, sample_weight=None)
captured = capsys.readouterr()
if verbose >= 2:
msg = [
"line search iteration=",
"check loss improvement <= armijo term:",
"check loss |improvement| <= eps * |loss_old|:",
"check sum(|gradient|) < sum(|gradient_old|):",
]
for m in msg:
assert m in captured.out
# Test for a case with negative hessian. We badly initialize coef for a Tweedie
# loss with non-canonical link, e.g. Inverse Gaussian deviance with a log link.
linear_loss = LinearModelLoss(
base_loss=HalfTweedieLoss(power=3), fit_intercept=False
)
sol = NewtonCholeskySolver(
coef=linear_loss.init_zero_coef(X) + 1,
linear_loss=linear_loss,
l2_reg_strength=0,
verbose=verbose,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
sol.solve(X, y, None)
captured = capsys.readouterr()
if verbose >= 1:
assert (
"The inner solver detected a pointwise Hessian with many negative values"
" and resorts to lbfgs instead." in captured.out
)
|
Test the std output of verbose newton solvers.
|
test_newton_solver_verbosity
|
python
|
scikit-learn/scikit-learn
|
sklearn/linear_model/_glm/tests/test_glm.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/_glm/tests/test_glm.py
|
BSD-3-Clause
|
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Reconstruction error.
Notes
-----
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_**2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.eigenvalues_
return np.sqrt(np.sum(G_center**2) - np.sum(evals**2)) / G.shape[0]
|
Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Reconstruction error.
Notes
-----
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
|
reconstruction_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_isomap.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_isomap.py
|
BSD-3-Clause
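The isomap kernel in the Notes above, K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples), is double centering of the squared distance matrix. A small sketch of that identity with ordinary Euclidean distances rather than geodesic ones; the data and shapes are assumptions for illustration.

import numpy as np
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.preprocessing import KernelCenterer

rng = np.random.RandomState(0)
X = rng.normal(size=(6, 3))
D = euclidean_distances(X)

G = -0.5 * D**2

# Double centering with J = I - (1/n) 1 1' reproduces KernelCenterer.
n = G.shape[0]
J = np.eye(n) - np.ones((n, n)) / n
G_centered = J @ G @ J
assert np.allclose(G_centered, KernelCenterer().fit_transform(G))

# For Euclidean distances, the centered kernel is the Gram matrix of the
# centered data, which is what classical MDS (and Isomap's kernel PCA)
# eigendecomposes.
X_centered = X - X.mean(axis=0)
assert np.allclose(G_centered, X_centered @ X_centered.T)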
|
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_queries, n_features)
If neighbors_algorithm='precomputed', X is assumed to be a
distance matrix or a sparse graph of shape
(n_queries, n_samples_fit).
Returns
-------
X_new : array-like, shape (n_queries, n_components)
X transformed in the new space.
"""
check_is_fitted(self)
if self.n_neighbors is not None:
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
else:
distances, indices = self.nbrs_.radius_neighbors(X, return_distance=True)
# Create the graph of shortest distances from X to
# training data via the nearest neighbors of X.
# This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
n_samples_fit = self.nbrs_.n_samples_fit_
n_queries = distances.shape[0]
if hasattr(X, "dtype") and X.dtype == np.float32:
dtype = np.float32
else:
dtype = np.float64
G_X = np.zeros((n_queries, n_samples_fit), dtype)
for i in range(n_queries):
G_X[i] = np.min(self.dist_matrix_[indices[i]] + distances[i][:, None], 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
|
Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_queries, n_features)
If neighbors_algorithm='precomputed', X is assumed to be a
distance matrix or a sparse graph of shape
(n_queries, n_samples_fit).
Returns
-------
X_new : array-like, shape (n_queries, n_components)
X transformed in the new space.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_isomap.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_isomap.py
|
BSD-3-Clause
|
def barycenter_weights(X, Y, indices, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[indices] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Y : array-like, shape (n_samples, n_dim)
indices : array-like, shape (n_samples, n_dim)
Indices of the points in Y used to compute the barycenter
reg : float, default=1e-3
Amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Y = check_array(Y, dtype=FLOAT_DTYPES)
indices = check_array(indices, dtype=int)
n_samples, n_neighbors = indices.shape
assert X.shape[0] == n_samples
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinAlgError if G is singular and has trace
# zero
for i, ind in enumerate(indices):
A = Y[ind]
C = A - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[:: n_neighbors + 1] += R
w = solve(G, v, assume_a="pos")
B[i, :] = w / np.sum(w)
return B
|
Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[indices] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Y : array-like, shape (n_samples, n_dim)
indices : array-like, shape (n_samples, n_dim)
Indices of the points in Y used to compute the barycenter
reg : float, default=1e-3
Amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
|
barycenter_weights
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_locally_linear.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_locally_linear.py
|
BSD-3-Clause
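A brief usage sketch for barycenter_weights above: compute each point's neighbor indices (excluding the point itself), solve for the weights, and check that they sum to 1 and approximately reconstruct the points. Importing from the private module _locally_linear mirrors the file shown here; in user code one would normally go through LocallyLinearEmbedding instead, and the data below is an assumption for illustration.

import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.manifold._locally_linear import barycenter_weights

rng = np.random.RandomState(0)
X = rng.normal(size=(20, 3))
n_neighbors = 5

# Neighbor indices for each point, dropping column 0 (the point itself).
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1).fit(X)
ind = nbrs.kneighbors(X, return_distance=False)[:, 1:]

W = barycenter_weights(X, X, ind, reg=1e-3)

# Each row of weights sums to 1 and approximately reconstructs its point.
assert np.allclose(W.sum(axis=1), 1.0)
reconstruction = np.einsum("ij,ijk->ik", W, X[ind])
print(np.max(np.abs(reconstruction - X)))  # small, non-zero due to regularization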
|
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=None):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array or a NearestNeighbors object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, default=1e-3
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
n_jobs : int or None, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See Also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs).fit(X)
X = knn._fit_X
n_samples = knn.n_samples_fit_
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X, ind, reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr), shape=(n_samples, n_samples))
|
Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array or a NearestNeighbors object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, default=1e-3
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
n_jobs : int or None, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See Also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
|
barycenter_kneighbors_graph
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_locally_linear.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_locally_linear.py
|
BSD-3-Clause
|
def null_space(
M, k, k_skip=1, eigen_solver="arpack", tol=1e-6, max_iter=100, random_state=None
):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : int
Number of eigenvalues/vectors to return
k_skip : int, default=1
Number of low eigenvalues to skip.
eigen_solver : {'auto', 'arpack', 'dense'}, default='arpack'
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, default=1e-6
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : int, default=100
Maximum number of iterations for 'arpack' method.
Not used if eigen_solver=='dense'
random_state : int, RandomState instance, default=None
Determines the random number generator when ``solver`` == 'arpack'.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
"""
if eigen_solver == "auto":
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = "arpack"
else:
eigen_solver = "dense"
if eigen_solver == "arpack":
v0 = _init_arpack_v0(M.shape[0], random_state)
try:
eigen_values, eigen_vectors = eigsh(
M, k + k_skip, sigma=0.0, tol=tol, maxiter=max_iter, v0=v0
)
except RuntimeError as e:
raise ValueError(
"Error in determining null-space with ARPACK. Error message: "
"'%s'. Note that eigen_solver='arpack' can fail when the "
"weight matrix is singular or otherwise ill-behaved. In that "
"case, eigen_solver='dense' is recommended. See online "
"documentation for more information." % e
) from e
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == "dense":
if hasattr(M, "toarray"):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, subset_by_index=(k_skip, k + k_skip - 1), overwrite_a=True
)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
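A small sketch of the dense code path (illustrative only; `null_space` is private and the rank-deficient matrix below is made up): with `k_skip=1` the bottom eigenvector is discarded, which is how LLE drops the constant eigenvector of its cost matrix.

import numpy as np
from sklearn.manifold._locally_linear import null_space  # private helper

rng = np.random.RandomState(0)
W = rng.rand(30, 5)
M = W @ W.T  # rank-5 PSD matrix => large (approximate) null space
vecs, err = null_space(M, k=2, k_skip=1, eigen_solver="dense")
print(vecs.shape)  # (30, 2): the two smallest non-skipped eigenvectors
print(err)         # sum of the corresponding eigenvalues, ~0 here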
|
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : int
Number of eigenvalues/vectors to return
k_skip : int, default=1
Number of low eigenvalues to skip.
eigen_solver : {'auto', 'arpack', 'dense'}, default='arpack'
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, default=1e-6
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : int, default=100
Maximum number of iterations for 'arpack' method.
Not used if eigen_solver=='dense'
random_state : int, RandomState instance, default=None
Determines the random number generator when ``solver`` == 'arpack'.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
|
null_space
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_locally_linear.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_locally_linear.py
|
BSD-3-Clause
|
def locally_linear_embedding(
X,
*,
n_neighbors,
n_components,
reg=1e-3,
eigen_solver="auto",
tol=1e-6,
max_iter=100,
method="standard",
hessian_tol=1e-4,
modified_tol=1e-12,
random_state=None,
n_jobs=None,
):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array or a NearestNeighbors object.
n_neighbors : int
Number of neighbors to consider for each point.
n_components : int
Number of coordinates for the manifold.
reg : float, default=1e-3
Regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : {'auto', 'arpack', 'dense'}, default='auto'
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, default=1e-6
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : int, default=100
Maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}, default='standard'
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
n_neighbors > n_components * (1 + (n_components + 1) / 2).
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, default=1e-4
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'.
modified_tol : float, default=1e-12
Tolerance for modified LLE method.
Only used if method == 'modified'.
random_state : int, RandomState instance, default=None
Determines the random number generator when ``solver`` == 'arpack'.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
n_jobs : int or None, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
Y : ndarray of shape (n_samples, n_components)
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).
.. [2] Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.
<https://citeseerx.ist.psu.edu/doc_view/pid/0b060fdbd92cbcc66b383bcaa9ba5e5e624d7ee3>`_
.. [4] Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.manifold import locally_linear_embedding
>>> X, _ = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> embedding, _ = locally_linear_embedding(X[:100], n_neighbors=5, n_components=2)
>>> embedding.shape
(100, 2)
"""
return _locally_linear_embedding(
X=X,
n_neighbors=n_neighbors,
n_components=n_components,
reg=reg,
eigen_solver=eigen_solver,
tol=tol,
max_iter=max_iter,
method=method,
hessian_tol=hessian_tol,
modified_tol=modified_tol,
random_state=random_state,
n_jobs=n_jobs,
)
|
Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array or a NearestNeighbors object.
n_neighbors : int
Number of neighbors to consider for each point.
n_components : int
Number of coordinates for the manifold.
reg : float, default=1e-3
Regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : {'auto', 'arpack', 'dense'}, default='auto'
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, default=1e-6
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : int, default=100
Maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}, default='standard'
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
n_neighbors > n_components * (1 + (n_components + 1) / 2).
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, default=1e-4
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'.
modified_tol : float, default=1e-12
Tolerance for modified LLE method.
Only used if method == 'modified'.
random_state : int, RandomState instance, default=None
Determines the random number generator when ``solver`` == 'arpack'.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
n_jobs : int or None, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
Y : ndarray of shape (n_samples, n_components)
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).
.. [2] Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.
<https://citeseerx.ist.psu.edu/doc_view/pid/0b060fdbd92cbcc66b383bcaa9ba5e5e624d7ee3>`_
.. [4] Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.manifold import locally_linear_embedding
>>> X, _ = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> embedding, _ = locally_linear_embedding(X[:100], n_neighbors=5, n_components=2)
>>> embedding.shape
(100, 2)
|
locally_linear_embedding
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_locally_linear.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_locally_linear.py
|
BSD-3-Clause
|
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training set.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Returns the instance itself.
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs).
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
ind = self.nbrs_.kneighbors(
X, n_neighbors=self.n_neighbors, return_distance=False
)
weights = barycenter_weights(X, self.nbrs_._fit_X, ind, reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
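A hedged usage sketch (illustrative, not from the file): fitting LocallyLinearEmbedding on part of a swiss roll and mapping held-out points with `transform`, which reuses `barycenter_weights` as shown above.

import numpy as np
from sklearn.datasets import make_swiss_roll
from sklearn.manifold import LocallyLinearEmbedding

X, _ = make_swiss_roll(n_samples=500, random_state=0)
lle = LocallyLinearEmbedding(n_neighbors=12, n_components=2, random_state=0)
lle.fit(X[:400])
Y_new = lle.transform(X[400:])  # out-of-sample embedding
print(Y_new.shape)              # (100, 2)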
|
Transform new points into embedding space.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training set.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Returns the instance itself.
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs).
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_locally_linear.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_locally_linear.py
|
BSD-3-Clause
|
def _smacof_single(
dissimilarities,
metric=True,
n_components=2,
init=None,
max_iter=300,
verbose=0,
eps=1e-6,
random_state=None,
normalized_stress=False,
):
"""Computes multidimensional scaling using SMACOF algorithm.
Parameters
----------
dissimilarities : ndarray of shape (n_samples, n_samples)
Pairwise dissimilarities between the points. Must be symmetric.
metric : bool, default=True
Compute metric or nonmetric SMACOF algorithm.
When ``False`` (i.e. non-metric MDS), dissimilarities with 0 are considered as
missing values.
n_components : int, default=2
Number of dimensions in which to immerse the dissimilarities. If an
``init`` array is provided, this option is overridden and the shape of
``init`` is used to determine the dimensionality of the embedding
space.
init : ndarray of shape (n_samples, n_components), default=None
Starting configuration of the embedding to initialize the algorithm. By
default, the algorithm is initialized with a randomly chosen array.
max_iter : int, default=300
Maximum number of iterations of the SMACOF algorithm for a single run.
verbose : int, default=0
Level of verbosity.
eps : float, default=1e-6
The tolerance with respect to stress (normalized by the sum of squared
embedding distances) at which to declare convergence.
.. versionchanged:: 1.7
The default value for `eps` has changed from 1e-3 to 1e-6, as a result
of a bugfix in the computation of the convergence criterion.
random_state : int, RandomState instance or None, default=None
Determines the random number generator used to initialize the centers.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
normalized_stress : bool, default=False
Whether to return normalized stress value (Stress-1) instead of raw
stress.
.. versionadded:: 1.2
.. versionchanged:: 1.7
Normalized stress is now supported for metric MDS as well.
Returns
-------
X : ndarray of shape (n_samples, n_components)
Coordinates of the points in a ``n_components``-space.
stress : float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points).
If `normalized_stress=True`, returns Stress-1.
A value of 0 indicates "perfect" fit, 0.025 excellent, 0.05 good,
0.1 fair, and 0.2 poor [1]_.
n_iter : int
The number of iterations corresponding to the best stress.
References
----------
.. [1] "Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
Psychometrika, 29 (1964)
.. [2] "Multidimensional scaling by optimizing goodness of fit to a nonmetric
hypothesis" Kruskal, J. Psychometrika, 29, (1964)
.. [3] "Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
Groenen P. Springer Series in Statistics (1997)
"""
dissimilarities = check_symmetric(dissimilarities, raise_exception=True)
n_samples = dissimilarities.shape[0]
random_state = check_random_state(random_state)
dissimilarities_flat = ((1 - np.tri(n_samples)) * dissimilarities).ravel()
dissimilarities_flat_w = dissimilarities_flat[dissimilarities_flat != 0]
if init is None:
# Randomly choose initial configuration
X = random_state.uniform(size=n_samples * n_components)
X = X.reshape((n_samples, n_components))
else:
# overrides the parameter p
n_components = init.shape[1]
if n_samples != init.shape[0]:
raise ValueError(
"init matrix should be of shape (%d, %d)" % (n_samples, n_components)
)
X = init
distances = euclidean_distances(X)
# Out of bounds condition cannot happen because we are transforming
# the training set here, but does sometimes get triggered in
# practice due to machine precision issues. Hence "clip".
ir = IsotonicRegression(out_of_bounds="clip")
old_stress = None
for it in range(max_iter):
# Compute distance and monotonic regression
if metric:
disparities = dissimilarities
else:
distances_flat = distances.ravel()
# dissimilarities with 0 are considered as missing values
distances_flat_w = distances_flat[dissimilarities_flat != 0]
# Compute the disparities using isotonic regression.
# For the first SMACOF iteration, use scaled original dissimilarities.
# (This choice follows the R implementation described in this paper:
# https://www.jstatsoft.org/article/view/v102i10)
if it < 1:
disparities_flat = dissimilarities_flat_w
else:
disparities_flat = ir.fit_transform(
dissimilarities_flat_w, distances_flat_w
)
disparities = np.zeros_like(distances_flat)
disparities[dissimilarities_flat != 0] = disparities_flat
disparities = disparities.reshape((n_samples, n_samples))
disparities *= np.sqrt(
(n_samples * (n_samples - 1) / 2) / (disparities**2).sum()
)
disparities = disparities + disparities.T
# Update X using the Guttman transform
distances[distances == 0] = 1e-5
ratio = disparities / distances
B = -ratio
B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1)
X = 1.0 / n_samples * np.dot(B, X)
# Compute stress
distances = euclidean_distances(X)
stress = ((distances.ravel() - disparities.ravel()) ** 2).sum() / 2
if verbose >= 2: # pragma: no cover
print(f"Iteration {it}, stress {stress:.4f}")
if old_stress is not None:
sum_squared_distances = (distances.ravel() ** 2).sum()
if ((old_stress - stress) / (sum_squared_distances / 2)) < eps:
if verbose: # pragma: no cover
print("Convergence criterion reached.")
break
old_stress = stress
if normalized_stress:
sum_squared_distances = (distances.ravel() ** 2).sum()
stress = np.sqrt(stress / (sum_squared_distances / 2))
return X, stress, it + 1
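A hedged sketch of a single metric-SMACOF run through this private helper (toy coordinates, illustrative only): on exact Euclidean dissimilarities the final stress should be low and the embedding recovers the configuration up to rotation and translation.

import numpy as np
from sklearn.manifold._mds import _smacof_single  # private helper
from sklearn.metrics import euclidean_distances

X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])  # toy square
D = euclidean_distances(X)
Z, stress, n_iter = _smacof_single(D, metric=True, n_components=2, random_state=0)
print(Z.shape, n_iter)  # (4, 2), number of Guttman-transform iterations
print(stress)           # low final stress for a perfectly Euclidean input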
|
Computes multidimensional scaling using SMACOF algorithm.
Parameters
----------
dissimilarities : ndarray of shape (n_samples, n_samples)
Pairwise dissimilarities between the points. Must be symmetric.
metric : bool, default=True
Compute metric or nonmetric SMACOF algorithm.
When ``False`` (i.e. non-metric MDS), dissimilarities with 0 are considered as
missing values.
n_components : int, default=2
Number of dimensions in which to immerse the dissimilarities. If an
``init`` array is provided, this option is overridden and the shape of
``init`` is used to determine the dimensionality of the embedding
space.
init : ndarray of shape (n_samples, n_components), default=None
Starting configuration of the embedding to initialize the algorithm. By
default, the algorithm is initialized with a randomly chosen array.
max_iter : int, default=300
Maximum number of iterations of the SMACOF algorithm for a single run.
verbose : int, default=0
Level of verbosity.
eps : float, default=1e-6
The tolerance with respect to stress (normalized by the sum of squared
embedding distances) at which to declare convergence.
.. versionchanged:: 1.7
The default value for `eps` has changed from 1e-3 to 1e-6, as a result
of a bugfix in the computation of the convergence criterion.
random_state : int, RandomState instance or None, default=None
Determines the random number generator used to initialize the centers.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
normalized_stress : bool, default=False
Whether to return normalized stress value (Stress-1) instead of raw
stress.
.. versionadded:: 1.2
.. versionchanged:: 1.7
Normalized stress is now supported for metric MDS as well.
Returns
-------
X : ndarray of shape (n_samples, n_components)
Coordinates of the points in a ``n_components``-space.
stress : float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points).
If `normalized_stress=True`, returns Stress-1.
A value of 0 indicates "perfect" fit, 0.025 excellent, 0.05 good,
0.1 fair, and 0.2 poor [1]_.
n_iter : int
The number of iterations corresponding to the best stress.
References
----------
.. [1] "Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
Psychometrika, 29 (1964)
.. [2] "Multidimensional scaling by optimizing goodness of fit to a nonmetric
hypothesis" Kruskal, J. Psychometrika, 29, (1964)
.. [3] "Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
Groenen P. Springer Series in Statistics (1997)
|
_smacof_single
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_mds.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_mds.py
|
BSD-3-Clause
|
def smacof(
dissimilarities,
*,
metric=True,
n_components=2,
init=None,
n_init="warn",
n_jobs=None,
max_iter=300,
verbose=0,
eps=1e-6,
random_state=None,
return_n_iter=False,
normalized_stress="auto",
):
"""Compute multidimensional scaling using the SMACOF algorithm.
The SMACOF (Scaling by MAjorizing a COmplicated Function) algorithm is a
multidimensional scaling algorithm which minimizes an objective function
(the *stress*) using a majorization technique. Stress majorization, also
known as the Guttman Transform, guarantees a monotone convergence of
stress, and is more powerful than traditional techniques such as gradient
descent.
The SMACOF algorithm for metric MDS can be summarized by the following
steps:
1. Set an initial start configuration, randomly or not.
2. Compute the stress
3. Compute the Guttman Transform
4. Iterate 2 and 3 until convergence.
The nonmetric algorithm adds a monotonic regression step before computing
the stress.
Parameters
----------
dissimilarities : array-like of shape (n_samples, n_samples)
Pairwise dissimilarities between the points. Must be symmetric.
metric : bool, default=True
Compute metric or nonmetric SMACOF algorithm.
When ``False`` (i.e. non-metric MDS), dissimilarities with 0 are considered as
missing values.
n_components : int, default=2
Number of dimensions in which to immerse the dissimilarities. If an
``init`` array is provided, this option is overridden and the shape of
``init`` is used to determine the dimensionality of the embedding
space.
init : array-like of shape (n_samples, n_components), default=None
Starting configuration of the embedding to initialize the algorithm. By
default, the algorithm is initialized with a randomly chosen array.
n_init : int, default=8
Number of times the SMACOF algorithm will be run with different
initializations. The final results will be the best output of the runs,
determined by the run with the smallest final stress. If ``init`` is
provided, this option is overridden and a single run is performed.
.. versionchanged:: 1.9
The default value for `n_init` will change from 8 to 1 in version 1.9.
n_jobs : int, default=None
The number of jobs to use for the computation. If multiple
initializations are used (``n_init``), each run of the algorithm is
computed in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
max_iter : int, default=300
Maximum number of iterations of the SMACOF algorithm for a single run.
verbose : int, default=0
Level of verbosity.
eps : float, default=1e-6
The tolerance with respect to stress (normalized by the sum of squared
embedding distances) at which to declare convergence.
.. versionchanged:: 1.7
The default value for `eps` has changed from 1e-3 to 1e-6, as a result
of a bugfix in the computation of the convergence criterion.
random_state : int, RandomState instance or None, default=None
Determines the random number generator used to initialize the centers.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
normalized_stress : bool or "auto", default="auto"
Whether to return normalized stress value (Stress-1) instead of raw
stress. By default, metric MDS returns raw stress while non-metric MDS
returns normalized stress.
.. versionadded:: 1.2
.. versionchanged:: 1.4
The default value changed from `False` to `"auto"` in version 1.4.
.. versionchanged:: 1.7
Normalized stress is now supported for metric MDS as well.
Returns
-------
X : ndarray of shape (n_samples, n_components)
Coordinates of the points in a ``n_components``-space.
stress : float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points).
If `normalized_stress=True`, returns Stress-1.
A value of 0 indicates "perfect" fit, 0.025 excellent, 0.05 good,
0.1 fair, and 0.2 poor [1]_.
n_iter : int
The number of iterations corresponding to the best stress. Returned
only if ``return_n_iter`` is set to ``True``.
References
----------
.. [1] "Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
Psychometrika, 29 (1964)
.. [2] "Multidimensional scaling by optimizing goodness of fit to a nonmetric
hypothesis" Kruskal, J. Psychometrika, 29, (1964)
.. [3] "Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
Groenen P. Springer Series in Statistics (1997)
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import smacof
>>> from sklearn.metrics import euclidean_distances
>>> X = np.array([[0, 1, 2], [1, 0, 3], [2, 3, 0]])
>>> dissimilarities = euclidean_distances(X)
>>> Z, stress = smacof(
... dissimilarities, n_components=2, n_init=1, eps=1e-6, random_state=42
... )
>>> Z.shape
(3, 2)
>>> np.round(stress, 6).item()
3.2e-05
"""
if n_init == "warn":
warnings.warn(
"The default value of `n_init` will change from 8 to 1 in 1.9.",
FutureWarning,
)
n_init = 8
dissimilarities = check_array(dissimilarities)
random_state = check_random_state(random_state)
if normalized_stress == "auto":
normalized_stress = not metric
if hasattr(init, "__array__"):
init = np.asarray(init).copy()
if not n_init == 1:
warnings.warn(
"Explicit initial positions passed: "
"performing only one init of the MDS instead of %d" % n_init
)
n_init = 1
best_pos, best_stress = None, None
if effective_n_jobs(n_jobs) == 1:
for it in range(n_init):
pos, stress, n_iter_ = _smacof_single(
dissimilarities,
metric=metric,
n_components=n_components,
init=init,
max_iter=max_iter,
verbose=verbose,
eps=eps,
random_state=random_state,
normalized_stress=normalized_stress,
)
if best_stress is None or stress < best_stress:
best_stress = stress
best_pos = pos.copy()
best_iter = n_iter_
else:
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=max(verbose - 1, 0))(
delayed(_smacof_single)(
dissimilarities,
metric=metric,
n_components=n_components,
init=init,
max_iter=max_iter,
verbose=verbose,
eps=eps,
random_state=seed,
normalized_stress=normalized_stress,
)
for seed in seeds
)
positions, stress, n_iters = zip(*results)
best = np.argmin(stress)
best_stress = stress[best]
best_pos = positions[best]
best_iter = n_iters[best]
if return_n_iter:
return best_pos, best_stress, best_iter
else:
return best_pos, best_stress
|
Compute multidimensional scaling using the SMACOF algorithm.
The SMACOF (Scaling by MAjorizing a COmplicated Function) algorithm is a
multidimensional scaling algorithm which minimizes an objective function
(the *stress*) using a majorization technique. Stress majorization, also
known as the Guttman Transform, guarantees a monotone convergence of
stress, and is more powerful than traditional techniques such as gradient
descent.
The SMACOF algorithm for metric MDS can be summarized by the following
steps:
1. Set an initial start configuration, randomly or not.
2. Compute the stress
3. Compute the Guttman Transform
4. Iterate 2 and 3 until convergence.
The nonmetric algorithm adds a monotonic regression step before computing
the stress.
Parameters
----------
dissimilarities : array-like of shape (n_samples, n_samples)
Pairwise dissimilarities between the points. Must be symmetric.
metric : bool, default=True
Compute metric or nonmetric SMACOF algorithm.
When ``False`` (i.e. non-metric MDS), dissimilarities with 0 are considered as
missing values.
n_components : int, default=2
Number of dimensions in which to immerse the dissimilarities. If an
``init`` array is provided, this option is overridden and the shape of
``init`` is used to determine the dimensionality of the embedding
space.
init : array-like of shape (n_samples, n_components), default=None
Starting configuration of the embedding to initialize the algorithm. By
default, the algorithm is initialized with a randomly chosen array.
n_init : int, default=8
Number of times the SMACOF algorithm will be run with different
initializations. The final results will be the best output of the runs,
determined by the run with the smallest final stress. If ``init`` is
provided, this option is overridden and a single run is performed.
.. versionchanged:: 1.9
The default value for `n_init` will change from 8 to 1 in version 1.9.
n_jobs : int, default=None
The number of jobs to use for the computation. If multiple
initializations are used (``n_init``), each run of the algorithm is
computed in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
max_iter : int, default=300
Maximum number of iterations of the SMACOF algorithm for a single run.
verbose : int, default=0
Level of verbosity.
eps : float, default=1e-6
The tolerance with respect to stress (normalized by the sum of squared
embedding distances) at which to declare convergence.
.. versionchanged:: 1.7
The default value for `eps` has changed from 1e-3 to 1e-6, as a result
of a bugfix in the computation of the convergence criterion.
random_state : int, RandomState instance or None, default=None
Determines the random number generator used to initialize the centers.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
normalized_stress : bool or "auto", default="auto"
Whether to return normalized stress value (Stress-1) instead of raw
stress. By default, metric MDS returns raw stress while non-metric MDS
returns normalized stress.
.. versionadded:: 1.2
.. versionchanged:: 1.4
The default value changed from `False` to `"auto"` in version 1.4.
.. versionchanged:: 1.7
Normalized stress is now supported for metric MDS as well.
Returns
-------
X : ndarray of shape (n_samples, n_components)
Coordinates of the points in a ``n_components``-space.
stress : float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points).
If `normalized_stress=True`, returns Stress-1.
A value of 0 indicates "perfect" fit, 0.025 excellent, 0.05 good,
0.1 fair, and 0.2 poor [1]_.
n_iter : int
The number of iterations corresponding to the best stress. Returned
only if ``return_n_iter`` is set to ``True``.
References
----------
.. [1] "Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
Psychometrika, 29 (1964)
.. [2] "Multidimensional scaling by optimizing goodness of fit to a nonmetric
hypothesis" Kruskal, J. Psychometrika, 29, (1964)
.. [3] "Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
Groenen P. Springer Series in Statistics (1997)
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import smacof
>>> from sklearn.metrics import euclidean_distances
>>> X = np.array([[0, 1, 2], [1, 0, 3], [2, 3, 0]])
>>> dissimilarities = euclidean_distances(X)
>>> Z, stress = smacof(
... dissimilarities, n_components=2, n_init=1, eps=1e-6, random_state=42
... )
>>> Z.shape
(3, 2)
>>> np.round(stress, 6).item()
3.2e-05
|
smacof
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_mds.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_mds.py
|
BSD-3-Clause
|
def fit_transform(self, X, y=None, init=None):
"""
Fit the data from `X`, and returns the embedded coordinates.
Parameters
----------
X : array-like of shape (n_samples, n_features) or \
(n_samples, n_samples)
Input data. If ``dissimilarity=='precomputed'``, the input should
be the dissimilarity matrix.
y : Ignored
Not used, present for API consistency by convention.
init : ndarray of shape (n_samples, n_components), default=None
Starting configuration of the embedding to initialize the SMACOF
algorithm. By default, the algorithm is initialized with a randomly
chosen array.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
X transformed in the new space.
"""
if self.n_init == "warn":
warnings.warn(
"The default value of `n_init` will change from 4 to 1 in 1.9.",
FutureWarning,
)
self._n_init = 4
else:
self._n_init = self.n_init
X = validate_data(self, X)
if X.shape[0] == X.shape[1] and self.dissimilarity != "precomputed":
warnings.warn(
"The MDS API has changed. ``fit`` now constructs a"
" dissimilarity matrix from data. To use a custom "
"dissimilarity matrix, set "
"``dissimilarity='precomputed'``."
)
if self.dissimilarity == "precomputed":
self.dissimilarity_matrix_ = X
elif self.dissimilarity == "euclidean":
self.dissimilarity_matrix_ = euclidean_distances(X)
self.embedding_, self.stress_, self.n_iter_ = smacof(
self.dissimilarity_matrix_,
metric=self.metric,
n_components=self.n_components,
init=init,
n_init=self._n_init,
n_jobs=self.n_jobs,
max_iter=self.max_iter,
verbose=self.verbose,
eps=self.eps,
random_state=self.random_state,
return_n_iter=True,
normalized_stress=self.normalized_stress,
)
return self.embedding_
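A hedged usage sketch (not part of the class): fitting on a precomputed dissimilarity matrix, which is the `dissimilarity='precomputed'` branch above; `n_init=1` is passed explicitly to avoid the pending default change. The data is made up.

import numpy as np
from sklearn.manifold import MDS
from sklearn.metrics import euclidean_distances

X = np.array([[0, 1, 2], [1, 0, 3], [2, 3, 0], [4, 1, 0]], dtype=float)  # toy data
D = euclidean_distances(X)
mds = MDS(n_components=2, dissimilarity="precomputed", n_init=1, random_state=0)
Z = mds.fit_transform(D)
print(Z.shape)      # (4, 2)
print(mds.stress_)  # final (raw) stress of the fitted embedding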
|
Fit the data from `X`, and returns the embedded coordinates.
Parameters
----------
X : array-like of shape (n_samples, n_features) or (n_samples, n_samples)
Input data. If ``dissimilarity=='precomputed'``, the input should
be the dissimilarity matrix.
y : Ignored
Not used, present for API consistency by convention.
init : ndarray of shape (n_samples, n_components), default=None
Starting configuration of the embedding to initialize the SMACOF
algorithm. By default, the algorithm is initialized with a randomly
chosen array.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
X transformed in the new space.
|
fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_mds.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_mds.py
|
BSD-3-Clause
|
def _graph_connected_component(graph, node_id):
"""Find the largest graph connected components that contains one
given node.
Parameters
----------
graph : array-like of shape (n_samples, n_samples)
Adjacency matrix of the graph, non-zero weight means an edge
between the nodes.
node_id : int
The index of the query node of the graph.
Returns
-------
connected_components_matrix : array-like of shape (n_samples,)
A boolean array indicating which nodes belong to the connected
component of the given query node.
"""
n_node = graph.shape[0]
if sparse.issparse(graph):
# speed up row-wise access to boolean connection mask
graph = graph.tocsr()
connected_nodes = np.zeros(n_node, dtype=bool)
nodes_to_explore = np.zeros(n_node, dtype=bool)
nodes_to_explore[node_id] = True
for _ in range(n_node):
last_num_component = connected_nodes.sum()
np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)
if last_num_component >= connected_nodes.sum():
break
indices = np.where(nodes_to_explore)[0]
nodes_to_explore.fill(False)
for i in indices:
if sparse.issparse(graph):
# scipy not yet implemented 1D sparse slices; can be changed back to
# `neighbors = graph[i].toarray().ravel()` once implemented
neighbors = graph[[i], :].toarray().ravel()
else:
neighbors = graph[i]
np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore)
return connected_nodes
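A hedged sketch (toy adjacency matrix; private import path assumed): the breadth-first expansion above marks exactly the component of the query node.

import numpy as np
from sklearn.manifold._spectral_embedding import _graph_connected_component  # private helper

graph = np.array([[0, 1, 0],
                  [1, 0, 0],
                  [0, 0, 0]], dtype=float)  # two components: {0, 1} and {2}
print(_graph_connected_component(graph, 0))  # [ True  True False]
print(_graph_connected_component(graph, 2))  # [False False  True]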
|
Find the connected component of the graph that contains the given node.
Parameters
----------
graph : array-like of shape (n_samples, n_samples)
Adjacency matrix of the graph, non-zero weight means an edge
between the nodes.
node_id : int
The index of the query node of the graph.
Returns
-------
connected_components_matrix : array-like of shape (n_samples,)
A boolean array indicating which nodes belong to the connected
component of the given query node.
|
_graph_connected_component
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_spectral_embedding.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_spectral_embedding.py
|
BSD-3-Clause
|
def _graph_is_connected(graph):
"""Return whether the graph is connected (True) or Not (False).
Parameters
----------
graph : {array-like, sparse matrix} of shape (n_samples, n_samples)
Adjacency matrix of the graph, non-zero weight means an edge
between the nodes.
Returns
-------
is_connected : bool
True means the graph is fully connected and False means not.
"""
if sparse.issparse(graph):
# Before Scipy 1.11.3, `connected_components` only supports 32-bit indices.
# PR: https://github.com/scipy/scipy/pull/18913
# First integration in 1.11.3: https://github.com/scipy/scipy/pull/19279
# TODO(jjerphan): Once SciPy 1.11.3 is the minimum supported version, use
# `accept_large_sparse=True`.
accept_large_sparse = sp_version >= parse_version("1.11.3")
graph = check_array(
graph, accept_sparse=True, accept_large_sparse=accept_large_sparse
)
# sparse graph, find all the connected components
n_connected_components, _ = connected_components(graph)
return n_connected_components == 1
else:
# dense graph, find all connected components start from node 0
return _graph_connected_component(graph, 0).sum() == graph.shape[0]
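A hedged sketch of the same check on dense and sparse inputs (toy matrices; private import path assumed); this is the connectivity condition that spectral embedding relies on.

import numpy as np
from scipy import sparse
from sklearn.manifold._spectral_embedding import _graph_is_connected  # private helper

connected = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]], dtype=float)
split = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]], dtype=float)
print(_graph_is_connected(connected))                 # True  (dense path)
print(_graph_is_connected(sparse.csr_matrix(split)))  # False (sparse path)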
|
Return whether the graph is connected (True) or Not (False).
Parameters
----------
graph : {array-like, sparse matrix} of shape (n_samples, n_samples)
Adjacency matrix of the graph, non-zero weight means an edge
between the nodes.
Returns
-------
is_connected : bool
True means the graph is fully connected and False means not.
|
_graph_is_connected
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_spectral_embedding.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_spectral_embedding.py
|
BSD-3-Clause
|
def _set_diag(laplacian, value, norm_laplacian):
"""Set the diagonal of the laplacian matrix and convert it to a
sparse format well suited for eigenvalue decomposition.
Parameters
----------
laplacian : {ndarray, sparse matrix}
The graph laplacian.
value : float
The value of the diagonal.
norm_laplacian : bool
Whether the value of the diagonal should be changed or not.
Returns
-------
laplacian : {array, sparse matrix}
An array or matrix in a form that is well suited to fast
eigenvalue decomposition, depending on the band width of the
matrix.
"""
n_nodes = laplacian.shape[0]
# We need to set all entries of the diagonal to the given value
if not sparse.issparse(laplacian):
if norm_laplacian:
laplacian.flat[:: n_nodes + 1] = value
else:
laplacian = laplacian.tocoo()
if norm_laplacian:
diag_idx = laplacian.row == laplacian.col
laplacian.data[diag_idx] = value
# If the matrix has a small number of diagonals (as in the
# case of structured matrices coming from images), the
# dia format might be best suited for matvec products:
n_diags = np.unique(laplacian.row - laplacian.col).size
if n_diags <= 7:
# 3 or less outer diagonals on each side
laplacian = laplacian.todia()
else:
# csr has the fastest matvec and is thus best suited to
# arpack
laplacian = laplacian.tocsr()
return laplacian
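A hedged sketch (toy tridiagonal Laplacian; private import path assumed): with few populated diagonals the helper converts to DIA format, otherwise to CSR, as the comments above describe.

import numpy as np
from scipy import sparse
from sklearn.manifold._spectral_embedding import _set_diag  # private helper

lap = sparse.csr_matrix(np.array([[2.0, -1.0, 0.0],
                                  [-1.0, 2.0, -1.0],
                                  [0.0, -1.0, 2.0]]))
lap = _set_diag(lap, 1, norm_laplacian=True)
print(type(lap).__name__)    # dia_matrix: only 3 diagonals are populated
print(lap.toarray()[0, 0])   # 1.0: the diagonal entries were overwritten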
|
Set the diagonal of the laplacian matrix and convert it to a
sparse format well suited for eigenvalue decomposition.
Parameters
----------
laplacian : {ndarray, sparse matrix}
The graph laplacian.
value : float
The value of the diagonal.
norm_laplacian : bool
Whether the value of the diagonal should be changed or not.
Returns
-------
laplacian : {array, sparse matrix}
An array or matrix in a form that is well suited to fast
eigenvalue decomposition, depending on the band width of the
matrix.
|
_set_diag
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_spectral_embedding.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_spectral_embedding.py
|
BSD-3-Clause
|
def spectral_embedding(
adjacency,
*,
n_components=8,
eigen_solver=None,
random_state=None,
eigen_tol="auto",
norm_laplacian=True,
drop_first=True,
):
"""Project the sample on the first eigenvectors of the graph Laplacian.
The adjacency matrix is used to compute a normalized graph Laplacian
whose spectrum (especially the eigenvectors associated to the
smallest eigenvalues) has an interpretation in terms of minimal
number of cuts necessary to split the graph into comparably sized
components.
This embedding can also 'work' even if the ``adjacency`` variable is
not strictly the adjacency matrix of a graph but more generally
an affinity or similarity matrix between samples (for instance the
heat kernel of a euclidean distance matrix or a k-NN matrix).
However, care must be taken to always make the affinity matrix symmetric
so that the eigenvector decomposition works as expected.
Note : Laplacian Eigenmaps is the actual algorithm implemented here.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
----------
adjacency : {array-like, sparse graph} of shape (n_samples, n_samples)
The adjacency matrix of the graph to embed.
n_components : int, default=8
The dimension of the projection subspace.
eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities. If None, then ``'arpack'`` is
used.
random_state : int, RandomState instance or None, default=None
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when `eigen_solver ==
'amg'`, and for the K-Means initialization. Use an int to make
the results deterministic across calls (See
:term:`Glossary <random_state>`).
.. note::
When using `eigen_solver == 'amg'`,
it is necessary to also fix the global numpy seed with
`np.random.seed(int)` to get deterministic results. See
https://github.com/pyamg/pyamg/issues/139 for further
information.
eigen_tol : float, default="auto"
Stopping criterion for eigendecomposition of the Laplacian matrix.
If `eigen_tol="auto"` then the passed tolerance will depend on the
`eigen_solver`:
- If `eigen_solver="arpack"`, then `eigen_tol=0.0`;
- If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then
`eigen_tol=None` which configures the underlying `lobpcg` solver to
automatically resolve the value according to their heuristics. See,
:func:`scipy.sparse.linalg.lobpcg` for details.
Note that when using `eigen_solver="amg"` values of `tol<1e-5` may lead
to convergence issues and should be avoided.
.. versionadded:: 1.2
Added 'auto' option.
norm_laplacian : bool, default=True
If True, then compute symmetric normalized Laplacian.
drop_first : bool, default=True
Whether to drop the first eigenvector. For spectral embedding, this
should be True as the first eigenvector should be constant vector for
connected graph, but for spectral clustering, this should be kept as
False to retain the first eigenvector.
Returns
-------
embedding : ndarray of shape (n_samples, n_components)
The reduced samples.
Notes
-----
Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph
has one connected component. If the graph has many components, the first
few eigenvectors will simply uncover the connected components of the graph.
References
----------
* https://en.wikipedia.org/wiki/LOBPCG
* :doi:`"Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method",
Andrew V. Knyazev
<10.1137/S1064827500366124>`
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.neighbors import kneighbors_graph
>>> from sklearn.manifold import spectral_embedding
>>> X, _ = load_digits(return_X_y=True)
>>> X = X[:100]
>>> affinity_matrix = kneighbors_graph(
... X, n_neighbors=int(X.shape[0] / 10), include_self=True
... )
>>> # make the matrix symmetric
>>> affinity_matrix = 0.5 * (affinity_matrix + affinity_matrix.T)
>>> embedding = spectral_embedding(affinity_matrix, n_components=2, random_state=42)
>>> embedding.shape
(100, 2)
"""
random_state = check_random_state(random_state)
return _spectral_embedding(
adjacency,
n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol,
norm_laplacian=norm_laplacian,
drop_first=drop_first,
)
|
Project the sample on the first eigenvectors of the graph Laplacian.
The adjacency matrix is used to compute a normalized graph Laplacian
whose spectrum (especially the eigenvectors associated to the
smallest eigenvalues) has an interpretation in terms of minimal
number of cuts necessary to split the graph into comparably sized
components.
This embedding can also 'work' even if the ``adjacency`` variable is
not strictly the adjacency matrix of a graph but more generally
an affinity or similarity matrix between samples (for instance the
heat kernel of a euclidean distance matrix or a k-NN matrix).
However, care must be taken to always make the affinity matrix symmetric
so that the eigenvector decomposition works as expected.
Note : Laplacian Eigenmaps is the actual algorithm implemented here.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
----------
adjacency : {array-like, sparse graph} of shape (n_samples, n_samples)
The adjacency matrix of the graph to embed.
n_components : int, default=8
The dimension of the projection subspace.
eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities. If None, then ``'arpack'`` is
used.
random_state : int, RandomState instance or None, default=None
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when `eigen_solver ==
'amg'`, and for the K-Means initialization. Use an int to make
the results deterministic across calls (See
:term:`Glossary <random_state>`).
.. note::
When using `eigen_solver == 'amg'`,
it is necessary to also fix the global numpy seed with
`np.random.seed(int)` to get deterministic results. See
https://github.com/pyamg/pyamg/issues/139 for further
information.
eigen_tol : float, default="auto"
Stopping criterion for eigendecomposition of the Laplacian matrix.
If `eigen_tol="auto"` then the passed tolerance will depend on the
`eigen_solver`:
- If `eigen_solver="arpack"`, then `eigen_tol=0.0`;
- If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then
`eigen_tol=None` which configures the underlying `lobpcg` solver to
automatically resolve the value according to their heuristics. See,
:func:`scipy.sparse.linalg.lobpcg` for details.
Note that when using `eigen_solver="amg"` values of `tol<1e-5` may lead
to convergence issues and should be avoided.
.. versionadded:: 1.2
Added 'auto' option.
norm_laplacian : bool, default=True
If True, then compute symmetric normalized Laplacian.
drop_first : bool, default=True
Whether to drop the first eigenvector. For spectral embedding, this
should be True as the first eigenvector should be constant vector for
connected graph, but for spectral clustering, this should be kept as
False to retain the first eigenvector.
Returns
-------
embedding : ndarray of shape (n_samples, n_components)
The reduced samples.
Notes
-----
Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph
has one connected component. If the graph has many components, the first
few eigenvectors will simply uncover the connected components of the graph.
References
----------
* https://en.wikipedia.org/wiki/LOBPCG
* :doi:`"Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method",
Andrew V. Knyazev
<10.1137/S1064827500366124>`
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.neighbors import kneighbors_graph
>>> from sklearn.manifold import spectral_embedding
>>> X, _ = load_digits(return_X_y=True)
>>> X = X[:100]
>>> affinity_matrix = kneighbors_graph(
... X, n_neighbors=int(X.shape[0] / 10), include_self=True
... )
>>> # make the matrix symmetric
>>> affinity_matrix = 0.5 * (affinity_matrix + affinity_matrix.T)
>>> embedding = spectral_embedding(affinity_matrix, n_components=2, random_state=42)
>>> embedding.shape
(100, 2)
|
spectral_embedding
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_spectral_embedding.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_spectral_embedding.py
|
BSD-3-Clause
|
def _get_affinity_matrix(self, X, Y=None):
"""Calculate the affinity matrix from data
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
If affinity is "precomputed"
X : array-like of shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Y : Ignored
Returns
-------
affinity_matrix of shape (n_samples, n_samples)
"""
if self.affinity == "precomputed":
self.affinity_matrix_ = X
return self.affinity_matrix_
if self.affinity == "precomputed_nearest_neighbors":
estimator = NearestNeighbors(
n_neighbors=self.n_neighbors, n_jobs=self.n_jobs, metric="precomputed"
).fit(X)
connectivity = estimator.kneighbors_graph(X=X, mode="connectivity")
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
return self.affinity_matrix_
if self.affinity == "nearest_neighbors":
if sparse.issparse(X):
warnings.warn(
"Nearest neighbors affinity currently does "
"not support sparse input, falling back to "
"rbf affinity"
)
self.affinity = "rbf"
else:
self.n_neighbors_ = (
self.n_neighbors
if self.n_neighbors is not None
else max(int(X.shape[0] / 10), 1)
)
self.affinity_matrix_ = kneighbors_graph(
X, self.n_neighbors_, include_self=True, n_jobs=self.n_jobs
)
# currently only symmetric affinity_matrix supported
self.affinity_matrix_ = 0.5 * (
self.affinity_matrix_ + self.affinity_matrix_.T
)
return self.affinity_matrix_
if self.affinity == "rbf":
self.gamma_ = self.gamma if self.gamma is not None else 1.0 / X.shape[1]
self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)
return self.affinity_matrix_
self.affinity_matrix_ = self.affinity(X)
return self.affinity_matrix_
|
Calculate the affinity matrix from data
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
If affinity is "precomputed"
X : array-like of shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Y : Ignored
Returns
-------
affinity_matrix of shape (n_samples, n_samples)
|
_get_affinity_matrix
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_spectral_embedding.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_spectral_embedding.py
|
BSD-3-Clause
|
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
If affinity is "precomputed"
X : {array-like, sparse matrix}, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(self, X, accept_sparse="csr", ensure_min_samples=2)
random_state = check_random_state(self.random_state)
affinity_matrix = self._get_affinity_matrix(X)
self.embedding_ = _spectral_embedding(
affinity_matrix,
n_components=self.n_components,
eigen_solver=self.eigen_solver,
eigen_tol=self.eigen_tol,
random_state=random_state,
)
return self
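A hedged usage sketch (illustrative only): fitting SpectralEmbedding on a small digits subset; with affinity='nearest_neighbors' the fitted `affinity_matrix_` is the symmetrised kNN graph built by `_get_affinity_matrix` above.

from sklearn.datasets import load_digits
from sklearn.manifold import SpectralEmbedding

X, _ = load_digits(return_X_y=True)
se = SpectralEmbedding(n_components=2, affinity="nearest_neighbors", random_state=0)
se.fit(X[:100])
print(se.embedding_.shape)        # (100, 2)
print(se.affinity_matrix_.shape)  # (100, 100), sparse symmetric kNN graph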
|
Fit the model from data in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
If affinity is "precomputed"
X : {array-like, sparse matrix}, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_spectral_embedding.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_spectral_embedding.py
|
BSD-3-Clause
|
def _kl_divergence(
params,
P,
degrees_of_freedom,
n_samples,
n_components,
skip_num_points=0,
compute_error=True,
):
"""t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : ndarray of shape (n_params,)
Unraveled embedding.
P : ndarray of shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : int
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int, default=0
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
compute_error : bool, default=True
If False, the kl_divergence is not computed and returns NaN.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : ndarray of shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
dist = pdist(X_embedded, "sqeuclidean")
dist /= degrees_of_freedom
dist += 1.0
dist **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(dist / (2.0 * np.sum(dist)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
if compute_error:
kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))
else:
kl_divergence = np.nan
# Gradient: dC/dY
# pdist always returns double precision distances. Thus we need to build
# the gradient explicitly with the dtype of params (which may be float32).
grad = np.ndarray((n_samples, n_components), dtype=params.dtype)
PQd = squareform((P - Q) * dist)
for i in range(skip_num_points, n_samples):
grad[i] = np.dot(np.ravel(PQd[i], order="K"), X_embedded[i] - X_embedded)
grad = grad.ravel()
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad *= c
return kl_divergence, grad
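A hedged sketch (not from the file): checking the analytic gradient above against finite differences on a tiny random problem via `scipy.optimize.check_grad`; the condensed `P` below is arbitrary toy data, not a real t-SNE joint distribution.

import numpy as np
from scipy.optimize import check_grad
from sklearn.manifold._t_sne import _kl_divergence  # private helper

rng = np.random.RandomState(0)
n_samples, n_components = 6, 2
P = rng.rand(n_samples * (n_samples - 1) // 2)
P /= P.sum()  # condensed joint probabilities (toy values)
params = rng.randn(n_samples * n_components)
err = check_grad(
    lambda p: _kl_divergence(p, P, 1, n_samples, n_components)[0],
    lambda p: _kl_divergence(p, P, 1, n_samples, n_components)[1],
    params,
)
print(err)  # expected to be tiny: analytic and numeric gradients agree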
|
t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : ndarray of shape (n_params,)
Unraveled embedding.
P : ndarray of shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : int
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int, default=0
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
compute_error: bool, default=True
If False, the kl_divergence is not computed and returns NaN.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : ndarray of shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
|
_kl_divergence
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_t_sne.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_t_sne.py
|
BSD-3-Clause
|
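To make the objective above concrete, here is a small self-contained sketch of how the low-dimensional similarities Q are formed: squared Euclidean distances passed through the heavy-tailed Student's t kernel and then normalized over all pairs. The toy embedding and variable names are assumptions for illustration only.

# Sketch: build the condensed Student-t similarity vector Q from an embedding,
# mirroring the steps in the objective above (toy data, illustrative names).
import numpy as np
from scipy.spatial.distance import pdist

rng = np.random.RandomState(0)
X_embedded = rng.randn(6, 2)              # 6 points embedded in 2 dimensions
degrees_of_freedom = 1.0                  # n_components - 1 for a 2D embedding

dist = pdist(X_embedded, "sqeuclidean")   # condensed pairwise squared distances
dist /= degrees_of_freedom
dist += 1.0
dist **= (degrees_of_freedom + 1.0) / -2.0   # heavy-tailed Student-t kernel
Q = np.maximum(dist / (2.0 * np.sum(dist)), np.finfo(np.double).eps)

# Q stores each unordered pair once, so the full (i, j) distribution sums to 1.
print(2.0 * Q.sum())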
def _kl_divergence_bh(
params,
P,
degrees_of_freedom,
n_samples,
n_components,
angle=0.5,
skip_num_points=0,
verbose=False,
compute_error=True,
num_threads=1,
):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2).
Parameters
----------
params : ndarray of shape (n_params,)
Unraveled embedding.
    P : sparse matrix of shape (n_samples, n_samples)
Sparse approximate joint probability matrix, computed only for the
k nearest-neighbors and symmetrized. Matrix should be of CSR format.
degrees_of_freedom : int
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float, default=0.5
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
        in the range of 0.2 - 0.8. Angles less than 0.2 have quickly increasing
        computation time and angles greater than 0.8 have quickly increasing error.
skip_num_points : int, default=0
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int, default=False
Verbosity level.
compute_error: bool, default=True
If False, the kl_divergence is not computed and returns NaN.
num_threads : int, default=1
Number of threads used to compute the gradient. This is set here to
avoid calling _openmp_effective_n_threads for each gradient step.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : ndarray of shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
params = params.astype(np.float32, copy=False)
X_embedded = params.reshape(n_samples, n_components)
val_P = P.data.astype(np.float32, copy=False)
neighbors = P.indices.astype(np.int64, copy=False)
indptr = P.indptr.astype(np.int64, copy=False)
grad = np.zeros(X_embedded.shape, dtype=np.float32)
error = _barnes_hut_tsne.gradient(
val_P,
X_embedded,
neighbors,
indptr,
grad,
angle,
n_components,
verbose,
dof=degrees_of_freedom,
compute_error=compute_error,
num_threads=num_threads,
)
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad = grad.ravel()
grad *= c
return error, grad
|
t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2).
Parameters
----------
params : ndarray of shape (n_params,)
Unraveled embedding.
P : sparse matrix of shape (n_samples, n_samples)
Sparse approximate joint probability matrix, computed only for the
k nearest-neighbors and symmetrized. Matrix should be of CSR format.
degrees_of_freedom : int
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float, default=0.5
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angles less than 0.2 have quickly increasing
computation time and angles greater than 0.8 have quickly increasing error.
skip_num_points : int, default=0
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int, default=False
Verbosity level.
compute_error: bool, default=True
If False, the kl_divergence is not computed and returns NaN.
num_threads : int, default=1
Number of threads used to compute the gradient. This is set here to
avoid calling _openmp_effective_n_threads for each gradient step.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : ndarray of shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
|
_kl_divergence_bh
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_t_sne.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_t_sne.py
|
BSD-3-Clause
|
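The Barnes-Hut gradient above trades a controlled amount of accuracy (via `angle`) for an O(N log N) cost. The following hedged comparison goes through the public `TSNE` estimator; the dataset size and hyperparameters are illustrative assumptions, and the two KL values are expected to be close but not identical.

# Compare the exact and Barnes-Hut objectives indirectly via the public API.
# Dataset and hyperparameters below are illustrative assumptions.
import numpy as np
from sklearn.manifold import TSNE

rng = np.random.RandomState(0)
X = rng.randn(150, 10).astype(np.float32)

common = dict(n_components=2, perplexity=30, init="pca",
              learning_rate="auto", random_state=0)
kl_exact = TSNE(method="exact", **common).fit(X).kl_divergence_
kl_bh = TSNE(method="barnes_hut", angle=0.5, **common).fit(X).kl_divergence_

# barnes_hut approximates the repulsive forces, so the final KL divergences
# should be of the same order of magnitude without matching exactly.
print(kl_exact, kl_bh)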
def _gradient_descent(
objective,
p0,
it,
max_iter,
n_iter_check=1,
n_iter_without_progress=300,
momentum=0.8,
learning_rate=200.0,
min_gain=0.01,
min_grad_norm=1e-7,
verbose=0,
args=None,
kwargs=None,
):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like of shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
max_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int, default=1
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, default=300
Maximum number of iterations without progress before we abort the
optimization.
momentum : float within (0.0, 1.0), default=0.8
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, default=200.0
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, default=0.01
Minimum individual gain for each parameter.
min_grad_norm : float, default=1e-7
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, default=0
Verbosity level.
args : sequence, default=None
Arguments to pass to objective function.
kwargs : dict, default=None
Keyword arguments to pass to objective function.
Returns
-------
p : ndarray of shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(float).max
best_error = np.finfo(float).max
best_iter = i = it
tic = time()
for i in range(it, max_iter):
check_convergence = (i + 1) % n_iter_check == 0
# only compute the error when needed
kwargs["compute_error"] = check_convergence or i == max_iter - 1
error, grad = objective(p, *args, **kwargs)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if check_convergence:
toc = time()
duration = toc - tic
tic = toc
grad_norm = linalg.norm(grad)
if verbose >= 2:
print(
"[t-SNE] Iteration %d: error = %.7f,"
" gradient norm = %.7f"
" (%s iterations in %0.3fs)"
% (i + 1, error, grad_norm, n_iter_check, duration)
)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print(
"[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress)
)
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print(
"[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm)
)
break
return p, error, i
|
Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like of shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
max_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int, default=1
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, default=300
Maximum number of iterations without progress before we abort the
optimization.
momentum : float within (0.0, 1.0), default=0.8
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, default=200.0
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, default=0.01
Minimum individual gain for each parameter.
min_grad_norm : float, default=1e-7
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, default=0
Verbosity level.
args : sequence, default=None
Arguments to pass to objective function.
kwargs : dict, default=None
Keyword arguments to pass to objective function.
Returns
-------
p : ndarray of shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
|
_gradient_descent
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_t_sne.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_t_sne.py
|
BSD-3-Clause
|
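The update rule above (momentum plus per-parameter "gains" that grow while the update keeps opposing the gradient and shrink otherwise) is generic. The following standalone sketch applies the same rule to a toy quadratic to illustrate the mechanics; the objective, constants, and iteration count are illustrative assumptions, not part of the scikit-learn API.

# Standalone illustration of momentum + individual gains on a toy quadratic.
import numpy as np

def objective(p):
    # toy objective f(p) = 0.5 * ||p||^2 with gradient p
    return 0.5 * np.dot(p, p), p.copy()

p = np.array([5.0, -3.0, 2.0])
update = np.zeros_like(p)
gains = np.ones_like(p)
momentum, learning_rate, min_gain = 0.8, 0.1, 0.01

for _ in range(300):
    error, grad = objective(p)
    inc = update * grad < 0.0      # update still opposes the gradient: grow gain
    dec = np.invert(inc)           # otherwise: shrink gain
    gains[inc] += 0.2
    gains[dec] *= 0.8
    np.clip(gains, min_gain, np.inf, out=gains)
    grad *= gains
    update = momentum * update - learning_rate * grad
    p += update

print(error)   # far below the initial objective value of 19.0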
def trustworthiness(X, X_embedded, *, n_neighbors=5, metric="euclidean"):
r"""Indicate to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in \mathcal{N}_{i}^{k}} \max(0, (r(i, j) - k))
where for each sample i, :math:`\mathcal{N}_{i}^{k}` are its k nearest
neighbors in the output space, and every sample j is its :math:`r(i, j)`-th
nearest neighbor in the input space. In other words, any unexpected nearest
neighbors in the output space are penalised in proportion to their rank in
the input space.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : {array-like, sparse matrix} of shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, default=5
The number of neighbors that will be considered. Should be fewer than
        `n_samples / 2` to ensure the trustworthiness lies within [0, 1], as
mentioned in [1]_. An error will be raised otherwise.
metric : str or callable, default='euclidean'
Which metric to use for computing pairwise distances between samples
from the original input space. If metric is 'precomputed', X must be a
matrix of pairwise distances or squared distances. Otherwise, for a list
of available metrics, see the documentation of argument metric in
`sklearn.pairwise.pairwise_distances` and metrics listed in
`sklearn.metrics.pairwise.PAIRWISE_DISTANCE_FUNCTIONS`. Note that the
"cosine" metric uses :func:`~sklearn.metrics.pairwise.cosine_distances`.
.. versionadded:: 0.20
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
References
----------
.. [1] Jarkko Venna and Samuel Kaski. 2001. Neighborhood
Preservation in Nonlinear Projection Methods: An Experimental Study.
In Proceedings of the International Conference on Artificial Neural Networks
(ICANN '01). Springer-Verlag, Berlin, Heidelberg, 485-491.
.. [2] Laurens van der Maaten. Learning a Parametric Embedding by Preserving
Local Structure. Proceedings of the Twelfth International Conference on
Artificial Intelligence and Statistics, PMLR 5:384-391, 2009.
Examples
--------
>>> from sklearn.datasets import make_blobs
>>> from sklearn.decomposition import PCA
>>> from sklearn.manifold import trustworthiness
>>> X, _ = make_blobs(n_samples=100, n_features=10, centers=3, random_state=42)
>>> X_embedded = PCA(n_components=2).fit_transform(X)
>>> print(f"{trustworthiness(X, X_embedded, n_neighbors=5):.2f}")
0.92
"""
n_samples = _num_samples(X)
if n_neighbors >= n_samples / 2:
raise ValueError(
f"n_neighbors ({n_neighbors}) should be less than n_samples / 2"
f" ({n_samples / 2})"
)
dist_X = pairwise_distances(X, metric=metric)
if metric == "precomputed":
dist_X = dist_X.copy()
# we set the diagonal to np.inf to exclude the points themselves from
# their own neighborhood
np.fill_diagonal(dist_X, np.inf)
ind_X = np.argsort(dist_X, axis=1)
# `ind_X[i]` is the index of sorted distances between i and other samples
ind_X_embedded = (
NearestNeighbors(n_neighbors=n_neighbors)
.fit(X_embedded)
.kneighbors(return_distance=False)
)
# We build an inverted index of neighbors in the input space: For sample i,
# we define `inverted_index[i]` as the inverted index of sorted distances:
    # inverted_index[i][ind_X[i]] = np.arange(1, n_samples + 1)
inverted_index = np.zeros((n_samples, n_samples), dtype=int)
ordered_indices = np.arange(n_samples + 1)
inverted_index[ordered_indices[:-1, np.newaxis], ind_X] = ordered_indices[1:]
ranks = (
inverted_index[ordered_indices[:-1, np.newaxis], ind_X_embedded] - n_neighbors
)
t = np.sum(ranks[ranks > 0])
t = 1.0 - t * (
2.0 / (n_samples * n_neighbors * (2.0 * n_samples - 3.0 * n_neighbors - 1.0))
)
return t
|
Indicate to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in \mathcal{N}_{i}^{k}} \max(0, (r(i, j) - k))
where for each sample i, :math:`\mathcal{N}_{i}^{k}` are its k nearest
neighbors in the output space, and every sample j is its :math:`r(i, j)`-th
nearest neighbor in the input space. In other words, any unexpected nearest
neighbors in the output space are penalised in proportion to their rank in
the input space.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : {array-like, sparse matrix} of shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, default=5
The number of neighbors that will be considered. Should be fewer than
`n_samples / 2` to ensure the trustworthiness lies within [0, 1], as
mentioned in [1]_. An error will be raised otherwise.
metric : str or callable, default='euclidean'
Which metric to use for computing pairwise distances between samples
from the original input space. If metric is 'precomputed', X must be a
matrix of pairwise distances or squared distances. Otherwise, for a list
of available metrics, see the documentation of argument metric in
`sklearn.pairwise.pairwise_distances` and metrics listed in
`sklearn.metrics.pairwise.PAIRWISE_DISTANCE_FUNCTIONS`. Note that the
"cosine" metric uses :func:`~sklearn.metrics.pairwise.cosine_distances`.
.. versionadded:: 0.20
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
References
----------
.. [1] Jarkko Venna and Samuel Kaski. 2001. Neighborhood
Preservation in Nonlinear Projection Methods: An Experimental Study.
In Proceedings of the International Conference on Artificial Neural Networks
(ICANN '01). Springer-Verlag, Berlin, Heidelberg, 485-491.
.. [2] Laurens van der Maaten. Learning a Parametric Embedding by Preserving
Local Structure. Proceedings of the Twelfth International Conference on
Artificial Intelligence and Statistics, PMLR 5:384-391, 2009.
Examples
--------
>>> from sklearn.datasets import make_blobs
>>> from sklearn.decomposition import PCA
>>> from sklearn.manifold import trustworthiness
>>> X, _ = make_blobs(n_samples=100, n_features=10, centers=3, random_state=42)
>>> X_embedded = PCA(n_components=2).fit_transform(X)
>>> print(f"{trustworthiness(X, X_embedded, n_neighbors=5):.2f}")
0.92
|
trustworthiness
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_t_sne.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_t_sne.py
|
BSD-3-Clause
|
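As a complement to the docstring example above, the following hedged sketch contrasts the trustworthiness of a structure-preserving embedding with that of an embedding unrelated to the data; the dataset, seed, and the expected ordering of the two scores are illustrative assumptions.

# Trustworthiness of a structure-preserving embedding vs an unrelated one.
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.decomposition import PCA
from sklearn.manifold import trustworthiness

X, _ = make_blobs(n_samples=100, n_features=10, centers=3, random_state=42)

X_pca = PCA(n_components=2).fit_transform(X)
rng = np.random.RandomState(42)
X_random = rng.randn(100, 2)       # embedding unrelated to X

t_pca = trustworthiness(X, X_pca, n_neighbors=5)
t_random = trustworthiness(X, X_random, n_neighbors=5)
# The PCA embedding keeps local neighborhoods, so t_pca is expected to be
# noticeably higher than t_random.
print(f"PCA: {t_pca:.2f}, random: {t_random:.2f}")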
def _fit(self, X, skip_num_points=0):
"""Private function to fit the model using X as training data."""
if isinstance(self.init, str) and self.init == "pca" and issparse(X):
raise TypeError(
"PCA initialization is currently not supported "
"with the sparse input matrix. Use "
'init="random" instead.'
)
if self.learning_rate == "auto":
# See issue #18018
self.learning_rate_ = X.shape[0] / self.early_exaggeration / 4
self.learning_rate_ = np.maximum(self.learning_rate_, 50)
else:
self.learning_rate_ = self.learning_rate
if self.method == "barnes_hut":
X = validate_data(
self,
X,
accept_sparse=["csr"],
ensure_min_samples=2,
dtype=[np.float32, np.float64],
)
else:
X = validate_data(
self,
X,
accept_sparse=["csr", "csc", "coo"],
dtype=[np.float32, np.float64],
)
if self.metric == "precomputed":
if isinstance(self.init, str) and self.init == "pca":
raise ValueError(
'The parameter init="pca" cannot be used with metric="precomputed".'
)
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
check_non_negative(
X,
(
"TSNE.fit(). With metric='precomputed', X "
"should contain positive distances."
),
)
if self.method == "exact" and issparse(X):
raise TypeError(
'TSNE with method="exact" does not accept sparse '
'precomputed distance matrix. Use method="barnes_hut" '
"or provide the dense distance matrix."
)
if self.method == "barnes_hut" and self.n_components > 3:
raise ValueError(
"'n_components' should be inferior to 4 for the "
"barnes_hut algorithm as it relies on "
"quad-tree or oct-tree."
)
random_state = check_random_state(self.random_state)
n_samples = X.shape[0]
neighbors_nn = None
if self.method == "exact":
# Retrieve the distance matrix, either using the precomputed one or
# computing it.
if self.metric == "precomputed":
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
# Euclidean is squared here, rather than using **= 2,
# because euclidean_distances already calculates
# squared distances, and returns np.sqrt(dist) for
# squared=False.
# Also, Euclidean is slower for n_jobs>1, so don't set here
distances = pairwise_distances(X, metric=self.metric, squared=True)
else:
metric_params_ = self.metric_params or {}
distances = pairwise_distances(
X, metric=self.metric, n_jobs=self.n_jobs, **metric_params_
)
if np.any(distances < 0):
raise ValueError(
"All distances should be positive, the metric given is not correct"
)
if self.metric != "euclidean":
distances **= 2
# compute the joint probability distribution for the input space
P = _joint_probabilities(distances, self.perplexity, self.verbose)
assert np.all(np.isfinite(P)), "All probabilities should be finite"
assert np.all(P >= 0), "All probabilities should be non-negative"
        assert np.all(P <= 1), (
            "All probabilities should be less than or equal to one"
        )
else:
# Compute the number of nearest neighbors to find.
# LvdM uses 3 * perplexity as the number of neighbors.
# In the event that we have very small # of points
# set the neighbors to n - 1.
n_neighbors = min(n_samples - 1, int(3.0 * self.perplexity + 1))
if self.verbose:
print("[t-SNE] Computing {} nearest neighbors...".format(n_neighbors))
# Find the nearest neighbors for every point
knn = NearestNeighbors(
algorithm="auto",
n_jobs=self.n_jobs,
n_neighbors=n_neighbors,
metric=self.metric,
metric_params=self.metric_params,
)
t0 = time()
knn.fit(X)
duration = time() - t0
if self.verbose:
print(
"[t-SNE] Indexed {} samples in {:.3f}s...".format(
n_samples, duration
)
)
t0 = time()
distances_nn = knn.kneighbors_graph(mode="distance")
duration = time() - t0
if self.verbose:
print(
"[t-SNE] Computed neighbors for {} samples in {:.3f}s...".format(
n_samples, duration
)
)
# Free the memory used by the ball_tree
del knn
        # knn returns the euclidean distance but we need it squared
        # to be consistent with the 'exact' method. Note that the
        # method was derived assuming euclidean distances in the
        # input space; the implications of using a different metric
        # are unclear.
distances_nn.data **= 2
# compute the joint probability distribution for the input space
P = _joint_probabilities_nn(distances_nn, self.perplexity, self.verbose)
if isinstance(self.init, np.ndarray):
X_embedded = self.init
elif self.init == "pca":
pca = PCA(
n_components=self.n_components,
svd_solver="randomized",
random_state=random_state,
)
# Always output a numpy array, no matter what is configured globally
pca.set_output(transform="default")
X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
# PCA is rescaled so that PC1 has standard deviation 1e-4 which is
# the default value for random initialization. See issue #18018.
X_embedded = X_embedded / np.std(X_embedded[:, 0]) * 1e-4
elif self.init == "random":
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
X_embedded = 1e-4 * random_state.standard_normal(
size=(n_samples, self.n_components)
).astype(np.float32)
# Degrees of freedom of the Student's t-distribution. The suggestion
# degrees_of_freedom = n_components - 1 comes from
# "Learning a Parametric Embedding by Preserving Local Structure"
# Laurens van der Maaten, 2009.
degrees_of_freedom = max(self.n_components - 1, 1)
return self._tsne(
P,
degrees_of_freedom,
n_samples,
X_embedded=X_embedded,
neighbors=neighbors_nn,
skip_num_points=skip_num_points,
)
|
Private function to fit the model using X as training data.
|
_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_t_sne.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_t_sne.py
|
BSD-3-Clause
|
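One detail worth checking from `_fit` above is the `learning_rate="auto"` heuristic, `max(n_samples / early_exaggeration / 4, 50)`, which is stored in the fitted attribute `learning_rate_`. A small sketch verifying it through the public estimator; the data shape and perplexity are illustrative assumptions.

# Check the learning_rate="auto" heuristic shown in _fit above.
import numpy as np
from sklearn.manifold import TSNE

rng = np.random.RandomState(0)
X = rng.randn(400, 5).astype(np.float32)

tsne = TSNE(n_components=2, init="pca", learning_rate="auto",
            perplexity=30, random_state=0)
tsne.fit(X)

expected = max(X.shape[0] / tsne.early_exaggeration / 4, 50)
print(tsne.learning_rate_, expected)   # 400 / 12.0 / 4 is ~8.3, clipped up to 50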
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed output.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'. If the method is 'barnes_hut' and the metric is
'precomputed', X may be a precomputed sparse graph.
y : None
Ignored.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self._check_params_vs_input(X)
embedding = self._fit(X)
self.embedding_ = embedding
return self.embedding_
|
Fit X into an embedded space and return that transformed output.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'. If the method is 'barnes_hut' and the metric is
'precomputed', X may be a precomputed sparse graph.
y : None
Ignored.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
|
fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/_t_sne.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/_t_sne.py
|
BSD-3-Clause
|
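A brief usage sketch of `fit_transform` with a precomputed distance matrix, one of the input modes the docstring above allows; the data and hyperparameters are illustrative assumptions, and `init` must not be "pca" in this mode.

# fit_transform with metric="precomputed": pass a square distance matrix.
import numpy as np
from sklearn.manifold import TSNE
from sklearn.metrics import pairwise_distances

rng = np.random.RandomState(0)
X = rng.randn(80, 4)
D = pairwise_distances(X)          # (80, 80) square distance matrix

tsne = TSNE(n_components=2, metric="precomputed", init="random",
            learning_rate="auto", perplexity=25, random_state=0)
X_new = tsne.fit_transform(D)
print(X_new.shape)                 # (80, 2)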
def test_isomap_fitted_attributes_dtype(global_dtype):
"""Check that the fitted attributes are stored accordingly to the
data type of X."""
iso = manifold.Isomap(n_neighbors=2)
X = np.array([[1, 2], [3, 4], [5, 6]], dtype=global_dtype)
iso.fit(X)
assert iso.dist_matrix_.dtype == global_dtype
assert iso.embedding_.dtype == global_dtype
|
Check that the fitted attributes are stored accordingly to the
data type of X.
|
test_isomap_fitted_attributes_dtype
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/tests/test_isomap.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/tests/test_isomap.py
|
BSD-3-Clause
|
def test_isomap_dtype_equivalence():
"""Check the equivalence of the results with 32 and 64 bits input."""
iso_32 = manifold.Isomap(n_neighbors=2)
X_32 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32)
iso_32.fit(X_32)
iso_64 = manifold.Isomap(n_neighbors=2)
X_64 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float64)
iso_64.fit(X_64)
assert_allclose(iso_32.dist_matrix_, iso_64.dist_matrix_)
|
Check the equivalence of the results with 32 and 64 bits input.
|
test_isomap_dtype_equivalence
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/tests/test_isomap.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/tests/test_isomap.py
|
BSD-3-Clause
|
def test_normed_stress(k):
"""Test that non-metric MDS normalized stress is scale-invariant."""
sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]])
X1, stress1 = mds.smacof(sim, metric=False, max_iter=5, random_state=0)
X2, stress2 = mds.smacof(k * sim, metric=False, max_iter=5, random_state=0)
assert_allclose(stress1, stress2, rtol=1e-5)
assert_allclose(X1, X2, rtol=1e-5)
|
Test that non-metric MDS normalized stress is scale-invariant.
|
test_normed_stress
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/tests/test_mds.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/tests/test_mds.py
|
BSD-3-Clause
|
def _assert_equal_with_sign_flipping(A, B, tol=0.0):
"""Check array A and B are equal with possible sign flipping on
each column"""
tol_squared = tol**2
for A_col, B_col in zip(A.T, B.T):
assert (
np.max((A_col - B_col) ** 2) <= tol_squared
or np.max((A_col + B_col) ** 2) <= tol_squared
)
|
Check array A and B are equal with possible sign flipping on
each column
|
_assert_equal_with_sign_flipping
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/tests/test_spectral_embedding.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/tests/test_spectral_embedding.py
|
BSD-3-Clause
|
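The helper above exists because spectral embeddings are eigenvector-based, so each output dimension is only defined up to a sign flip. A standalone sketch of the same column-wise comparison; the arrays are illustrative.

# Column-wise comparison of two embeddings up to a sign flip per column.
import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(10, 3)
B = A * np.array([1.0, -1.0, 1.0])   # same embedding, second column sign-flipped

for A_col, B_col in zip(A.T, B.T):
    # a column matches if it is equal either directly or with its sign flipped
    assert np.allclose(A_col, B_col) or np.allclose(A_col, -B_col)
print("embeddings match up to per-column sign flips")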
def test_spectral_embedding_preserves_dtype(eigen_solver, dtype):
    """Check that `SpectralEmbedding` preserves the dtype of the fitted
    attributes and of the transformed data.
    Ideally, this test should be covered by the common test
    `check_transformer_preserve_dtypes`. However, that test only runs
    with transformers implementing `transform`, while `SpectralEmbedding`
    implements only `fit_transform`.
"""
X = S.astype(dtype)
se = SpectralEmbedding(
n_components=2, affinity="rbf", eigen_solver=eigen_solver, random_state=0
)
X_trans = se.fit_transform(X)
assert X_trans.dtype == dtype
assert se.embedding_.dtype == dtype
assert se.affinity_matrix_.dtype == dtype
|
Check that `SpectralEmbedding` preserves the dtype of the fitted
attributes and of the transformed data.
Ideally, this test should be covered by the common test
`check_transformer_preserve_dtypes`. However, that test only runs
with transformers implementing `transform`, while `SpectralEmbedding`
implements only `fit_transform`.
|
test_spectral_embedding_preserves_dtype
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/tests/test_spectral_embedding.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/tests/test_spectral_embedding.py
|
BSD-3-Clause
|
def test_spectral_eigen_tol_auto(monkeypatch, solver, csr_container):
"""Test that `eigen_tol="auto"` is resolved correctly"""
if solver == "amg" and not pyamg_available:
pytest.skip("PyAMG is not available.")
X, _ = make_blobs(
n_samples=200, random_state=0, centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
solver_func = eigsh if solver == "arpack" else lobpcg
default_value = 0 if solver == "arpack" else None
if solver == "amg":
S = csr_container(S)
mocked_solver = Mock(side_effect=solver_func)
monkeypatch.setattr(_spectral_embedding, solver_func.__qualname__, mocked_solver)
spectral_embedding(S, random_state=42, eigen_solver=solver, eigen_tol="auto")
mocked_solver.assert_called()
_, kwargs = mocked_solver.call_args
assert kwargs["tol"] == default_value
|
Test that `eigen_tol="auto"` is resolved correctly
|
test_spectral_eigen_tol_auto
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/tests/test_spectral_embedding.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/tests/test_spectral_embedding.py
|
BSD-3-Clause
|
def test_trustworthiness_n_neighbors_error():
"""Raise an error when n_neighbors >= n_samples / 2.
Non-regression test for #18567.
"""
regex = "n_neighbors .+ should be less than .+"
rng = np.random.RandomState(42)
X = rng.rand(7, 4)
X_embedded = rng.rand(7, 2)
with pytest.raises(ValueError, match=regex):
trustworthiness(X, X_embedded, n_neighbors=5)
trust = trustworthiness(X, X_embedded, n_neighbors=3)
assert 0 <= trust <= 1
|
Raise an error when n_neighbors >= n_samples / 2.
Non-regression test for #18567.
|
test_trustworthiness_n_neighbors_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/tests/test_t_sne.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/tests/test_t_sne.py
|
BSD-3-Clause
|
def test_optimization_minimizes_kl_divergence():
"""t-SNE should give a lower KL divergence with more iterations."""
random_state = check_random_state(0)
X, _ = make_blobs(n_features=3, random_state=random_state)
kl_divergences = []
for max_iter in [250, 300, 350]:
tsne = TSNE(
n_components=2,
init="random",
perplexity=10,
learning_rate=100.0,
max_iter=max_iter,
random_state=0,
)
tsne.fit_transform(X)
kl_divergences.append(tsne.kl_divergence_)
assert kl_divergences[1] <= kl_divergences[0]
assert kl_divergences[2] <= kl_divergences[1]
|
t-SNE should give a lower KL divergence with more iterations.
|
test_optimization_minimizes_kl_divergence
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/tests/test_t_sne.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/tests/test_t_sne.py
|
BSD-3-Clause
|
def test_sparse_precomputed_distance(sparse_container):
    """Make sure that TSNE works identically for sparse and dense matrices"""
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D_sparse = kneighbors_graph(X, n_neighbors=100, mode="distance", include_self=True)
D = pairwise_distances(X)
assert sp.issparse(D_sparse)
assert_almost_equal(D_sparse.toarray(), D)
tsne = TSNE(
metric="precomputed", random_state=0, init="random", learning_rate="auto"
)
Xt_dense = tsne.fit_transform(D)
Xt_sparse = tsne.fit_transform(sparse_container(D_sparse))
assert_almost_equal(Xt_dense, Xt_sparse)
|
Make sure that TSNE works identically for sparse and dense matrices
|
test_sparse_precomputed_distance
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/tests/test_t_sne.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/tests/test_t_sne.py
|
BSD-3-Clause
|
def test_uniform_grid(method):
"""Make sure that TSNE can approximately recover a uniform 2D grid
    Due to ties in distances between points in X_2d_grid, this test is platform
    dependent for ``method='barnes_hut'`` due to numerical imprecision.
    Also, t-SNE is not assured to converge to the right solution because a bad
    initialization can lead to convergence to a bad local minimum (the
optimization problem is non-convex). To avoid breaking the test too often,
we re-run t-SNE from the final point when the convergence is not good
enough.
"""
seeds = range(3)
max_iter = 500
for seed in seeds:
tsne = TSNE(
n_components=2,
init="random",
random_state=seed,
perplexity=50,
max_iter=max_iter,
method=method,
learning_rate="auto",
)
Y = tsne.fit_transform(X_2d_grid)
try_name = "{}_{}".format(method, seed)
try:
assert_uniform_grid(Y, try_name)
except AssertionError:
# If the test fails a first time, re-run with init=Y to see if
# this was caused by a bad initialization. Note that this will
# also run an early_exaggeration step.
try_name += ":rerun"
tsne.init = Y
Y = tsne.fit_transform(X_2d_grid)
assert_uniform_grid(Y, try_name)
|
Make sure that TSNE can approximately recover a uniform 2D grid
Due to ties in distances between points in X_2d_grid, this test is platform
dependent for ``method='barnes_hut'`` due to numerical imprecision.
Also, t-SNE is not assured to converge to the right solution because a bad
initialization can lead to convergence to a bad local minimum (the
optimization problem is non-convex). To avoid breaking the test too often,
we re-run t-SNE from the final point when the convergence is not good
enough.
|
test_uniform_grid
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/tests/test_t_sne.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/tests/test_t_sne.py
|
BSD-3-Clause
|
def test_tsne_with_different_distance_metrics(metric, dist_func, method):
"""Make sure that TSNE works for different distance metrics"""
if method == "barnes_hut" and metric == "manhattan":
        # The distances computed by `manhattan_distances` differ slightly from those
        # computed internally by NearestNeighbors via the PairwiseDistancesReduction
        # Cython code. This in turn causes T-SNE to converge to a different
        # solution, but this should not impact the qualitative results, as both
        # methods lead to qualitatively similar embeddings.
# NOTE: it's probably not valid from a mathematical point of view to use the
# Manhattan distance for T-SNE...
# TODO: re-enable this test if/when `manhattan_distances` is refactored to
# reuse the same underlying Cython code NearestNeighbors.
# For reference, see:
# https://github.com/scikit-learn/scikit-learn/pull/23865/files#r925721573
pytest.xfail(
"Distance computations are different for method == 'barnes_hut' and metric"
" == 'manhattan', but this is expected."
)
random_state = check_random_state(0)
n_components_original = 3
n_components_embedding = 2
X = random_state.randn(50, n_components_original).astype(np.float32)
X_transformed_tsne = TSNE(
metric=metric,
method=method,
n_components=n_components_embedding,
random_state=0,
max_iter=300,
init="random",
learning_rate="auto",
).fit_transform(X)
X_transformed_tsne_precomputed = TSNE(
metric="precomputed",
method=method,
n_components=n_components_embedding,
random_state=0,
max_iter=300,
init="random",
learning_rate="auto",
).fit_transform(dist_func(X))
assert_array_equal(X_transformed_tsne, X_transformed_tsne_precomputed)
|
Make sure that TSNE works for different distance metrics
|
test_tsne_with_different_distance_metrics
|
python
|
scikit-learn/scikit-learn
|
sklearn/manifold/tests/test_t_sne.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/manifold/tests/test_t_sne.py
|
BSD-3-Clause
|