code
stringlengths
66
870k
docstring
stringlengths
19
26.7k
func_name
stringlengths
1
138
language
stringclasses
1 value
repo
stringlengths
7
68
path
stringlengths
5
324
url
stringlengths
46
389
license
stringclasses
7 values
def test_randomized_eigsh_reconst_low_rank(n, rank): """Check that randomized_eigsh is able to reconstruct a low rank psd matrix Tests that the decomposition provided by `_randomized_eigsh` leads to orthonormal eigenvectors, and that a low rank PSD matrix can be effectively reconstructed with good accuracy using it. """ assert rank < n # create a low rank PSD rng = np.random.RandomState(69) X = rng.randn(n, rank) A = X @ X.T # approximate A with the "right" number of components S, V = _randomized_eigsh(A, n_components=rank, random_state=rng) # orthonormality checks assert_array_almost_equal(np.linalg.norm(V, axis=0), np.ones(S.shape)) assert_array_almost_equal(V.T @ V, np.diag(np.ones(S.shape))) # reconstruction A_reconstruct = V @ np.diag(S) @ V.T # test that the approximation is good assert_array_almost_equal(A_reconstruct, A, decimal=6)
Check that randomized_eigsh is able to reconstruct a low rank psd matrix Tests that the decomposition provided by `_randomized_eigsh` leads to orthonormal eigenvectors, and that a low rank PSD matrix can be effectively reconstructed with good accuracy using it.
test_randomized_eigsh_reconst_low_rank
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_extmath.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_extmath.py
BSD-3-Clause
def max_loading_is_positive(u, v): """ returns bool tuple indicating if the values maximising np.abs are positive across all rows for u and across all columns for v. """ u_based = (np.abs(u).max(axis=0) == u.max(axis=0)).all() v_based = (np.abs(v).max(axis=1) == v.max(axis=1)).all() return u_based, v_based
returns bool tuple indicating if the values maximising np.abs are positive across all rows for u and across all columns for v.
max_loading_is_positive
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_extmath.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_extmath.py
BSD-3-Clause
def test_cartesian_mix_types(arrays, output_dtype): """Check that the cartesian product works with mixed types.""" output = cartesian(arrays) assert output.dtype == output_dtype
Check that the cartesian product works with mixed types.
test_cartesian_mix_types
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_extmath.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_extmath.py
BSD-3-Clause
def test_approximate_mode(): """Make sure sklearn.utils.extmath._approximate_mode returns valid results for cases where "class_counts * n_draws" is enough to overflow 32-bit signed integer. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/20774 """ X = np.array([99000, 1000], dtype=np.int32) ret = _approximate_mode(class_counts=X, n_draws=25000, rng=0) # Draws 25% of the total population, so in this case a fair draw means: # 25% * 99.000 = 24.750 # 25% * 1.000 = 250 assert_array_equal(ret, [24750, 250])
Make sure sklearn.utils.extmath._approximate_mode returns valid results for cases where "class_counts * n_draws" is enough to overflow 32-bit signed integer. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/20774
test_approximate_mode
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_extmath.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_extmath.py
BSD-3-Clause
def test_smallest_admissible_index_dtype_without_checking_contents( params, expected_dtype ): """Check the behaviour of `smallest_admissible_index_dtype` using the passed arrays but without checking the contents of the arrays. """ assert _smallest_admissible_index_dtype(**params) == expected_dtype
Check the behaviour of `smallest_admissible_index_dtype` using the passed arrays but without checking the contents of the arrays.
test_smallest_admissible_index_dtype_without_checking_contents
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_fixes.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_fixes.py
BSD-3-Clause
def test_safe_indexing_list_axis_1_unsupported(indices): """Check that we raise a ValueError when axis=1 with input as list.""" X = [[1, 2], [4, 5], [7, 8]] err_msg = "axis=1 is not supported for lists" with pytest.raises(ValueError, match=err_msg): _safe_indexing(X, indices, axis=1)
Check that we raise a ValueError when axis=1 with input as list.
test_safe_indexing_list_axis_1_unsupported
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_indexing.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_indexing.py
BSD-3-Clause
def test_get_column_indices_interchange(): """Check _get_column_indices for edge cases with the interchange""" pl = pytest.importorskip("polars") # Polars dataframes go down the interchange path. df = pl.DataFrame([[1, 2, 3], [4, 5, 6]], schema=["a", "b", "c"]) key_results = [ (slice(1, None), [1, 2]), (slice(None, 2), [0, 1]), (slice(1, 2), [1]), (["b", "c"], [1, 2]), (slice("a", "b"), [0, 1]), (slice("a", None), [0, 1, 2]), (slice(None, "a"), [0]), (["c", "a"], [2, 0]), ([], []), ] for key, result in key_results: assert _get_column_indices(df, key) == result msg = "A given column is not a column of the dataframe" with pytest.raises(ValueError, match=msg): _get_column_indices(df, ["not_a_column"]) msg = "key.step must be 1 or None" with pytest.raises(NotImplementedError, match=msg): _get_column_indices(df, slice("a", None, 2))
Check _get_column_indices for edge cases with the interchange
test_get_column_indices_interchange
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_indexing.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_indexing.py
BSD-3-Clause
def test_available_if_methods_can_be_pickled(): """Check that available_if methods can be pickled. Non-regression test for #21344. """ return_value = 10 est = AvailableParameterEstimator(available=True, return_value=return_value) pickled_bytes = pickle.dumps(est.available_func) unpickled_func = pickle.loads(pickled_bytes) assert unpickled_func() == return_value
Check that available_if methods can be pickled. Non-regression test for #21344.
test_available_if_methods_can_be_pickled
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_metaestimators.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_metaestimators.py
BSD-3-Clause
def test_type_of_target_too_many_unique_classes(): """Check that we raise a warning when the number of unique classes is greater than 50% of the number of samples. We need to check that we don't raise if we have less than 20 samples. """ y = np.arange(25) msg = r"The number of unique classes is greater than 50% of the number of samples." with pytest.warns(UserWarning, match=msg): type_of_target(y) # less than 20 samples, no warning should be raised y = np.arange(10) with warnings.catch_warnings(): warnings.simplefilter("error") type_of_target(y)
Check that we raise a warning when the number of unique classes is greater than 50% of the number of samples. We need to check that we don't raise if we have less than 20 samples.
test_type_of_target_too_many_unique_classes
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_multiclass.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_multiclass.py
BSD-3-Clause
def test_type_of_target_pandas_nullable(): """Check that type_of_target works with pandas nullable dtypes.""" pd = pytest.importorskip("pandas") for dtype in ["Int32", "Float32"]: y_true = pd.Series([1, 0, 2, 3, 4], dtype=dtype) assert type_of_target(y_true) == "multiclass" y_true = pd.Series([1, 0, 1, 0], dtype=dtype) assert type_of_target(y_true) == "binary" y_true = pd.DataFrame([[1.4, 3.1], [3.1, 1.4]], dtype="Float32") assert type_of_target(y_true) == "continuous-multioutput" y_true = pd.DataFrame([[0, 1], [1, 1]], dtype="Int32") assert type_of_target(y_true) == "multilabel-indicator" y_true = pd.DataFrame([[1, 2], [3, 1]], dtype="Int32") assert type_of_target(y_true) == "multiclass-multioutput"
Check that type_of_target works with pandas nullable dtypes.
test_type_of_target_pandas_nullable
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_multiclass.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_multiclass.py
BSD-3-Clause
def test_unique_labels_pandas_nullable(dtype): """Checks that unique_labels work with pandas nullable dtypes. Non-regression test for gh-25634. """ pd = pytest.importorskip("pandas") y_true = pd.Series([1, 0, 0, 1, 0, 1, 1, 0, 1], dtype=dtype) y_predicted = pd.Series([0, 0, 1, 1, 0, 1, 1, 1, 1], dtype="int64") labels = unique_labels(y_true, y_predicted) assert_array_equal(labels, [0, 1])
Checks that unique_labels work with pandas nullable dtypes. Non-regression test for gh-25634.
test_unique_labels_pandas_nullable
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_multiclass.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_multiclass.py
BSD-3-Clause
def test_newton_cg_verbosity(capsys, verbose): """Test the std output of verbose newton_cg solver.""" A = np.eye(2) b = np.array([1, 2], dtype=float) _newton_cg( grad_hess=lambda x: (A @ x - b, lambda z: A @ z), func=lambda x: 0.5 * x @ A @ x - b @ x, grad=lambda x: A @ x - b, x0=np.zeros(A.shape[0]), verbose=verbose, ) # returns array([1., 2]) captured = capsys.readouterr() if verbose == 0: assert captured.out == "" else: msg = [ "Newton-CG iter = 1", "Check Convergence", "max |gradient|", "Solver did converge at loss = ", ] for m in msg: assert m in captured.out if verbose >= 2: msg = [ "Inner CG solver iteration 1 stopped with", "sum(|residuals|) <= tol", "Line Search", "try line search wolfe1", "wolfe1 line search was successful", ] for m in msg: assert m in captured.out if verbose >= 2: # Set up a badly scaled singular Hessian with a completely wrong starting # position. This should trigger 2nd line search check A = np.array([[1.0, 2], [2, 4]]) * 1e30 # collinear columns b = np.array([1.0, 2.0]) # Note that scipy.optimize._linesearch LineSearchWarning inherits from # RuntimeWarning, but we do not want to import from non public APIs. with pytest.warns(RuntimeWarning): _newton_cg( grad_hess=lambda x: (A @ x - b, lambda z: A @ z), func=lambda x: 0.5 * x @ A @ x - b @ x, grad=lambda x: A @ x - b, x0=np.array([-2.0, 1]), # null space of hessian verbose=verbose, ) captured = capsys.readouterr() msg = [ "wolfe1 line search was not successful", "check loss |improvement| <= eps * |loss_old|:", "check sum(|gradient|) < sum(|gradient_old|):", "last resort: try line search wolfe2", ] for m in msg: assert m in captured.out # Set up a badly conditioned Hessian that leads to tiny curvature. # X.T @ X have singular values array([1.00000400e+01, 1.00008192e-11]) A = np.array([[1.0, 2], [1, 2 + 1e-15]]) b = np.array([-2.0, 1]) with pytest.warns(ConvergenceWarning): _newton_cg( grad_hess=lambda x: (A @ x - b, lambda z: A @ z), func=lambda x: 0.5 * x @ A @ x - b @ x, grad=lambda x: A @ x - b, x0=b, verbose=verbose, maxiter=2, ) captured = capsys.readouterr() msg = [ "tiny_|p| = eps * ||p||^2", ] for m in msg: assert m in captured.out # Test for a case with negative Hessian. # We do not trigger "Inner CG solver iteration {i} stopped with negative # curvature", but that is very hard to trigger. A = np.eye(2) b = np.array([-2.0, 1]) with pytest.warns(RuntimeWarning): _newton_cg( # Note the wrong sign in the hessian product. grad_hess=lambda x: (A @ x - b, lambda z: -A @ z), func=lambda x: 0.5 * x @ A @ x - b @ x, grad=lambda x: A @ x - b, x0=np.array([1.0, 1.0]), verbose=verbose, maxiter=3, ) captured = capsys.readouterr() msg = [ "Inner CG solver iteration 0 fell back to steepest descent", ] for m in msg: assert m in captured.out A = np.diag([1e-3, 1, 1e3]) b = np.array([-2.0, 1, 2.0]) with pytest.warns(ConvergenceWarning): _newton_cg( grad_hess=lambda x: (A @ x - b, lambda z: A @ z), func=lambda x: 0.5 * x @ A @ x - b @ x, grad=lambda x: A @ x - b, x0=np.ones_like(b), verbose=verbose, maxiter=2, maxinner=1, ) captured = capsys.readouterr() msg = [ "Inner CG solver stopped reaching maxiter=1", ] for m in msg: assert m in captured.out
Test the std output of verbose newton_cg solver.
test_newton_cg_verbosity
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_optimize.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_optimize.py
BSD-3-Clause
def test_parallel_delayed_warnings(): """Informative warnings should be raised when mixing sklearn and joblib API""" # We should issue a warning when one wants to use sklearn.utils.fixes.Parallel # with joblib.delayed. The config will not be propagated to the workers. warn_msg = "`sklearn.utils.parallel.Parallel` needs to be used in conjunction" with pytest.warns(UserWarning, match=warn_msg) as records: Parallel()(joblib.delayed(time.sleep)(0) for _ in range(10)) assert len(records) == 10 # We should issue a warning if one wants to use sklearn.utils.fixes.delayed with # joblib.Parallel warn_msg = ( "`sklearn.utils.parallel.delayed` should be used with " "`sklearn.utils.parallel.Parallel` to make it possible to propagate" ) with pytest.warns(UserWarning, match=warn_msg) as records: joblib.Parallel()(delayed(time.sleep)(0) for _ in range(10)) assert len(records) == 10
Informative warnings should be raised when mixing sklearn and joblib API
test_parallel_delayed_warnings
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_parallel.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_parallel.py
BSD-3-Clause
def test_dispatch_config_parallel(n_jobs): """Check that we properly dispatch the configuration in parallel processing. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/25239 """ pd = pytest.importorskip("pandas") iris = load_iris(as_frame=True) class TransformerRequiredDataFrame(StandardScaler): def fit(self, X, y=None): assert isinstance(X, pd.DataFrame), "X should be a DataFrame" return super().fit(X, y) def transform(self, X, y=None): assert isinstance(X, pd.DataFrame), "X should be a DataFrame" return super().transform(X, y) dropper = make_column_transformer( ("drop", [0]), remainder="passthrough", n_jobs=n_jobs, ) param_grid = {"randomforestclassifier__max_depth": [1, 2, 3]} search_cv = GridSearchCV( make_pipeline( dropper, TransformerRequiredDataFrame(), RandomForestClassifier(n_estimators=5, n_jobs=n_jobs), ), param_grid, cv=5, n_jobs=n_jobs, error_score="raise", # this search should not fail ) # make sure that `fit` would fail in case we don't request dataframe with pytest.raises(AssertionError, match="X should be a DataFrame"): search_cv.fit(iris.data, iris.target) with config_context(transform_output="pandas"): # we expect each intermediate steps to output a DataFrame search_cv.fit(iris.data, iris.target) assert not np.isnan(search_cv.cv_results_["mean_test_score"]).any()
Check that we properly dispatch the configuration in parallel processing. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/25239
test_dispatch_config_parallel
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_parallel.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_parallel.py
BSD-3-Clause
def test_filter_warning_propagates(n_jobs, backend): """Check warning propagates to the job.""" with warnings.catch_warnings(): warnings.simplefilter("error", category=ConvergenceWarning) with pytest.raises(ConvergenceWarning): Parallel(n_jobs=n_jobs, backend=backend)( delayed(raise_warning)() for _ in range(2) )
Check warning propagates to the job.
test_filter_warning_propagates
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_parallel.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_parallel.py
BSD-3-Clause
def test_check_warnings_threading(): """Check that warnings filters are set correctly in the threading backend.""" with warnings.catch_warnings(): warnings.simplefilter("error", category=ConvergenceWarning) filters = warnings.filters assert ("error", None, ConvergenceWarning, None, 0) in filters all_warnings = Parallel(n_jobs=2, backend="threading")( delayed(get_warnings)() for _ in range(2) ) assert all(w == filters for w in all_warnings)
Check that warnings filters are set correctly in the threading backend.
test_check_warnings_threading
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_parallel.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_parallel.py
BSD-3-Clause
def test_interval_range(interval_type): """Check the range of values depending on closed.""" interval = Interval(interval_type, -2, 2, closed="left") assert -2 in interval assert 2 not in interval interval = Interval(interval_type, -2, 2, closed="right") assert -2 not in interval assert 2 in interval interval = Interval(interval_type, -2, 2, closed="both") assert -2 in interval assert 2 in interval interval = Interval(interval_type, -2, 2, closed="neither") assert -2 not in interval assert 2 not in interval
Check the range of values depending on closed.
test_interval_range
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_interval_large_integers(interval_type): """Check that Interval constraint work with large integers. non-regression test for #26648. """ interval = Interval(interval_type, 0, 2, closed="neither") assert 2**65 not in interval assert 2**128 not in interval assert float(2**65) not in interval assert float(2**128) not in interval interval = Interval(interval_type, 0, 2**128, closed="neither") assert 2**65 in interval assert 2**128 not in interval assert float(2**65) in interval assert float(2**128) not in interval assert 2**1024 not in interval
Check that Interval constraint work with large integers. non-regression test for #26648.
test_interval_large_integers
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_interval_inf_in_bounds(): """Check that inf is included iff a bound is closed and set to None. Only valid for real intervals. """ interval = Interval(Real, 0, None, closed="right") assert np.inf in interval interval = Interval(Real, None, 0, closed="left") assert -np.inf in interval interval = Interval(Real, None, None, closed="neither") assert np.inf not in interval assert -np.inf not in interval
Check that inf is included iff a bound is closed and set to None. Only valid for real intervals.
test_interval_inf_in_bounds
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_stroptions(): """Sanity check for the StrOptions constraint""" options = StrOptions({"a", "b", "c"}, deprecated={"c"}) assert options.is_satisfied_by("a") assert options.is_satisfied_by("c") assert not options.is_satisfied_by("d") assert "'c' (deprecated)" in str(options)
Sanity check for the StrOptions constraint
test_stroptions
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_options(): """Sanity check for the Options constraint""" options = Options(Real, {-0.5, 0.5, np.inf}, deprecated={-0.5}) assert options.is_satisfied_by(-0.5) assert options.is_satisfied_by(np.inf) assert not options.is_satisfied_by(1.23) assert "-0.5 (deprecated)" in str(options)
Sanity check for the Options constraint
test_options
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_instances_of_type_human_readable(type, expected_type_name): """Check the string representation of the _InstancesOf constraint.""" constraint = _InstancesOf(type) assert str(constraint) == f"an instance of '{expected_type_name}'"
Check the string representation of the _InstancesOf constraint.
test_instances_of_type_human_readable
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_generate_invalid_param_val(constraint): """Check that the value generated does not satisfy the constraint""" bad_value = generate_invalid_param_val(constraint) assert not constraint.is_satisfied_by(bad_value)
Check that the value generated does not satisfy the constraint
test_generate_invalid_param_val
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_generate_invalid_param_val_2_intervals(integer_interval, real_interval): """Check that the value generated for an interval constraint does not satisfy any of the interval constraints. """ bad_value = generate_invalid_param_val(constraint=real_interval) assert not real_interval.is_satisfied_by(bad_value) assert not integer_interval.is_satisfied_by(bad_value) bad_value = generate_invalid_param_val(constraint=integer_interval) assert not real_interval.is_satisfied_by(bad_value) assert not integer_interval.is_satisfied_by(bad_value)
Check that the value generated for an interval constraint does not satisfy any of the interval constraints.
test_generate_invalid_param_val_2_intervals
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_make_constraint(constraint_declaration, expected_constraint_class): """Check that make_constraint dispatches to the appropriate constraint class""" constraint = make_constraint(constraint_declaration) assert constraint.__class__ is expected_constraint_class
Check that make_constraint dispatches to the appropriate constraint class
test_make_constraint
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_make_constraint_unknown(): """Check that an informative error is raised when an unknown constraint is passed""" with pytest.raises(ValueError, match="Unknown constraint"): make_constraint("not a valid constraint")
Check that an informative error is raised when an unknown constraint is passed
test_make_constraint_unknown
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_validate_params(): """Check that validate_params works no matter how the arguments are passed""" with pytest.raises( InvalidParameterError, match="The 'a' parameter of _func must be" ): _func("wrong", c=1) with pytest.raises( InvalidParameterError, match="The 'b' parameter of _func must be" ): _func(*[1, "wrong"], c=1) with pytest.raises( InvalidParameterError, match="The 'c' parameter of _func must be" ): _func(1, **{"c": "wrong"}) with pytest.raises( InvalidParameterError, match="The 'd' parameter of _func must be" ): _func(1, c=1, d="wrong") # check in the presence of extra positional and keyword args with pytest.raises( InvalidParameterError, match="The 'b' parameter of _func must be" ): _func(0, *["wrong", 2, 3], c=4, **{"e": 5}) with pytest.raises( InvalidParameterError, match="The 'c' parameter of _func must be" ): _func(0, *[1, 2, 3], c="four", **{"e": 5})
Check that validate_params works no matter how the arguments are passed
test_validate_params
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_validate_params_missing_params(): """Check that no error is raised when there are parameters without constraints """ @validate_params({"a": [int]}, prefer_skip_nested_validation=True) def func(a, b): pass func(1, 2)
Check that no error is raised when there are parameters without constraints
test_validate_params_missing_params
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_decorate_validated_function(): """Check that validate_params functions can be decorated""" decorated_function = deprecated()(_func) with pytest.warns(FutureWarning, match="Function _func is deprecated"): decorated_function(1, 2, c=3) # outer decorator does not interfere with validation with pytest.warns(FutureWarning, match="Function _func is deprecated"): with pytest.raises( InvalidParameterError, match=r"The 'c' parameter of _func must be" ): decorated_function(1, 2, c="wrong")
Check that validate_params functions can be decorated
test_decorate_validated_function
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_validate_params_estimator(): """Check that validate_params works with Estimator instances""" # no validation in init est = _Estimator("wrong") with pytest.raises( InvalidParameterError, match="The 'a' parameter of _Estimator must be" ): est.fit()
Check that validate_params works with Estimator instances
test_validate_params_estimator
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_stroptions_deprecated_subset(): """Check that the deprecated parameter must be a subset of options.""" with pytest.raises(ValueError, match="deprecated options must be a subset"): StrOptions({"a", "b", "c"}, deprecated={"a", "d"})
Check that the deprecated parameter must be a subset of options.
test_stroptions_deprecated_subset
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_hidden_constraint(): """Check that internal constraints are not exposed in the error message.""" @validate_params( {"param": [Hidden(list), dict]}, prefer_skip_nested_validation=True ) def f(param): pass # list and dict are valid params f({"a": 1, "b": 2, "c": 3}) f([1, 2, 3]) with pytest.raises( InvalidParameterError, match="The 'param' parameter" ) as exc_info: f(param="bad") # the list option is not exposed in the error message err_msg = str(exc_info.value) assert "an instance of 'dict'" in err_msg assert "an instance of 'list'" not in err_msg
Check that internal constraints are not exposed in the error message.
test_hidden_constraint
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_hidden_stroptions(): """Check that we can have 2 StrOptions constraints, one being hidden.""" @validate_params( {"param": [StrOptions({"auto"}), Hidden(StrOptions({"warn"}))]}, prefer_skip_nested_validation=True, ) def f(param): pass # "auto" and "warn" are valid params f("auto") f("warn") with pytest.raises( InvalidParameterError, match="The 'param' parameter" ) as exc_info: f(param="bad") # the "warn" option is not exposed in the error message err_msg = str(exc_info.value) assert "auto" in err_msg assert "warn" not in err_msg
Check that we can have 2 StrOptions constraints, one being hidden.
test_hidden_stroptions
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_validate_params_set_param_constraints_attribute(): """Check that the validate_params decorator properly sets the parameter constraints as attribute of the decorated function/method. """ assert hasattr(_func, "_skl_parameter_constraints") assert hasattr(_Class()._method, "_skl_parameter_constraints")
Check that the validate_params decorator properly sets the parameter constraints as attribute of the decorated function/method.
test_validate_params_set_param_constraints_attribute
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_boolean_constraint_deprecated_int(): """Check that validate_params raise a deprecation message but still passes validation when using an int for a parameter accepting a boolean. """ @validate_params({"param": ["boolean"]}, prefer_skip_nested_validation=True) def f(param): pass # True/False and np.bool_(True/False) are valid params f(True) f(np.bool_(False))
Check that validate_params raise a deprecation message but still passes validation when using an int for a parameter accepting a boolean.
test_boolean_constraint_deprecated_int
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_no_validation(): """Check that validation can be skipped for a parameter.""" @validate_params( {"param1": [int, None], "param2": "no_validation"}, prefer_skip_nested_validation=True, ) def f(param1=None, param2=None): pass # param1 is validated with pytest.raises(InvalidParameterError, match="The 'param1' parameter"): f(param1="wrong") # param2 is not validated: any type is valid. class SomeType: pass f(param2=SomeType) f(param2=SomeType())
Check that validation can be skipped for a parameter.
test_no_validation
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_pandas_na_constraint_with_pd_na(): """Add a specific test for checking support for `pandas.NA`.""" pd = pytest.importorskip("pandas") na_constraint = _PandasNAConstraint() assert na_constraint.is_satisfied_by(pd.NA) assert not na_constraint.is_satisfied_by(np.array([1, 2, 3]))
Add a specific test for checking support for `pandas.NA`.
test_pandas_na_constraint_with_pd_na
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_iterable_not_string(): """Check that a string does not satisfy the _IterableNotString constraint.""" constraint = _IterablesNotString() assert constraint.is_satisfied_by([1, 2, 3]) assert constraint.is_satisfied_by(range(10)) assert not constraint.is_satisfied_by("some string")
Check that a string does not satisfy the _IterableNotString constraint.
test_iterable_not_string
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_cv_objects(): """Check that the _CVObjects constraint accepts all current ways to pass cv objects.""" constraint = _CVObjects() assert constraint.is_satisfied_by(5) assert constraint.is_satisfied_by(LeaveOneOut()) assert constraint.is_satisfied_by([([1, 2], [3, 4]), ([3, 4], [1, 2])]) assert constraint.is_satisfied_by(None) assert not constraint.is_satisfied_by("not a CV object")
Check that the _CVObjects constraint accepts all current ways to pass cv objects.
test_cv_objects
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_third_party_estimator(): """Check that the validation from a scikit-learn estimator inherited by a third party estimator does not impose a match between the dict of constraints and the parameters of the estimator. """ class ThirdPartyEstimator(_Estimator): def __init__(self, b): self.b = b super().__init__(a=0) def fit(self, X=None, y=None): super().fit(X, y) # does not raise, even though "b" is not in the constraints dict and "a" is not # a parameter of the estimator. ThirdPartyEstimator(b=0).fit()
Check that the validation from a scikit-learn estimator inherited by a third party estimator does not impose a match between the dict of constraints and the parameters of the estimator.
test_third_party_estimator
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_interval_real_not_int(): """Check for the type RealNotInt in the Interval constraint.""" constraint = Interval(RealNotInt, 0, 1, closed="both") assert constraint.is_satisfied_by(1.0) assert not constraint.is_satisfied_by(1)
Check for the type RealNotInt in the Interval constraint.
test_interval_real_not_int
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_skip_param_validation(): """Check that param validation can be skipped using config_context.""" @validate_params({"a": [int]}, prefer_skip_nested_validation=True) def f(a): pass with pytest.raises(InvalidParameterError, match="The 'a' parameter"): f(a="1") # does not raise with config_context(skip_parameter_validation=True): f(a="1")
Check that param validation can be skipped using config_context.
test_skip_param_validation
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_skip_nested_validation(prefer_skip_nested_validation): """Check that nested validation can be skipped.""" @validate_params({"a": [int]}, prefer_skip_nested_validation=True) def f(a): pass @validate_params( {"b": [int]}, prefer_skip_nested_validation=prefer_skip_nested_validation, ) def g(b): # calls f with a bad parameter type return f(a="invalid_param_value") # Validation for g is never skipped. with pytest.raises(InvalidParameterError, match="The 'b' parameter"): g(b="invalid_param_value") if prefer_skip_nested_validation: g(b=1) # does not raise because inner f is not validated else: with pytest.raises(InvalidParameterError, match="The 'a' parameter"): g(b=1)
Check that nested validation can be skipped.
test_skip_nested_validation
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_skip_nested_validation_and_config_context( skip_parameter_validation, prefer_skip_nested_validation, expected_skipped ): """Check interaction between global skip and local skip.""" @validate_params( {"a": [int]}, prefer_skip_nested_validation=prefer_skip_nested_validation ) def g(a): return get_config()["skip_parameter_validation"] with config_context(skip_parameter_validation=skip_parameter_validation): actual_skipped = g(1) assert actual_skipped == expected_skipped
Check interaction between global skip and local skip.
test_skip_nested_validation_and_config_context
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_param_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_param_validation.py
BSD-3-Clause
def test_validate_curve_kwargs_single_legend( name, legend_metric, legend_metric_name, curve_kwargs ): """Check `_validate_curve_kwargs` returns correct kwargs for single legend entry.""" n_curves = 3 curve_kwargs_out = _BinaryClassifierCurveDisplayMixin._validate_curve_kwargs( n_curves=n_curves, name=name, legend_metric=legend_metric, legend_metric_name=legend_metric_name, curve_kwargs=curve_kwargs, ) assert isinstance(curve_kwargs_out, list) assert len(curve_kwargs_out) == n_curves expected_label = None if isinstance(name, list): name = name[0] if name is not None: expected_label = name if legend_metric["mean"] is not None: expected_label = expected_label + f" ({legend_metric_name} = 0.80 +/- 0.20)" # `name` is None elif legend_metric["mean"] is not None: expected_label = f"{legend_metric_name} = 0.80 +/- 0.20" assert curve_kwargs_out[0]["label"] == expected_label # All remaining curves should have None as "label" assert curve_kwargs_out[1]["label"] is None assert curve_kwargs_out[2]["label"] is None # Default multi-curve kwargs if curve_kwargs is None: assert all(len(kwargs) == 4 for kwargs in curve_kwargs_out) assert all(kwargs["alpha"] == 0.5 for kwargs in curve_kwargs_out) assert all(kwargs["linestyle"] == "--" for kwargs in curve_kwargs_out) assert all(kwargs["color"] == "blue" for kwargs in curve_kwargs_out) else: assert all(len(kwargs) == 2 for kwargs in curve_kwargs_out) assert all(kwargs["color"] == "red" for kwargs in curve_kwargs_out)
Check `_validate_curve_kwargs` returns correct kwargs for single legend entry.
test_validate_curve_kwargs_single_legend
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_plotting.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_plotting.py
BSD-3-Clause
def test_validate_curve_kwargs_multi_legend(name, legend_metric, legend_metric_name): """Check `_validate_curve_kwargs` returns correct kwargs for multi legend entry.""" n_curves = 3 curve_kwargs = [{"color": "red"}, {"color": "yellow"}, {"color": "blue"}] curve_kwargs_out = _BinaryClassifierCurveDisplayMixin._validate_curve_kwargs( n_curves=n_curves, name=name, legend_metric=legend_metric, legend_metric_name=legend_metric_name, curve_kwargs=curve_kwargs, ) assert isinstance(curve_kwargs_out, list) assert len(curve_kwargs_out) == n_curves expected_labels = [None, None, None] if isinstance(name, str): expected_labels = "curve_name" if legend_metric["metric"][0] is not None: expected_labels = expected_labels + f" ({legend_metric_name} = 1.00)" expected_labels = [expected_labels] * n_curves elif isinstance(name, list) and legend_metric["metric"][0] is None: expected_labels = name elif isinstance(name, list) and legend_metric["metric"][0] is not None: expected_labels = [ f"{name_single} ({legend_metric_name} = 1.00)" for name_single in name ] # `name` is None elif legend_metric["metric"][0] is not None: expected_labels = [f"{legend_metric_name} = 1.00"] * n_curves for idx, expected_label in enumerate(expected_labels): assert curve_kwargs_out[idx]["label"] == expected_label assert all(len(kwargs) == 2 for kwargs in curve_kwargs_out) for curve_kwarg, curve_kwarg_out in zip(curve_kwargs, curve_kwargs_out): assert curve_kwarg_out["color"] == curve_kwarg["color"]
Check `_validate_curve_kwargs` returns correct kwargs for multi legend entry.
test_validate_curve_kwargs_multi_legend
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_plotting.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_plotting.py
BSD-3-Clause
def test_validate_score_name(score_name, scoring, negate_score, expected_score_name): """Check that we return the right score name.""" assert ( _validate_score_name(score_name, scoring, negate_score) == expected_score_name )
Check that we return the right score name.
test_validate_score_name
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_plotting.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_plotting.py
BSD-3-Clause
def test_validate_style_kwargs(default_kwargs, user_kwargs, expected): """Check the behaviour of `validate_style_kwargs` with various type of entries.""" result = _validate_style_kwargs(default_kwargs, user_kwargs) assert result == expected, ( "The validation of style keywords does not provide the expected results: " f"Got {result} instead of {expected}." )
Check the behaviour of `validate_style_kwargs` with various type of entries.
test_validate_style_kwargs
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_plotting.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_plotting.py
BSD-3-Clause
def test_get_response_values_regressor_error(response_method): """Check the error message with regressor an not supported response method.""" my_estimator = _MockEstimatorOnOffPrediction(response_methods=[response_method]) X = "mocking_data", "mocking_target" err_msg = f"{my_estimator.__class__.__name__} should either be a classifier" with pytest.raises(ValueError, match=err_msg): _get_response_values(my_estimator, X, response_method=response_method)
Check the error message with regressor an not supported response method.
test_get_response_values_regressor_error
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_response.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_response.py
BSD-3-Clause
def test_get_response_values_regressor(return_response_method_used): """Check the behaviour of `_get_response_values` with regressor.""" X, y = make_regression(n_samples=10, random_state=0) regressor = LinearRegression().fit(X, y) results = _get_response_values( regressor, X, response_method="predict", return_response_method_used=return_response_method_used, ) assert_array_equal(results[0], regressor.predict(X)) assert results[1] is None if return_response_method_used: assert results[2] == "predict"
Check the behaviour of `_get_response_values` with regressor.
test_get_response_values_regressor
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_response.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_response.py
BSD-3-Clause
def test_get_response_values_outlier_detection( response_method, return_response_method_used ): """Check the behaviour of `_get_response_values` with outlier detector.""" X, y = make_classification(n_samples=50, random_state=0) outlier_detector = IsolationForest(random_state=0).fit(X, y) results = _get_response_values( outlier_detector, X, response_method=response_method, return_response_method_used=return_response_method_used, ) chosen_response_method = ( response_method[0] if isinstance(response_method, list) else response_method ) prediction_method = getattr(outlier_detector, chosen_response_method) assert_array_equal(results[0], prediction_method(X)) assert results[1] is None if return_response_method_used: assert results[2] == chosen_response_method
Check the behaviour of `_get_response_values` with outlier detector.
test_get_response_values_outlier_detection
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_response.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_response.py
BSD-3-Clause
def test_get_response_values_classifier_unknown_pos_label(response_method): """Check that `_get_response_values` raises the proper error message with classifier.""" X, y = make_classification(n_samples=10, n_classes=2, random_state=0) classifier = LogisticRegression().fit(X, y) # provide a `pos_label` which is not in `y` err_msg = r"pos_label=whatever is not a valid label: It should be one of \[0 1\]" with pytest.raises(ValueError, match=err_msg): _get_response_values( classifier, X, response_method=response_method, pos_label="whatever", )
Check that `_get_response_values` raises the proper error message with classifier.
test_get_response_values_classifier_unknown_pos_label
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_response.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_response.py
BSD-3-Clause
def test_get_response_values_classifier_inconsistent_y_pred_for_binary_proba( response_method, ): """Check that `_get_response_values` will raise an error when `y_pred` has a single class with `predict_proba`.""" X, y_two_class = make_classification(n_samples=10, n_classes=2, random_state=0) y_single_class = np.zeros_like(y_two_class) classifier = DecisionTreeClassifier().fit(X, y_single_class) err_msg = ( r"Got predict_proba of shape \(10, 1\), but need classifier with " r"two classes" ) with pytest.raises(ValueError, match=err_msg): _get_response_values(classifier, X, response_method=response_method)
Check that `_get_response_values` will raise an error when `y_pred` has a single class with `predict_proba`.
test_get_response_values_classifier_inconsistent_y_pred_for_binary_proba
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_response.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_response.py
BSD-3-Clause
def test_get_response_values_binary_classifier_decision_function( return_response_method_used, ): """Check the behaviour of `_get_response_values` with `decision_function` and binary classifier.""" X, y = make_classification( n_samples=10, n_classes=2, weights=[0.3, 0.7], random_state=0, ) classifier = LogisticRegression().fit(X, y) response_method = "decision_function" # default `pos_label` results = _get_response_values( classifier, X, response_method=response_method, pos_label=None, return_response_method_used=return_response_method_used, ) assert_allclose(results[0], classifier.decision_function(X)) assert results[1] == 1 if return_response_method_used: assert results[2] == "decision_function" # when forcing `pos_label=classifier.classes_[0]` results = _get_response_values( classifier, X, response_method=response_method, pos_label=classifier.classes_[0], return_response_method_used=return_response_method_used, ) assert_allclose(results[0], classifier.decision_function(X) * -1) assert results[1] == 0 if return_response_method_used: assert results[2] == "decision_function"
Check the behaviour of `_get_response_values` with `decision_function` and binary classifier.
test_get_response_values_binary_classifier_decision_function
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_response.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_response.py
BSD-3-Clause
def test_get_response_values_binary_classifier_predict_proba( return_response_method_used, response_method ): """Check that `_get_response_values` with `predict_proba` and binary classifier.""" X, y = make_classification( n_samples=10, n_classes=2, weights=[0.3, 0.7], random_state=0, ) classifier = LogisticRegression().fit(X, y) # default `pos_label` results = _get_response_values( classifier, X, response_method=response_method, pos_label=None, return_response_method_used=return_response_method_used, ) assert_allclose(results[0], getattr(classifier, response_method)(X)[:, 1]) assert results[1] == 1 if return_response_method_used: assert len(results) == 3 assert results[2] == response_method else: assert len(results) == 2 # when forcing `pos_label=classifier.classes_[0]` y_pred, pos_label, *_ = _get_response_values( classifier, X, response_method=response_method, pos_label=classifier.classes_[0], return_response_method_used=return_response_method_used, ) assert_allclose(y_pred, getattr(classifier, response_method)(X)[:, 0]) assert pos_label == 0
Check that `_get_response_values` with `predict_proba` and binary classifier.
test_get_response_values_binary_classifier_predict_proba
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_response.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_response.py
BSD-3-Clause
def test_get_response_error(estimator, X, y, err_msg, params): """Check that we raise the proper error messages in _get_response_values_binary.""" estimator.fit(X, y) with pytest.raises(ValueError, match=err_msg): _get_response_values_binary(estimator, X, **params)
Check that we raise the proper error messages in _get_response_values_binary.
test_get_response_error
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_response.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_response.py
BSD-3-Clause
def test_get_response_values_multiclass(estimator, response_method): """Check that we can call `_get_response_values` with a multiclass estimator. It should return the predictions untouched. """ estimator.fit(X, y) predictions, pos_label = _get_response_values( estimator, X, response_method=response_method ) assert pos_label is None assert predictions.shape == (X.shape[0], len(estimator.classes_)) if response_method == "predict_proba": assert np.logical_and(predictions >= 0, predictions <= 1).all() elif response_method == "predict_log_proba": assert (predictions <= 0.0).all()
Check that we can call `_get_response_values` with a multiclass estimator. It should return the predictions untouched.
test_get_response_values_multiclass
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_response.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_response.py
BSD-3-Clause
def test_get_response_values_with_response_list(): """Check the behaviour of passing a list of responses to `_get_response_values`.""" classifier = LogisticRegression().fit(X_binary, y_binary) # it should use `predict_proba` y_pred, pos_label, response_method = _get_response_values( classifier, X_binary, response_method=["predict_proba", "decision_function"], return_response_method_used=True, ) assert_allclose(y_pred, classifier.predict_proba(X_binary)[:, 1]) assert pos_label == 1 assert response_method == "predict_proba" # it should use `decision_function` y_pred, pos_label, response_method = _get_response_values( classifier, X_binary, response_method=["decision_function", "predict_proba"], return_response_method_used=True, ) assert_allclose(y_pred, classifier.decision_function(X_binary)) assert pos_label == 1 assert response_method == "decision_function"
Check the behaviour of passing a list of responses to `_get_response_values`.
test_get_response_values_with_response_list
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_response.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_response.py
BSD-3-Clause
def test_pandas_adapter(): """Check pandas adapter has expected behavior.""" pd = pytest.importorskip("pandas") X_np = np.asarray([[1, 0, 3], [0, 0, 1]]) columns = np.asarray(["f0", "f1", "f2"], dtype=object) index = np.asarray([0, 1]) X_df_orig = pd.DataFrame([[1, 2], [1, 3]], index=index) adapter = ADAPTERS_MANAGER.adapters["pandas"] X_container = adapter.create_container(X_np, X_df_orig, columns=lambda: columns) assert isinstance(X_container, pd.DataFrame) assert_array_equal(X_container.columns, columns) assert_array_equal(X_container.index, index) # Input dataframe's index does not change new_columns = np.asarray(["f0", "f1"], dtype=object) X_df = pd.DataFrame([[1, 2], [1, 3]], index=[10, 12]) new_df = adapter.create_container(X_df, X_df_orig, columns=new_columns) assert_array_equal(new_df.columns, new_columns) assert_array_equal(new_df.index, X_df.index) assert adapter.is_supported_container(X_df) assert not adapter.is_supported_container(X_np) # adapter.update_columns updates the columns new_columns = np.array(["a", "c"], dtype=object) new_df = adapter.rename_columns(X_df, new_columns) assert_array_equal(new_df.columns, new_columns) # adapter.hstack stacks the dataframes horizontally. X_df_1 = pd.DataFrame([[1, 2, 5], [3, 4, 6]], columns=["a", "b", "e"]) X_df_2 = pd.DataFrame([[4], [5]], columns=["c"]) X_stacked = adapter.hstack([X_df_1, X_df_2]) expected_df = pd.DataFrame( [[1, 2, 5, 4], [3, 4, 6, 5]], columns=["a", "b", "e", "c"] ) pd.testing.assert_frame_equal(X_stacked, expected_df) # check that we update properly the columns even with duplicate column names # this use-case potentially happen when using ColumnTransformer # non-regression test for gh-28260 X_df = pd.DataFrame([[1, 2], [1, 3]], columns=["a", "a"]) new_columns = np.array(["x__a", "y__a"], dtype=object) new_df = adapter.rename_columns(X_df, new_columns) assert_array_equal(new_df.columns, new_columns) # check the behavior of the inplace parameter in `create_container` # we should trigger a copy X_df = pd.DataFrame([[1, 2], [1, 3]], index=index) X_output = adapter.create_container(X_df, X_df, columns=["a", "b"], inplace=False) assert X_output is not X_df assert list(X_df.columns) == [0, 1] assert list(X_output.columns) == ["a", "b"] # the operation is inplace X_df = pd.DataFrame([[1, 2], [1, 3]], index=index) X_output = adapter.create_container(X_df, X_df, columns=["a", "b"], inplace=True) assert X_output is X_df assert list(X_df.columns) == ["a", "b"] assert list(X_output.columns) == ["a", "b"]
Check pandas adapter has expected behavior.
test_pandas_adapter
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_set_output.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_set_output.py
BSD-3-Clause
def test_polars_adapter(): """Check Polars adapter has expected behavior.""" pl = pytest.importorskip("polars") X_np = np.array([[1, 0, 3], [0, 0, 1]]) columns = ["f1", "f2", "f3"] X_df_orig = pl.DataFrame(X_np, schema=columns, orient="row") adapter = ADAPTERS_MANAGER.adapters["polars"] X_container = adapter.create_container(X_np, X_df_orig, columns=lambda: columns) assert isinstance(X_container, pl.DataFrame) assert_array_equal(X_container.columns, columns) # Update columns with create_container new_columns = np.asarray(["a", "b", "c"], dtype=object) new_df = adapter.create_container(X_df_orig, X_df_orig, columns=new_columns) assert_array_equal(new_df.columns, new_columns) assert adapter.is_supported_container(X_df_orig) assert not adapter.is_supported_container(X_np) # adapter.update_columns updates the columns new_columns = np.array(["a", "c", "g"], dtype=object) new_df = adapter.rename_columns(X_df_orig, new_columns) assert_array_equal(new_df.columns, new_columns) # adapter.hstack stacks the dataframes horizontally. X_df_1 = pl.DataFrame([[1, 2, 5], [3, 4, 6]], schema=["a", "b", "e"], orient="row") X_df_2 = pl.DataFrame([[4], [5]], schema=["c"], orient="row") X_stacked = adapter.hstack([X_df_1, X_df_2]) expected_df = pl.DataFrame( [[1, 2, 5, 4], [3, 4, 6, 5]], schema=["a", "b", "e", "c"], orient="row" ) from polars.testing import assert_frame_equal assert_frame_equal(X_stacked, expected_df) # check the behavior of the inplace parameter in `create_container` # we should trigger a copy X_df = pl.DataFrame([[1, 2], [1, 3]], schema=["a", "b"], orient="row") X_output = adapter.create_container(X_df, X_df, columns=["c", "d"], inplace=False) assert X_output is not X_df assert list(X_df.columns) == ["a", "b"] assert list(X_output.columns) == ["c", "d"] # the operation is inplace X_df = pl.DataFrame([[1, 2], [1, 3]], schema=["a", "b"], orient="row") X_output = adapter.create_container(X_df, X_df, columns=["c", "d"], inplace=True) assert X_output is X_df assert list(X_df.columns) == ["c", "d"] assert list(X_output.columns) == ["c", "d"]
Check Polars adapter has expected behavior.
test_polars_adapter
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_set_output.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_set_output.py
BSD-3-Clause
def test_set_output_method(dataframe_lib): """Check that the output is a dataframe.""" lib = pytest.importorskip(dataframe_lib) X = np.asarray([[1, 0, 3], [0, 0, 1]]) est = EstimatorWithSetOutput().fit(X) # transform=None is a no-op est2 = est.set_output(transform=None) assert est2 is est X_trans_np = est2.transform(X) assert isinstance(X_trans_np, np.ndarray) est.set_output(transform=dataframe_lib) X_trans_pd = est.transform(X) assert isinstance(X_trans_pd, lib.DataFrame)
Check that the output is a dataframe.
test_set_output_method
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_set_output.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_set_output.py
BSD-3-Clause
def test_set_output_method_error(): """Check transform fails with invalid transform.""" X = np.asarray([[1, 0, 3], [0, 0, 1]]) est = EstimatorWithSetOutput().fit(X) est.set_output(transform="bad") msg = "output config must be in" with pytest.raises(ValueError, match=msg): est.transform(X)
Check transform fails with invalid transform.
test_set_output_method_error
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_set_output.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_set_output.py
BSD-3-Clause
def test_get_output_auto_wrap_false(): """Check that auto_wrap_output_keys=None does not wrap.""" est = EstimatorWithSetOutputNoAutoWrap() assert not hasattr(est, "set_output") X = np.asarray([[1, 0, 3], [0, 0, 1]]) assert X is est.transform(X)
Check that auto_wrap_output_keys=None does not wrap.
test_get_output_auto_wrap_false
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_set_output.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_set_output.py
BSD-3-Clause
def test_set_output_mixin_custom_mixin(): """Check that multiple init_subclasses passes parameters up.""" class BothMixinEstimator(_SetOutputMixin, AnotherMixin, custom_parameter=123): def transform(self, X, y=None): return X def get_feature_names_out(self, input_features=None): return input_features est = BothMixinEstimator() assert est.custom_parameter == 123 assert hasattr(est, "set_output")
Check that multiple `__init_subclass__` hooks pass parameters up.
test_set_output_mixin_custom_mixin
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_set_output.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_set_output.py
BSD-3-Clause
def test_set_output_mro(): """Check that multi-inheritance resolves to the correct class method. Non-regression test gh-25293. """ class Base(_SetOutputMixin): def transform(self, X): return "Base" class A(Base): pass class B(Base): def transform(self, X): return "B" class C(A, B): pass assert C().transform(None) == "B"
Check that multiple inheritance resolves to the correct class method. Non-regression test for gh-25293.
test_set_output_mro
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_set_output.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_set_output.py
BSD-3-Clause
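The resolution order checked above is plain Python C3 linearization; _SetOutputMixin only has to avoid re-wrapping transform at each level. A standalone sketch without the mixin shows the same behavior:

class Base:
    def transform(self, X):
        return "Base"

class A(Base):
    pass

class B(Base):
    def transform(self, X):
        return "B"

class C(A, B):
    pass

# C3 linearization: C -> A -> B -> Base -> object, so B.transform wins over Base.transform
print([cls.__name__ for cls in C.__mro__])  # ['C', 'A', 'B', 'Base', 'object']
print(C().transform(None))                  # 'B'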
def test_set_output_pandas_keep_index(): """Check that set_output does not override index. Non-regression test for gh-25730. """ pd = pytest.importorskip("pandas") X = pd.DataFrame([[1, 2, 3], [4, 5, 6]], index=[0, 1]) est = EstimatorWithSetOutputIndex().set_output(transform="pandas") est.fit(X) X_trans = est.transform(X) assert_array_equal(X_trans.index, ["s0", "s1"])
Check that set_output does not override index. Non-regression test for gh-25730.
test_set_output_pandas_keep_index
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_set_output.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_set_output.py
BSD-3-Clause
def test_set_output_named_tuple_out(): """Check that namedtuples are kept by default.""" Output = namedtuple("Output", "X, Y") X = np.asarray([[1, 2, 3]]) est = EstimatorReturnTuple(OutputTuple=Output) X_trans = est.transform(X) assert isinstance(X_trans, Output) assert_array_equal(X_trans.X, X) assert_array_equal(X_trans.Y, 2 * X)
Check that namedtuples are kept by default.
test_set_output_named_tuple_out
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_set_output.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_set_output.py
BSD-3-Clause
def test_set_output_list_input(dataframe_lib): """Check set_output for list input. Non-regression test for #27037. """ lib = pytest.importorskip(dataframe_lib) X = [[0, 1, 2, 3], [4, 5, 6, 7]] est = EstimatorWithListInput() est.set_output(transform=dataframe_lib) X_out = est.fit(X).transform(X) assert isinstance(X_out, lib.DataFrame) assert_array_equal(X_out.columns, ["X0", "X1", "X2", "X3"])
Check set_output for list input. Non-regression test for #27037.
test_set_output_list_input
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_set_output.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_set_output.py
BSD-3-Clause
def test_incr_mean_variance_axis_dim_mismatch(sparse_constructor): """Check that we raise proper error when axis=1 and the dimension mismatch. Non-regression test for: https://github.com/scikit-learn/scikit-learn/pull/18655 """ n_samples, n_features = 60, 4 rng = np.random.RandomState(42) X = sparse_constructor(rng.rand(n_samples, n_features)) last_mean = np.zeros(n_features) last_var = np.zeros_like(last_mean) last_n = np.zeros(last_mean.shape, dtype=np.int64) kwargs = dict(last_mean=last_mean, last_var=last_var, last_n=last_n) mean0, var0, _ = incr_mean_variance_axis(X, axis=0, **kwargs) assert_allclose(np.mean(X.toarray(), axis=0), mean0) assert_allclose(np.var(X.toarray(), axis=0), var0) # test ValueError if axis=1 and last_mean.size == n_features with pytest.raises(ValueError): incr_mean_variance_axis(X, axis=1, **kwargs) # test inconsistent shapes of last_mean, last_var, last_n kwargs = dict(last_mean=last_mean[:-1], last_var=last_var, last_n=last_n) with pytest.raises(ValueError): incr_mean_variance_axis(X, axis=0, **kwargs)
Check that we raise a proper error when axis=1 and the dimensions mismatch. Non-regression test for: https://github.com/scikit-learn/scikit-learn/pull/18655
test_incr_mean_variance_axis_dim_mismatch
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_sparsefuncs.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_sparsefuncs.py
BSD-3-Clause
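incr_mean_variance_axis accumulates per-feature mean and variance over successive sparse batches. A minimal sketch of the intended incremental usage (the function is importable from sklearn.utils.sparsefuncs):

import numpy as np
from scipy import sparse
from sklearn.utils.sparsefuncs import incr_mean_variance_axis

rng = np.random.RandomState(0)
X1 = sparse.csr_matrix(rng.rand(30, 4))
X2 = sparse.csr_matrix(rng.rand(20, 4))

# start from empty statistics, then fold in one batch at a time
mean, var, n = incr_mean_variance_axis(
    X1, axis=0, last_mean=np.zeros(4), last_var=np.zeros(4),
    last_n=np.zeros(4, dtype=np.int64),
)
mean, var, n = incr_mean_variance_axis(
    X2, axis=0, last_mean=mean, last_var=var, last_n=n,
)

# the incremental statistics match the batch statistics on the stacked data
X_all = np.vstack([X1.toarray(), X2.toarray()])
assert np.allclose(mean, X_all.mean(axis=0))
assert np.allclose(var, X_all.var(axis=0))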
def centered_matrices(request): """Returns equivalent tuple[sp.linalg.LinearOperator, np.ndarray].""" sparse_container = request.param random_state = np.random.default_rng(42) X_sparse = sparse_container( sp.random(500, 100, density=0.1, format="csr", random_state=random_state) ) X_dense = X_sparse.toarray() mu = np.asarray(X_sparse.mean(axis=0)).ravel() X_sparse_centered = _implicit_column_offset(X_sparse, mu) X_dense_centered = X_dense - mu return X_sparse_centered, X_dense_centered
Returns equivalent tuple[sp.linalg.LinearOperator, np.ndarray].
centered_matrices
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_sparsefuncs.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_sparsefuncs.py
BSD-3-Clause
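_implicit_column_offset is a private helper that exposes the column-centered matrix X - mu as a LinearOperator, so the sparse matrix never has to be densified. A rough standalone equivalent built directly on scipy, assuming only matvec/rmatvec products are needed:

import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import LinearOperator

X = sp.random(500, 100, density=0.1, format="csr", random_state=42)
mu = np.asarray(X.mean(axis=0)).ravel()

# (X - 1 mu^T) @ v == X @ v - (mu @ v) * 1, computed without materializing X - mu
X_centered = LinearOperator(
    shape=X.shape,
    matvec=lambda v: X @ v - np.full(X.shape[0], mu @ v),
    rmatvec=lambda v: X.T @ v - mu * v.sum(),
    dtype=X.dtype,
)

v = np.random.default_rng(0).standard_normal(X.shape[1])
assert np.allclose(X_centered @ v, (X.toarray() - mu) @ v)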
def test_weighted_percentile(): """Check `weighted_percentile` on artificial data with obvious median.""" y = np.empty(102, dtype=np.float64) y[:50] = 0 y[-51:] = 2 y[-1] = 100000 y[50] = 1 sw = np.ones(102, dtype=np.float64) sw[-1] = 0.0 value = _weighted_percentile(y, sw, 50) assert approx(value) == 1
Check `weighted_percentile` on artificial data with obvious median.
test_weighted_percentile
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_stats.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_stats.py
BSD-3-Clause
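For reference, the inverted-CDF rule that the weighted-percentile tests rely on can be written in a few lines for the 1-D case. This is a simplified sketch, not the library helper (it ignores NaN handling, 2-D input, and array API support):

import numpy as np

def weighted_percentile_1d(values, weights, percentile_rank):
    """Smallest value whose cumulative weight reaches the requested fraction."""
    values = np.asarray(values, dtype=float)
    weights = np.asarray(weights, dtype=float)
    order = np.argsort(values)
    values, weights = values[order], weights[order]
    cum_weights = np.cumsum(weights)
    target = percentile_rank / 100.0 * cum_weights[-1]
    return values[np.searchsorted(cum_weights, target)]

# same data as the test above: the zero-weight outlier does not move the median
y = np.array([0.0] * 50 + [1.0] + [2.0] * 50 + [100000.0])
sw = np.ones_like(y)
sw[-1] = 0.0
print(weighted_percentile_1d(y, sw, 50))  # 1.0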
def test_weighted_percentile_equal(): """Check `weighted_percentile` with all weights equal to 1.""" y = np.empty(102, dtype=np.float64) y.fill(0.0) sw = np.ones(102, dtype=np.float64) score = _weighted_percentile(y, sw, 50) assert approx(score) == 0
Check `weighted_percentile` with all weights equal to 1.
test_weighted_percentile_equal
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_stats.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_stats.py
BSD-3-Clause
def test_weighted_percentile_zero_weight(): """Check `weighted_percentile` with all weights equal to 0.""" y = np.empty(102, dtype=np.float64) y.fill(1.0) sw = np.ones(102, dtype=np.float64) sw.fill(0.0) value = _weighted_percentile(y, sw, 50) assert approx(value) == 1.0
Check `weighted_percentile` with all weights equal to 0.
test_weighted_percentile_zero_weight
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_stats.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_stats.py
BSD-3-Clause
def test_weighted_percentile_zero_weight_zero_percentile(): """Check `weighted_percentile(percentile_rank=0)` behaves correctly. Ensures that (leading)zero-weight observations ignored when `percentile_rank=0`. See #20528 for details. """ y = np.array([0, 1, 2, 3, 4, 5]) sw = np.array([0, 0, 1, 1, 1, 0]) value = _weighted_percentile(y, sw, 0) assert approx(value) == 2 value = _weighted_percentile(y, sw, 50) assert approx(value) == 3 value = _weighted_percentile(y, sw, 100) assert approx(value) == 4
Check that `weighted_percentile(percentile_rank=0)` behaves correctly. Ensures that (leading) zero-weight observations are ignored when `percentile_rank=0`. See #20528 for details.
test_weighted_percentile_zero_weight_zero_percentile
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_stats.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_stats.py
BSD-3-Clause
def test_weighted_median_equal_weights(global_random_seed): """Checks `_weighted_percentile(percentile_rank=50)` is the same as `np.median`. `sample_weights` are all 1s and the number of samples is odd. When number of samples is odd, `_weighted_percentile` always falls on a single observation (not between 2 values, in which case the lower value would be taken) and is thus equal to `np.median`. For an even number of samples, this check will not always hold as (note that for some other percentile methods it will always hold). See #17370 for details. """ rng = np.random.RandomState(global_random_seed) x = rng.randint(10, size=11) weights = np.ones(x.shape) median = np.median(x) w_median = _weighted_percentile(x, weights) assert median == approx(w_median)
Checks that `_weighted_percentile(percentile_rank=50)` is the same as `np.median`. All `sample_weight` values are 1 and the number of samples is odd. When the number of samples is odd, `_weighted_percentile` always falls on a single observation (not between 2 values, in which case the lower value would be taken) and is thus equal to `np.median`. For an even number of samples, this check will not always hold (note that for some other percentile methods it would always hold). See #17370 for details.
test_weighted_median_equal_weights
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_stats.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_stats.py
BSD-3-Clause
def test_weighted_percentile_array_api_consistency( global_random_seed, array_namespace, device, dtype_name, data, weights, percentile ): """Check `_weighted_percentile` gives consistent results with array API.""" if array_namespace == "array_api_strict": try: import array_api_strict except ImportError: pass else: if device == array_api_strict.Device("device1"): # See https://github.com/data-apis/array-api-strict/issues/134 pytest.xfail( "array_api_strict has bug when indexing with tuple of arrays " "on non-'CPU_DEVICE' devices." ) xp = _array_api_for_tests(array_namespace, device) # Skip test for percentile=0 edge case (#20528) on namespace/device where # xp.nextafter is broken. This is the case for torch with MPS device: # https://github.com/pytorch/pytorch/issues/150027 zero = xp.zeros(1, device=device) one = xp.ones(1, device=device) if percentile == 0 and xp.all(xp.nextafter(zero, one) == zero): pytest.xfail(f"xp.nextafter is broken on {device}") rng = np.random.RandomState(global_random_seed) X_np = data(rng) if callable(data) else data weights_np = weights(rng) if callable(weights) else weights # Ensure `data` of correct dtype X_np = X_np.astype(dtype_name) result_np = _weighted_percentile(X_np, weights_np, percentile) # Convert to Array API arrays X_xp = xp.asarray(X_np, device=device) weights_xp = xp.asarray(weights_np, device=device) with config_context(array_api_dispatch=True): result_xp = _weighted_percentile(X_xp, weights_xp, percentile) assert array_device(result_xp) == array_device(X_xp) assert get_namespace(result_xp)[0] == get_namespace(X_xp)[0] result_xp_np = _convert_to_numpy(result_xp, xp=xp) assert result_xp_np.dtype == result_np.dtype assert result_xp_np.shape == result_np.shape assert_allclose(result_np, result_xp_np) # Check dtype correct (`sample_weight` should follow `array`) if dtype_name == "float32": assert result_xp_np.dtype == result_np.dtype == np.float32 else: assert result_xp_np.dtype == np.float64
Check `_weighted_percentile` gives consistent results with array API.
test_weighted_percentile_array_api_consistency
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_stats.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_stats.py
BSD-3-Clause
def test_weighted_percentile_nan_filtered(sample_weight_ndim, global_random_seed): """Test that calling _weighted_percentile on an array with nan values returns the same results as calling _weighted_percentile on a filtered version of the data. We test both with sample_weight of the same shape as the data and with one-dimensional sample_weight.""" rng = np.random.RandomState(global_random_seed) array_with_nans = rng.rand(100, 10) array_with_nans[rng.rand(*array_with_nans.shape) < 0.5] = np.nan nan_mask = np.isnan(array_with_nans) if sample_weight_ndim == 2: sample_weight = rng.randint(1, 6, size=(100, 10)) else: sample_weight = rng.randint(1, 6, size=(100,)) # Find the weighted percentile on the array with nans: results = _weighted_percentile(array_with_nans, sample_weight, 30) # Find the weighted percentile on the filtered array: filtered_array = [ array_with_nans[~nan_mask[:, col], col] for col in range(array_with_nans.shape[1]) ] if sample_weight.ndim == 1: sample_weight = np.repeat(sample_weight, array_with_nans.shape[1]).reshape( array_with_nans.shape[0], array_with_nans.shape[1] ) filtered_weights = [ sample_weight[~nan_mask[:, col], col] for col in range(array_with_nans.shape[1]) ] expected_results = np.array( [ _weighted_percentile(filtered_array[col], filtered_weights[col], 30) for col in range(array_with_nans.shape[1]) ] ) assert_array_equal(expected_results, results)
Test that calling _weighted_percentile on an array with nan values returns the same results as calling _weighted_percentile on a filtered version of the data. We test both with sample_weight of the same shape as the data and with one-dimensional sample_weight.
test_weighted_percentile_nan_filtered
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_stats.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_stats.py
BSD-3-Clause
def test_weighted_percentile_all_nan_column(): """Check that nans are ignored in general, except for all NaN columns.""" array = np.array( [ [np.nan, 5], [np.nan, 1], [np.nan, np.nan], [np.nan, np.nan], [np.nan, 2], [np.nan, np.nan], ] ) weights = np.ones_like(array) percentile_rank = 90 values = _weighted_percentile(array, weights, percentile_rank) # The percentile of the second column should be `5` even though there are many nan # values present; the percentile of the first column can only be nan, since there # are no other possible values: assert np.array_equal(values, np.array([np.nan, 5]), equal_nan=True)
Check that nans are ignored in general, except for all NaN columns.
test_weighted_percentile_all_nan_column
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_stats.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_stats.py
BSD-3-Clause
def test_weighted_percentile_like_numpy_quantile(percentile, global_random_seed): """Check that _weighted_percentile delivers equivalent results as np.quantile with weights.""" rng = np.random.RandomState(global_random_seed) array = rng.rand(10, 100) sample_weight = rng.randint(1, 6, size=(10, 100)) percentile_weighted_percentile = _weighted_percentile( array, sample_weight, percentile ) percentile_numpy_quantile = np.quantile( array, percentile / 100, weights=sample_weight, axis=0, method="inverted_cdf" ) assert_array_equal(percentile_weighted_percentile, percentile_numpy_quantile)
Check that _weighted_percentile delivers results equivalent to np.quantile with weights.
test_weighted_percentile_like_numpy_quantile
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_stats.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_stats.py
BSD-3-Clause
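Since NumPy 2.0, np.quantile accepts frequency weights together with method="inverted_cdf", which is the equivalence the test above relies on. With integer weights this is the same as repeating each sample weight-many times, as this small sketch illustrates:

import numpy as np  # np.quantile(..., weights=...) requires NumPy >= 2.0

rng = np.random.default_rng(0)
x = rng.random(100)
w = rng.integers(1, 6, size=100)

q_weighted = np.quantile(x, 0.3, weights=w, method="inverted_cdf")
q_repeated = np.quantile(np.repeat(x, w), 0.3, method="inverted_cdf")
assert q_weighted == q_repeated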
def test_weighted_percentile_like_numpy_nanquantile(percentile, global_random_seed): """Check that _weighted_percentile delivers equivalent results as np.nanquantile with weights.""" rng = np.random.RandomState(global_random_seed) array_with_nans = rng.rand(10, 100) array_with_nans[rng.rand(*array_with_nans.shape) < 0.5] = np.nan sample_weight = rng.randint(1, 6, size=(10, 100)) percentile_weighted_percentile = _weighted_percentile( array_with_nans, sample_weight, percentile ) percentile_numpy_nanquantile = np.nanquantile( array_with_nans, percentile / 100, weights=sample_weight, axis=0, method="inverted_cdf", ) assert_array_equal(percentile_weighted_percentile, percentile_numpy_nanquantile)
Check that _weighted_percentile delivers results equivalent to np.nanquantile with weights.
test_weighted_percentile_like_numpy_nanquantile
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_stats.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_stats.py
BSD-3-Clause
def test_no___sklearn_tags__with_more_tags(): """Test that calling `get_tags` on a class that defines `_more_tags` but not `__sklearn_tags__` raises an error. """ class MoreTagsEstimator(BaseEstimator): def _more_tags(self): return {"requires_y": True} # pragma: no cover with pytest.raises( TypeError, match="has defined either `_more_tags` or `_get_tags`" ): check_estimator_tags_renamed("MoreTagsEstimator", MoreTagsEstimator())
Test that calling `get_tags` on a class that defines `_more_tags` but not `__sklearn_tags__` raises an error.
test_no___sklearn_tags__with_more_tags
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_tags.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_tags.py
BSD-3-Clause
def test_tags_no_sklearn_tags_concrete_implementation(): """Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/30479 Either the estimator doesn't implement `__sklearn_tags` or there is no class implementing `__sklearn_tags__` without calling `super().__sklearn_tags__()` in its mro. Thus, we raise a warning and request to inherit from `BaseEstimator` that implements `__sklearn_tags__`. """ X = np.array([[1, 2], [2, 3], [3, 4]]) y = np.array([1, 0, 1]) # 1st case, the estimator inherits from a class that only implements # `__sklearn_tags__` by calling `super().__sklearn_tags__()`. class MyEstimator(ClassifierMixin): def __init__(self, *, param=1): self.param = param def fit(self, X, y=None): self.is_fitted_ = True return self def predict(self, X): return np.full(shape=X.shape[0], fill_value=self.param) my_pipeline = Pipeline([("estimator", MyEstimator(param=1))]) with pytest.warns(DeprecationWarning, match="The following error was raised"): my_pipeline.fit(X, y).predict(X) # 2nd case, the estimator doesn't implement `__sklearn_tags__` at all. class MyEstimator2: def __init__(self, *, param=1): self.param = param def fit(self, X, y=None): self.is_fitted_ = True return self def predict(self, X): return np.full(shape=X.shape[0], fill_value=self.param) my_pipeline = Pipeline([("estimator", MyEstimator2(param=1))]) with pytest.warns(DeprecationWarning, match="The following error was raised"): my_pipeline.fit(X, y).predict(X) # check that we still raise an error if it is not a AttributeError or related to # __sklearn_tags__ class MyEstimator3(MyEstimator, BaseEstimator): def __init__(self, *, param=1, error_type=AttributeError): self.param = param self.error_type = error_type def __sklearn_tags__(self): super().__sklearn_tags__() raise self.error_type("test") for error_type in (AttributeError, TypeError, ValueError): estimator = MyEstimator3(param=1, error_type=error_type) with pytest.raises(error_type): get_tags(estimator)
Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/30479 Either the estimator doesn't implement `__sklearn_tags__` or there is no class in its MRO implementing `__sklearn_tags__` without calling `super().__sklearn_tags__()`. Thus, we raise a warning and request inheriting from `BaseEstimator`, which implements `__sklearn_tags__`.
test_tags_no_sklearn_tags_concrete_implementation
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_tags.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_tags.py
BSD-3-Clause
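The fix suggested by that warning is to inherit from BaseEstimator, which provides the base __sklearn_tags__ implementation; estimator-specific adjustments then go through super(). A minimal sketch, assuming the dataclass-based tags API introduced in scikit-learn 1.6:

import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin

class MyClassifier(ClassifierMixin, BaseEstimator):
    def fit(self, X, y):
        self.classes_ = np.unique(y)
        return self

    def predict(self, X):
        return np.full(np.asarray(X).shape[0], self.classes_[0])

    def __sklearn_tags__(self):
        # always start from the tags provided by the parent classes
        tags = super().__sklearn_tags__()
        tags.input_tags.allow_nan = False  # adjust individual fields as needed
        return tags

tags = MyClassifier().__sklearn_tags__()
print(tags.estimator_type)  # 'classifier'
print(tags.requires_fit)    # True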
def score(self, X): """This is available only if delegate has score. Parameters --------- y : ndarray Parameter y """
This is available only if delegate has score. Parameters --------- y : ndarray Parameter y
score
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_testing.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_testing.py
BSD-3-Clause
def test_convert_container( constructor_name, container_type, dtype, superdtype, ): """Check that we convert the container to the right type of array with the right data type.""" if constructor_name in ( "dataframe", "index", "polars", "polars_series", "pyarrow", "pyarrow_array", "series", ): # delay the import of pandas/polars within the function to only skip this test # instead of the whole file container_type = container_type() container = [0, 1] container_converted = _convert_container( container, constructor_name, dtype=dtype, ) assert isinstance(container_converted, container_type) if constructor_name in ("list", "tuple", "index"): # list and tuple will use Python class dtype: int, float # pandas index will always use high precision: np.int64 and np.float64 assert np.issubdtype(type(container_converted[0]), superdtype) elif constructor_name in ("polars", "polars_series", "pyarrow", "pyarrow_array"): return elif hasattr(container_converted, "dtype"): assert container_converted.dtype == dtype elif hasattr(container_converted, "dtypes"): assert container_converted.dtypes[0] == dtype
Check that we convert the container to the right type of array with the right data type.
test_convert_container
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_testing.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_testing.py
BSD-3-Clause
def test_attach_unique_not_ndarray(): """Test that when not np.ndarray, we don't touch the array.""" arr = [1, 2, 2, 3, 4, 4, 5] arr_ = attach_unique(arr) assert arr_ is arr
Test that when not np.ndarray, we don't touch the array.
test_attach_unique_not_ndarray
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_unique.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_unique.py
BSD-3-Clause
def test_attach_unique_returns_view(): """Test that attach_unique returns a view of the array.""" arr = np.array([1, 2, 2, 3, 4, 4, 5]) arr_ = attach_unique(arr) assert arr_.base is arr
Test that attach_unique returns a view of the array.
test_attach_unique_returns_view
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_unique.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_unique.py
BSD-3-Clause
def test_check_array_keeps_unique(): """Test that check_array keeps the unique metadata.""" arr = np.array([[1, 2, 2, 3, 4, 4, 5]]) arr_ = attach_unique(arr) arr_ = check_array(arr_) assert_array_equal(arr_.dtype.metadata["unique"], np.array([1, 2, 3, 4, 5])) assert_array_equal(arr_, arr)
Test that check_array keeps the unique metadata.
test_check_array_keeps_unique
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_unique.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_unique.py
BSD-3-Clause
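attach_unique is private, but the mechanism these tests probe is plain NumPy: the precomputed unique values are carried in the dtype metadata of a view, so no data is copied. A rough sketch of that mechanism, assuming this storage scheme:

import numpy as np

arr = np.array([1, 2, 2, 3, 4, 4, 5])
uniques = np.unique(arr)

# a dtype can carry arbitrary metadata; viewing with it shares the original buffer
dtype_with_meta = np.dtype(arr.dtype, metadata={"unique": uniques})
arr_view = arr.view(dtype=dtype_with_meta)

print(arr_view.base is arr)               # True: it is a view, not a copy
print(arr_view.dtype.metadata["unique"])  # [1 2 3 4 5]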
def test_check_array_series_err_msg(): """ Check that we raise a proper error message when passing a Series and we expect a 2-dimensional container. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/27498 """ pd = pytest.importorskip("pandas") ser = pd.Series([1, 2, 3]) msg = f"Expected a 2-dimensional container but got {type(ser)} instead." with pytest.raises(ValueError, match=msg): check_array(ser, ensure_2d=True)
Check that we raise a proper error message when passing a Series and we expect a 2-dimensional container. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/27498
test_check_array_series_err_msg
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_validation.py
BSD-3-Clause
def test_check_array_numeric_error(X): """Test that check_array errors when it receives an array of bytes/string while a numeric dtype is required.""" expected_msg = r"dtype='numeric' is not compatible with arrays of bytes/strings" with pytest.raises(ValueError, match=expected_msg): check_array(X, dtype="numeric")
Test that check_array errors when it receives an array of bytes/strings while a numeric dtype is required.
test_check_array_numeric_error
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_validation.py
BSD-3-Clause
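check_array with dtype="numeric" accepts anything it can coerce to a numeric dtype but rejects text data outright. A small sketch of both outcomes:

import numpy as np
from sklearn.utils import check_array

X_obj = np.array([[1, 2], [3, 4]], dtype=object)
print(check_array(X_obj, dtype="numeric").dtype)  # float64: object data is coerced

X_str = np.array([["a", "b"], ["c", "d"]])
try:
    check_array(X_str, dtype="numeric")
except ValueError as exc:
    print(exc)  # dtype='numeric' is not compatible with arrays of bytes/strings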
def test_check_array_panadas_na_support_series(): """Check check_array is correct with pd.NA in a series.""" pd = pytest.importorskip("pandas") X_int64 = pd.Series([1, 2, pd.NA], dtype="Int64") msg = "Input contains NaN" with pytest.raises(ValueError, match=msg): check_array(X_int64, ensure_all_finite=True, ensure_2d=False) X_out = check_array(X_int64, ensure_all_finite=False, ensure_2d=False) assert_allclose(X_out, [1, 2, np.nan]) assert X_out.dtype == np.float64 X_out = check_array( X_int64, ensure_all_finite=False, ensure_2d=False, dtype=np.float32 ) assert_allclose(X_out, [1, 2, np.nan]) assert X_out.dtype == np.float32
Check that check_array is correct with pd.NA in a series.
test_check_array_panadas_na_support_series
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_validation.py
BSD-3-Clause
def test_check_is_fitted_stateless(): """Check that check_is_fitted passes for stateless estimators.""" class StatelessEstimator(BaseEstimator): def fit(self, **kwargs): return self # pragma: no cover def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.requires_fit = False return tags check_is_fitted(StatelessEstimator())
Check that check_is_fitted passes for stateless estimators.
test_check_is_fitted_stateless
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_validation.py
BSD-3-Clause
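check_is_fitted normally keys off fitted attributes (those ending in an underscore); requires_fit=False, as in the stateless estimator above, is the opt-out. Typical usage on a stateful estimator:

import numpy as np
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LinearRegression
from sklearn.utils.validation import check_is_fitted

est = LinearRegression()
try:
    check_is_fitted(est)
except NotFittedError:
    print("not fitted yet")

est.fit(np.array([[0.0], [1.0]]), np.array([0.0, 1.0]))
check_is_fitted(est)  # passes: fitted attributes such as coef_ now exist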
def test_check_consistent_length(): """Test that `check_consistent_length` raises on inconsistent lengths and wrong input types trigger TypeErrors.""" check_consistent_length([1], [2], [3], [4], [5]) check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ["a", "b"]) check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2))) with pytest.raises(ValueError, match="inconsistent numbers of samples"): check_consistent_length([1, 2], [1]) with pytest.raises(TypeError, match=r"got <\w+ 'int'>"): check_consistent_length([1, 2], 1) with pytest.raises(TypeError, match=r"got <\w+ 'object'>"): check_consistent_length([1, 2], object()) with pytest.raises(TypeError): check_consistent_length([1, 2], np.array(1)) # Despite ensembles having __len__ they must raise TypeError with pytest.raises(TypeError, match="Expected sequence or array-like"): check_consistent_length([1, 2], RandomForestRegressor()) # XXX: We should have a test with a string, but what is correct behaviour?
Test that `check_consistent_length` raises on inconsistent lengths and that wrong input types trigger TypeErrors.
test_check_consistent_length
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_validation.py
BSD-3-Clause
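check_consistent_length only compares the first dimension (number of samples) of its arguments, so mixing lists, arrays, and sparse matrices is fine as long as the lengths agree. A small sketch:

import numpy as np
from sklearn.utils import check_consistent_length

X = np.zeros((3, 2))
y = [0, 1, 0]
check_consistent_length(X, y)  # OK: both have 3 samples

try:
    check_consistent_length(X, [0, 1])
except ValueError as exc:
    print(exc)  # inconsistent numbers of samples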
def test_check_consistent_length_array_api(array_namespace, device, _): """Test that check_consistent_length works with different array types.""" xp = _array_api_for_tests(array_namespace, device) with config_context(array_api_dispatch=True): check_consistent_length( xp.asarray([1, 2, 3], device=device), xp.asarray([[1, 1], [2, 2], [3, 3]], device=device), [1, 2, 3], ["a", "b", "c"], np.asarray(("a", "b", "c"), dtype=object), sp.csr_array([[0, 1], [1, 0], [0, 0]]), ) with pytest.raises(ValueError, match="inconsistent numbers of samples"): check_consistent_length( xp.asarray([1, 2], device=device), xp.asarray([1], device=device) )
Test that check_consistent_length works with different array types.
test_check_consistent_length_array_api
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_validation.py
BSD-3-Clause
def test_check_dataframe_with_only_bool(): """Check that dataframe with bool return a boolean arrays.""" pd = importorskip("pandas") df = pd.DataFrame({"bool": [True, False, True]}) array = check_array(df, dtype=None) assert array.dtype == np.bool_ assert_array_equal(array, [[True], [False], [True]]) # common dtype is int for bool + int df = pd.DataFrame( {"bool": [True, False, True], "int": [1, 2, 3]}, columns=["bool", "int"], ) array = check_array(df, dtype="numeric") assert array.dtype == np.int64 assert_array_equal(array, [[1, 1], [0, 2], [1, 3]])
Check that a dataframe with bool columns returns a boolean array.
test_check_dataframe_with_only_bool
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_validation.py
BSD-3-Clause
def test_check_dataframe_with_only_boolean(): """Check that dataframe with boolean return a float array with dtype=None""" pd = importorskip("pandas") df = pd.DataFrame({"bool": pd.Series([True, False, True], dtype="boolean")}) array = check_array(df, dtype=None) assert array.dtype == np.float64 assert_array_equal(array, [[True], [False], [True]])
Check that a dataframe with the pandas boolean extension dtype returns a float array with dtype=None
test_check_dataframe_with_only_boolean
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_validation.py
BSD-3-Clause
def test_estimator_has( estimator_name, estimator_value, delegates, expected_result, expected_exception ): """ Tests the _estimator_has function by verifying: - Functionality with default and custom delegates. - Raises ValueError if delegates are missing. - Raises AttributeError if the specified attribute is missing. """ # always checks for attribute - "attribute_present" # ["estimator_", "estimator"] is default value for delegates if delegates is None: check = _estimator_has("attribute_present") else: check = _estimator_has("attribute_present", delegates=delegates) class MockEstimator: pass a = MockEstimator() setattr(a, estimator_name, estimator_value) if expected_exception: with pytest.raises(expected_exception): check(a) else: assert check(a) == expected_result
Tests the _estimator_has function by verifying: - Functionality with default and custom delegates. - Raises ValueError if delegates are missing. - Raises AttributeError if the specified attribute is missing.
test_estimator_has
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_validation.py
BSD-3-Clause
def test_check_scalar_valid(x): """Test that check_scalar returns no error/warning if valid inputs are provided""" with warnings.catch_warnings(): warnings.simplefilter("error") scalar = check_scalar( x, "test_name", target_type=numbers.Real, min_val=2, max_val=5, include_boundaries="both", ) assert scalar == x
Test that check_scalar returns no error/warning if valid inputs are provided
test_check_scalar_valid
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_validation.py
BSD-3-Clause
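check_scalar validates a single hyperparameter's type and range and returns it unchanged when valid. A small sketch of both outcomes:

from sklearn.utils import check_scalar

n = check_scalar(3, "n_components", target_type=int, min_val=1)
print(n)  # 3

try:
    check_scalar(0, "n_components", target_type=int, min_val=1)
except ValueError as exc:
    print(exc)  # n_components == 0, must be >= 1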
def test_check_scalar_invalid( x, target_name, target_type, min_val, max_val, include_boundaries, err_msg ): """Test that check_scalar returns the right error if a wrong input is given""" with pytest.raises(Exception) as raised_error: check_scalar( x, target_name, target_type=target_type, min_val=min_val, max_val=max_val, include_boundaries=include_boundaries, ) assert str(raised_error.value) == str(err_msg) assert isinstance(raised_error.value, type(err_msg))
Test that check_scalar returns the right error if a wrong input is given
test_check_scalar_invalid
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_validation.py
BSD-3-Clause
def test_check_pandas_sparse_mixed_dtypes(ntype1, ntype2): """Check that pandas dataframes having sparse extension arrays with mixed dtypes works.""" pd = pytest.importorskip("pandas") df = pd.DataFrame( { "col1": pd.arrays.SparseArray([0, 1, 0], dtype=ntype1, fill_value=0), "col2": pd.arrays.SparseArray([1, 0, 1], dtype=ntype2, fill_value=0), } ) check_array(df, accept_sparse=["csr", "csc"])
Check that pandas dataframes having sparse extension arrays with mixed dtypes work.
test_check_pandas_sparse_mixed_dtypes
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_validation.py
BSD-3-Clause
def test_get_feature_names_pandas_with_ints_no_warning(names): """Get feature names with pandas dataframes without warning. Column names with consistent dtypes will not warn, such as int or MultiIndex. """ pd = pytest.importorskip("pandas") X = pd.DataFrame([[1, 2], [4, 5], [5, 6]], columns=names) with warnings.catch_warnings(): warnings.simplefilter("error", FutureWarning) names = _get_feature_names(X) assert names is None
Get feature names with pandas dataframes without warning. Column names with consistent dtypes, such as int or MultiIndex, will not warn.
test_get_feature_names_pandas_with_ints_no_warning
python
scikit-learn/scikit-learn
sklearn/utils/tests/test_validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/tests/test_validation.py
BSD-3-Clause
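_get_feature_names is private; its public-facing counterparts are the feature_names_in_ attribute and get_feature_names_out, which are only populated from dataframes with string column labels. A minimal sketch, assuming pandas is installed:

import pandas as pd
from sklearn.preprocessing import StandardScaler

X = pd.DataFrame({"age": [1.0, 2.0, 3.0], "height": [4.0, 5.0, 6.0]})
scaler = StandardScaler().fit(X)

print(scaler.feature_names_in_)        # ['age' 'height']
print(scaler.get_feature_names_out())  # ['age' 'height']

# integer column labels are silently ignored: no feature_names_in_ is recorded
X_int_cols = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]], columns=[0, 1])
print(hasattr(StandardScaler().fit(X_int_cols), "feature_names_in_"))  # False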