code (string, lengths 66–870k) | docstring (string, lengths 19–26.7k) | func_name (string, lengths 1–138) | language (1 class) | repo (string, lengths 7–68) | path (string, lengths 5–324) | url (string, lengths 46–389) | license (7 classes) |
---|---|---|---|---|---|---|---|
def _get_data_info_by_name(
name: str,
version: Union[int, str],
data_home: Optional[str],
n_retries: int = 3,
delay: float = 1.0,
):
"""
Utilizes the OpenML dataset listing API to find a dataset by
name/version.
OpenML API function:
https://www.openml.org/api_docs#!/data/get_data_list_data_name_data_name
Parameters
----------
name : str
name of the dataset
version : int or str
If version is an integer, the exact name/version will be obtained from
OpenML. If version is the string "active", the first version of the
dataset that OpenML annotates as active is used. Any string value other
than "active" is treated as an integer.
data_home : str or None
Location to cache the response. None if no cache is required.
n_retries : int, default=3
Number of retries when HTTP errors are encountered. Errors with status
code 412 won't be retried as they represent OpenML generic errors.
delay : float, default=1.0
Number of seconds between retries.
Returns
-------
first_dataset : json
JSON representation of the first dataset object that adhered to the
search criteria.
"""
if version == "active":
# situation in which we return the oldest active version
url = _SEARCH_NAME.format(name) + "/status/active/"
error_msg = "No active dataset {} found.".format(name)
json_data = _get_json_content_from_openml_api(
url,
error_msg,
data_home=data_home,
n_retries=n_retries,
delay=delay,
)
res = json_data["data"]["dataset"]
if len(res) > 1:
first_version = version = res[0]["version"]
warning_msg = (
"Multiple active versions of the dataset matching the name"
f" {name} exist. Versions may be fundamentally different, "
f"returning version {first_version}. "
"Available versions:\n"
)
for r in res:
warning_msg += f"- version {r['version']}, status: {r['status']}\n"
warning_msg += (
f" url: https://www.openml.org/search?type=data&id={r['did']}\n"
)
warn(warning_msg)
return res[0]
# an integer version has been provided
url = (_SEARCH_NAME + "/data_version/{}").format(name, version)
try:
json_data = _get_json_content_from_openml_api(
url,
error_message=None,
data_home=data_home,
n_retries=n_retries,
delay=delay,
)
except OpenMLError:
# we can do this in 1 function call if OpenML does not require the
# specification of the dataset status (i.e., return datasets with a
# given name / version regardless of active, deactivated, etc. )
# TODO: feature request OpenML.
url += "/status/deactivated"
error_msg = "Dataset {} with version {} not found.".format(name, version)
json_data = _get_json_content_from_openml_api(
url,
error_msg,
data_home=data_home,
n_retries=n_retries,
delay=delay,
)
return json_data["data"]["dataset"][0]
|
Utilizes the OpenML dataset listing API to find a dataset by
name/version.
OpenML API function:
https://www.openml.org/api_docs#!/data/get_data_list_data_name_data_name
Parameters
----------
name : str
name of the dataset
version : int or str
If version is an integer, the exact name/version will be obtained from
OpenML. If version is the string "active", the first version of the
dataset that OpenML annotates as active is used. Any string value other
than "active" is treated as an integer.
data_home : str or None
Location to cache the response. None if no cache is required.
n_retries : int, default=3
Number of retries when HTTP errors are encountered. Errors with status
code 412 won't be retried as they represent OpenML generic errors.
delay : float, default=1.0
Number of seconds between retries.
Returns
-------
first_dataset : json
JSON representation of the first dataset object that adhered to the
search criteria.
|
_get_data_info_by_name
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_openml.py
|
BSD-3-Clause
|
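As a hedged illustration of the version handling above: given a listing payload shaped like the OpenML response (all values below are invented), the helper keeps the oldest active version and warns when several exist. A minimal sketch:

```python
# Minimal sketch of the "active" branch above; the payload values are invented.
from warnings import warn

json_data = {
    "data": {
        "dataset": [
            {"did": 1, "version": 1, "status": "active"},
            {"did": 2, "version": 3, "status": "active"},
        ]
    }
}

res = json_data["data"]["dataset"]
if len(res) > 1:
    # Mirror the helper: report the extra versions but return the oldest one.
    warn(f"Multiple active versions exist; returning version {res[0]['version']}.")
first_dataset = res[0]
```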
def _get_num_samples(data_qualities: OpenmlQualitiesType) -> int:
"""Get the number of samples from data qualities.
Parameters
----------
data_qualities : list of dict
Used to retrieve the number of instances (samples) in the dataset.
Returns
-------
n_samples : int
The number of samples in the dataset or -1 if data qualities are
unavailable.
"""
# If the data qualities are unavailable, we return -1
default_n_samples = -1
qualities = {d["name"]: d["value"] for d in data_qualities}
return int(float(qualities.get("NumberOfInstances", default_n_samples)))
|
Get the number of samples from data qualities.
Parameters
----------
data_qualities : list of dict
Used to retrieve the number of instances (samples) in the dataset.
Returns
-------
n_samples : int
The number of samples in the dataset or -1 if data qualities are
unavailable.
|
_get_num_samples
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_openml.py
|
BSD-3-Clause
|
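A quick worked example of the lookup above, using an invented qualities list: build a name-to-value mapping and cast the instance count through `float` so that string values such as `"150.0"` are handled.

```python
# Invented data qualities; the real list comes from the OpenML qualities endpoint.
data_qualities = [
    {"name": "NumberOfInstances", "value": "150.0"},
    {"name": "NumberOfFeatures", "value": "4.0"},
]

qualities = {d["name"]: d["value"] for d in data_qualities}
n_samples = int(float(qualities.get("NumberOfInstances", -1)))  # -> 150
```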
def _load_arff_response(
url: str,
data_home: Optional[str],
parser: str,
output_type: str,
openml_columns_info: dict,
feature_names_to_select: List[str],
target_names_to_select: List[str],
shape: Optional[Tuple[int, int]],
md5_checksum: str,
n_retries: int = 3,
delay: float = 1.0,
read_csv_kwargs: Optional[Dict] = None,
):
"""Load the ARFF data associated with the OpenML URL.
In addition to loading the data, this function also checks the
integrity of the file downloaded from OpenML using its MD5 checksum.
Parameters
----------
url : str
The URL of the ARFF file on OpenML.
data_home : str
The location where to cache the data.
parser : {"liac-arff", "pandas"}
The parser used to parse the ARFF file.
output_type : {"numpy", "pandas", "sparse"}
The type of the arrays that will be returned. The possibilities are:
- `"numpy"`: both `X` and `y` will be NumPy arrays;
- `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array;
- `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a
pandas Series or DataFrame.
openml_columns_info : dict
The information provided by OpenML regarding the columns of the ARFF
file.
feature_names_to_select : list of str
The list of the features to be selected.
target_names_to_select : list of str
The list of the target variables to be selected.
shape : tuple or None
With `parser="liac-arff"`, when using a generator to load the data,
one needs to provide the shape of the data beforehand.
md5_checksum : str
The MD5 checksum provided by OpenML to check the data integrity.
n_retries : int, default=3
The number of times to retry downloading the data if it fails.
delay : float, default=1.0
The delay between two consecutive downloads in seconds.
read_csv_kwargs : dict, default=None
Keyword arguments to pass to `pandas.read_csv` when using the pandas parser.
It allows overwriting the default options.
.. versionadded:: 1.3
Returns
-------
X : {ndarray, sparse matrix, dataframe}
The data matrix.
y : {ndarray, dataframe, series}
The target.
frame : dataframe or None
A dataframe containing both `X` and `y`. `None` if
`output_type != "pandas"`.
categories : list of str or None
The names of the features that are categorical. `None` if
`output_type == "pandas"`.
"""
gzip_file = _open_openml_url(url, data_home, n_retries=n_retries, delay=delay)
with closing(gzip_file):
md5 = hashlib.md5()
for chunk in iter(lambda: gzip_file.read(4096), b""):
md5.update(chunk)
actual_md5_checksum = md5.hexdigest()
if actual_md5_checksum != md5_checksum:
raise ValueError(
f"md5 checksum of local file for {url} does not match description: "
f"expected: {md5_checksum} but got {actual_md5_checksum}. "
"Downloaded file could have been modified / corrupted, clean cache "
"and retry..."
)
def _open_url_and_load_gzip_file(url, data_home, n_retries, delay, arff_params):
gzip_file = _open_openml_url(url, data_home, n_retries=n_retries, delay=delay)
with closing(gzip_file):
return load_arff_from_gzip_file(gzip_file, **arff_params)
arff_params: Dict = dict(
parser=parser,
output_type=output_type,
openml_columns_info=openml_columns_info,
feature_names_to_select=feature_names_to_select,
target_names_to_select=target_names_to_select,
shape=shape,
read_csv_kwargs=read_csv_kwargs or {},
)
try:
X, y, frame, categories = _open_url_and_load_gzip_file(
url, data_home, n_retries, delay, arff_params
)
except Exception as exc:
if parser != "pandas":
raise
from pandas.errors import ParserError
if not isinstance(exc, ParserError):
raise
# A parsing error could come from providing the wrong quotechar
# to pandas. By default, we use a double quote. Thus, we retry
# with a single quote before raising the error.
arff_params["read_csv_kwargs"].update(quotechar="'")
X, y, frame, categories = _open_url_and_load_gzip_file(
url, data_home, n_retries, delay, arff_params
)
return X, y, frame, categories
|
Load the ARFF data associated with the OpenML URL.
In addition to loading the data, this function also checks the
integrity of the file downloaded from OpenML using its MD5 checksum.
Parameters
----------
url : str
The URL of the ARFF file on OpenML.
data_home : str
The location where to cache the data.
parser : {"liac-arff", "pandas"}
The parser used to parse the ARFF file.
output_type : {"numpy", "pandas", "sparse"}
The type of the arrays that will be returned. The possibilities are:
- `"numpy"`: both `X` and `y` will be NumPy arrays;
- `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array;
- `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a
pandas Series or DataFrame.
openml_columns_info : dict
The information provided by OpenML regarding the columns of the ARFF
file.
feature_names_to_select : list of str
The list of the features to be selected.
target_names_to_select : list of str
The list of the target variables to be selected.
shape : tuple or None
With `parser="liac-arff"`, when using a generator to load the data,
one needs to provide the shape of the data beforehand.
md5_checksum : str
The MD5 checksum provided by OpenML to check the data integrity.
n_retries : int, default=3
The number of times to retry downloading the data if it fails.
delay : float, default=1.0
The delay between two consecutive downloads in seconds.
read_csv_kwargs : dict, default=None
Keyword arguments to pass to `pandas.read_csv` when using the pandas parser.
It allows overwriting the default options.
.. versionadded:: 1.3
Returns
-------
X : {ndarray, sparse matrix, dataframe}
The data matrix.
y : {ndarray, dataframe, series}
The target.
frame : dataframe or None
A dataframe containing both `X` and `y`. `None` if
`output_type != "pandas"`.
categories : list of str or None
The names of the features that are categorical. `None` if
`output_type == "pandas"`.
|
_load_arff_response
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_openml.py
|
BSD-3-Clause
|
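The chunked MD5 check in `_load_arff_response` is a general pattern for verifying a downloaded file without reading it into memory at once. A standalone sketch (the path and digest below are placeholders):

```python
import hashlib

def file_md5_matches(path, expected_md5, chunk_size=4096):
    """Hash a file in 4 KiB chunks, as above, and compare with an expected digest."""
    md5 = hashlib.md5()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            md5.update(chunk)
    return md5.hexdigest() == expected_md5

# Placeholder usage:
# file_md5_matches("dataset.arff.gz", "0123456789abcdef0123456789abcdef")
```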
def _download_data_to_bunch(
url: str,
sparse: bool,
data_home: Optional[str],
*,
as_frame: bool,
openml_columns_info: List[dict],
data_columns: List[str],
target_columns: List[str],
shape: Optional[Tuple[int, int]],
md5_checksum: str,
n_retries: int = 3,
delay: float = 1.0,
parser: str,
read_csv_kwargs: Optional[Dict] = None,
):
"""Download ARFF data, load it to a specific container and create to Bunch.
This function has a mechanism to retry/cache/clean the data.
Parameters
----------
url : str
The URL of the ARFF file on OpenML.
sparse : bool
Whether the dataset is expected to use the sparse ARFF format.
data_home : str
The location where to cache the data.
as_frame : bool
Whether or not to return the data as a pandas DataFrame.
openml_columns_info : list of dict
The information regarding the columns provided by OpenML for the
ARFF dataset. The information is stored as a list of dictionaries.
data_columns : list of str
The list of the features to be selected.
target_columns : list of str
The list of the target variables to be selected.
shape : tuple or None
With `parser="liac-arff"`, when using a generator to load the data,
one needs to provide the shape of the data beforehand.
md5_checksum : str
The MD5 checksum provided by OpenML to check the data integrity.
n_retries : int, default=3
Number of retries when HTTP errors are encountered. Errors with status
code 412 won't be retried as they represent OpenML generic errors.
delay : float, default=1.0
Number of seconds between retries.
parser : {"liac-arff", "pandas"}
The parser used to parse the ARFF file.
read_csv_kwargs : dict, default=None
Keyword arguments to pass to `pandas.read_csv` when using the pandas parser.
It allows overwriting the default options.
.. versionadded:: 1.3
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
X : {ndarray, sparse matrix, dataframe}
The data matrix.
y : {ndarray, dataframe, series}
The target.
frame : dataframe or None
A dataframe containing both `X` and `y`. `None` if
`output_type != "pandas"`.
categories : list of str or None
The names of the features that are categorical. `None` if
`output_type == "pandas"`.
"""
# Prepare which columns and data types should be returned for the X and y
features_dict = {feature["name"]: feature for feature in openml_columns_info}
if sparse:
output_type = "sparse"
elif as_frame:
output_type = "pandas"
else:
output_type = "numpy"
# XXX: target columns should all be categorical or all numeric
_verify_target_data_type(features_dict, target_columns)
for name in target_columns:
column_info = features_dict[name]
n_missing_values = int(column_info["number_of_missing_values"])
if n_missing_values > 0:
raise ValueError(
f"Target column '{column_info['name']}' has {n_missing_values} missing "
"values. Missing values are not supported for target columns."
)
no_retry_exception = None
if parser == "pandas":
# If we get a ParserError with pandas, then we don't want to retry and we raise
# early.
from pandas.errors import ParserError
no_retry_exception = ParserError
X, y, frame, categories = _retry_with_clean_cache(
url, data_home, no_retry_exception
)(_load_arff_response)(
url,
data_home,
parser=parser,
output_type=output_type,
openml_columns_info=features_dict,
feature_names_to_select=data_columns,
target_names_to_select=target_columns,
shape=shape,
md5_checksum=md5_checksum,
n_retries=n_retries,
delay=delay,
read_csv_kwargs=read_csv_kwargs,
)
return Bunch(
data=X,
target=y,
frame=frame,
categories=categories,
feature_names=data_columns,
target_names=target_columns,
)
|
Download ARFF data, load it into a specific container and create a Bunch.
This function has a mechanism to retry/cache/clean the data.
Parameters
----------
url : str
The URL of the ARFF file on OpenML.
sparse : bool
Whether the dataset is expected to use the sparse ARFF format.
data_home : str
The location where to cache the data.
as_frame : bool
Whether or not to return the data as a pandas DataFrame.
openml_columns_info : list of dict
The information regarding the columns provided by OpenML for the
ARFF dataset. The information is stored as a list of dictionaries.
data_columns : list of str
The list of the features to be selected.
target_columns : list of str
The list of the target variables to be selected.
shape : tuple or None
With `parser="liac-arff"`, when using a generator to load the data,
one needs to provide the shape of the data beforehand.
md5_checksum : str
The MD5 checksum provided by OpenML to check the data integrity.
n_retries : int, default=3
Number of retries when HTTP errors are encountered. Errors with status
code 412 won't be retried as they represent OpenML generic errors.
delay : float, default=1.0
Number of seconds between retries.
parser : {"liac-arff", "pandas"}
The parser used to parse the ARFF file.
read_csv_kwargs : dict, default=None
Keyword arguments to pass to `pandas.read_csv` when using the pandas parser.
It allows overwriting the default options.
.. versionadded:: 1.3
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
X : {ndarray, sparse matrix, dataframe}
The data matrix.
y : {ndarray, dataframe, series}
The target.
frame : dataframe or None
A dataframe containing both `X` and `y`. `None` if
`output_type != "pandas"`.
categories : list of str or None
The names of the features that are categorical. `None` if
`output_type == "pandas"`.
|
_download_data_to_bunch
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_openml.py
|
BSD-3-Clause
|
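The returned container is a `sklearn.utils.Bunch`, a dictionary whose keys are also accessible as attributes. A tiny sketch of what a caller receives (the arrays here are stand-ins, not real OpenML data):

```python
import numpy as np
from sklearn.utils import Bunch

# Stand-in arrays; in the real function X and y come from the parsed ARFF file.
X = np.array([[1.0, 2.0], [3.0, 4.0]])
y = np.array([0, 1])

bunch = Bunch(
    data=X,
    target=y,
    frame=None,
    categories=None,
    feature_names=["f0", "f1"],
    target_names=["class"],
)
assert bunch.data is bunch["data"]  # attribute and key access are equivalent
```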
def fetch_openml(
name: Optional[str] = None,
*,
version: Union[str, int] = "active",
data_id: Optional[int] = None,
data_home: Optional[Union[str, os.PathLike]] = None,
target_column: Optional[Union[str, List]] = "default-target",
cache: bool = True,
return_X_y: bool = False,
as_frame: Union[str, bool] = "auto",
n_retries: int = 3,
delay: float = 1.0,
parser: str = "auto",
read_csv_kwargs: Optional[Dict] = None,
):
"""Fetch dataset from openml by name or dataset id.
Datasets are uniquely identified by either an integer ID or by a
combination of name and version (i.e. there might be multiple
versions of the 'iris' dataset). Please give either name or data_id
(not both). In case a name is given, a version can also be
provided.
Read more in the :ref:`User Guide <openml>`.
.. versionadded:: 0.20
.. note:: EXPERIMENTAL
The API is experimental (particularly the return value structure),
and might have small backward-incompatible changes without notice
or warning in future releases.
Parameters
----------
name : str, default=None
String identifier of the dataset. Note that OpenML can have multiple
datasets with the same name.
version : int or 'active', default='active'
Version of the dataset. Can only be provided if also ``name`` is given.
If 'active' the oldest version that's still active is used. Since
there may be more than one active version of a dataset, and those
versions may fundamentally be different from one another, setting an
exact version is highly recommended.
data_id : int, default=None
OpenML ID of the dataset. The most specific way of retrieving a
dataset. If data_id is not given, name (and potential version) are
used to obtain a dataset.
data_home : str or path-like, default=None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
target_column : str, list or None, default='default-target'
Specify the column name in the data to use as target. If
'default-target', the standard target column as stored on the server
is used. If ``None``, all columns are returned as data and the
target is ``None``. If list (of strings), all columns with these names
are returned as multi-target (Note: not all scikit-learn classifiers
can handle all types of multi-output combinations).
cache : bool, default=True
Whether to cache the downloaded datasets into `data_home`.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object. See
below for more information about the `data` and `target` objects.
as_frame : bool or 'auto', default='auto'
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string or categorical). The target is
a pandas DataFrame or Series depending on the number of target_columns.
The Bunch will contain a ``frame`` attribute with the target and the
data. If ``return_X_y`` is True, then ``(data, target)`` will be pandas
DataFrames or Series as described above.
If `as_frame` is 'auto', the data and target will be converted to
DataFrame or Series as if `as_frame` is set to True, unless the dataset
is stored in sparse format.
If `as_frame` is False, the data and target will be NumPy arrays and
the `data` will only contain numerical values when `parser="liac-arff"`
where the categories are provided in the attribute `categories` of the
`Bunch` instance. When `parser="pandas"`, no ordinal encoding is made.
.. versionchanged:: 0.24
The default value of `as_frame` changed from `False` to `'auto'`
in 0.24.
n_retries : int, default=3
Number of retries when HTTP errors or network timeouts are encountered.
Errors with status code 412 won't be retried as they represent OpenML
generic errors.
delay : float, default=1.0
Number of seconds between retries.
parser : {"auto", "pandas", "liac-arff"}, default="auto"
Parser used to load the ARFF file. Two parsers are implemented:
- `"pandas"`: this is the most efficient parser. However, it requires
pandas to be installed and can only open dense datasets.
- `"liac-arff"`: this is a pure Python ARFF parser that is much less
memory- and CPU-efficient. It deals with sparse ARFF datasets.
If `"auto"`, the parser is chosen automatically such that `"liac-arff"`
is selected for sparse ARFF datasets, otherwise `"pandas"` is selected.
.. versionadded:: 1.2
.. versionchanged:: 1.4
The default value of `parser` changes from `"liac-arff"` to
`"auto"`.
read_csv_kwargs : dict, default=None
Keyword arguments passed to :func:`pandas.read_csv` when loading the data
from an ARFF file and using the pandas parser. It allows overwriting
some default parameters.
.. versionadded:: 1.3
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : np.array, scipy.sparse.csr_matrix of floats, or pandas DataFrame
The feature matrix. Categorical features are encoded as ordinals.
target : np.array, pandas Series or DataFrame
The regression target or classification labels, if applicable.
Dtype is float if numeric, and object if categorical. If
``as_frame`` is True, ``target`` is a pandas object.
DESCR : str
The full description of the dataset.
feature_names : list
The names of the dataset columns.
target_names: list
The names of the target columns.
.. versionadded:: 0.22
categories : dict or None
Maps each categorical feature name to a list of values, such
that the value encoded as i is ith in the list. If ``as_frame``
is True, this is None.
details : dict
More metadata from OpenML.
frame : pandas DataFrame
Only present when `as_frame=True`. DataFrame with ``data`` and
``target``.
(data, target) : tuple if ``return_X_y`` is True
.. note:: EXPERIMENTAL
This interface is **experimental** and subsequent releases may
change attributes without notice (although there should only be
minor changes to ``data`` and ``target``).
Missing values in the 'data' are represented as NaN's. Missing values
in 'target' are represented as NaN's (numerical target) or None
(categorical target).
Notes
-----
The `"pandas"` and `"liac-arff"` parsers can lead to different data types
in the output. The notable differences are the following:
- The `"liac-arff"` parser always encodes categorical features as `str` objects.
In contrast, the `"pandas"` parser infers the type while
reading, and numerical categories will be cast to integers whenever
possible.
- The `"liac-arff"` parser uses float64 to encode numerical features
tagged as 'REAL' and 'NUMERICAL' in the metadata. The `"pandas"`
parser instead infers whether these numerical features correspond
to integers and uses pandas' Integer extension dtype.
- In particular, classification datasets with integer categories are
typically loaded as such `(0, 1, ...)` with the `"pandas"` parser while
`"liac-arff"` will force the use of string encoded class labels such as
`"0"`, `"1"` and so on.
- The `"pandas"` parser will not strip single quotes - i.e. `'` - from
string columns. For instance, a string `'my string'` will be kept as is
while the `"liac-arff"` parser will strip the single quotes. For
categorical columns, the single quotes are stripped from the values.
In addition, when `as_frame=False` is used, the `"liac-arff"` parser
returns ordinally encoded data where the categories are provided in the
attribute `categories` of the `Bunch` instance. Instead, `"pandas"` returns
a NumPy array where the categories are not encoded.
Examples
--------
>>> from sklearn.datasets import fetch_openml
>>> adult = fetch_openml("adult", version=2) # doctest: +SKIP
>>> adult.frame.info() # doctest: +SKIP
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 48842 entries, 0 to 48841
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 age 48842 non-null int64
1 workclass 46043 non-null category
2 fnlwgt 48842 non-null int64
3 education 48842 non-null category
4 education-num 48842 non-null int64
5 marital-status 48842 non-null category
6 occupation 46033 non-null category
7 relationship 48842 non-null category
8 race 48842 non-null category
9 sex 48842 non-null category
10 capital-gain 48842 non-null int64
11 capital-loss 48842 non-null int64
12 hours-per-week 48842 non-null int64
13 native-country 47985 non-null category
14 class 48842 non-null category
dtypes: category(9), int64(6)
memory usage: 2.7 MB
"""
if cache is False:
# no caching will be applied
data_home = None
else:
data_home = get_data_home(data_home=data_home)
data_home = join(str(data_home), "openml")
# check valid function arguments. data_id XOR (name, version) should be
# provided
if name is not None:
# OpenML is case-insensitive, but the caching mechanism is not
# convert all data names (str) to lower case
name = name.lower()
if data_id is not None:
raise ValueError(
"Dataset data_id={} and name={} passed, but you can only "
"specify a numeric data_id or a name, not "
"both.".format(data_id, name)
)
data_info = _get_data_info_by_name(
name, version, data_home, n_retries=n_retries, delay=delay
)
data_id = data_info["did"]
elif data_id is not None:
# from the previous if statement, it is given that name is None
if version != "active":
raise ValueError(
"Dataset data_id={} and version={} passed, but you can only "
"specify a numeric data_id or a version, not "
"both.".format(data_id, version)
)
else:
raise ValueError(
"Neither name nor data_id are provided. Please provide name or data_id."
)
data_description = _get_data_description_by_id(data_id, data_home)
if data_description["status"] != "active":
warn(
"Version {} of dataset {} is inactive, meaning that issues have "
"been found in the dataset. Try using a newer version from "
"this URL: {}".format(
data_description["version"],
data_description["name"],
data_description["url"],
)
)
if "error" in data_description:
warn(
"OpenML registered a problem with the dataset. It might be "
"unusable. Error: {}".format(data_description["error"])
)
if "warning" in data_description:
warn(
"OpenML raised a warning on the dataset. It might be "
"unusable. Warning: {}".format(data_description["warning"])
)
return_sparse = data_description["format"].lower() == "sparse_arff"
as_frame = not return_sparse if as_frame == "auto" else as_frame
if parser == "auto":
parser_ = "liac-arff" if return_sparse else "pandas"
else:
parser_ = parser
if parser_ == "pandas":
try:
check_pandas_support("`fetch_openml`")
except ImportError as exc:
if as_frame:
err_msg = (
"Returning pandas objects requires pandas to be installed. "
"Alternatively, explicitly set `as_frame=False` and "
"`parser='liac-arff'`."
)
else:
err_msg = (
f"Using `parser={parser!r}` with dense data requires pandas to be "
"installed. Alternatively, explicitly set `parser='liac-arff'`."
)
raise ImportError(err_msg) from exc
if return_sparse:
if as_frame:
raise ValueError(
"Sparse ARFF datasets cannot be loaded with as_frame=True. "
"Use as_frame=False or as_frame='auto' instead."
)
if parser_ == "pandas":
raise ValueError(
f"Sparse ARFF datasets cannot be loaded with parser={parser!r}. "
"Use parser='liac-arff' or parser='auto' instead."
)
# download data features, meta-info about column types
features_list = _get_data_features(data_id, data_home)
if not as_frame:
for feature in features_list:
if "true" in (feature["is_ignore"], feature["is_row_identifier"]):
continue
if feature["data_type"] == "string":
raise ValueError(
"STRING attributes are not supported for "
"array representation. Try as_frame=True"
)
if target_column == "default-target":
# determines the default target based on the data feature results
# (which is currently more reliable than the data description;
# see issue: https://github.com/openml/OpenML/issues/768)
target_columns = [
feature["name"]
for feature in features_list
if feature["is_target"] == "true"
]
elif isinstance(target_column, str):
# for code-simplicity, make target_column by default a list
target_columns = [target_column]
elif target_column is None:
target_columns = []
else:
# target_column already is of type list
target_columns = target_column
data_columns = _valid_data_column_names(features_list, target_columns)
shape: Optional[Tuple[int, int]]
# determine arff encoding to return
if not return_sparse:
# The shape must include the ignored features to keep the right indexes
# during the arff data conversion.
data_qualities = _get_data_qualities(data_id, data_home)
shape = _get_num_samples(data_qualities), len(features_list)
else:
shape = None
# obtain the data
url = data_description["url"]
bunch = _download_data_to_bunch(
url,
return_sparse,
data_home,
as_frame=bool(as_frame),
openml_columns_info=features_list,
shape=shape,
target_columns=target_columns,
data_columns=data_columns,
md5_checksum=data_description["md5_checksum"],
n_retries=n_retries,
delay=delay,
parser=parser_,
read_csv_kwargs=read_csv_kwargs,
)
if return_X_y:
return bunch.data, bunch.target
description = "{}\n\nDownloaded from openml.org.".format(
data_description.pop("description")
)
bunch.update(
DESCR=description,
details=data_description,
url="https://www.openml.org/d/{}".format(data_id),
)
return bunch
|
Fetch dataset from OpenML by name or dataset id.
Datasets are uniquely identified by either an integer ID or by a
combination of name and version (i.e. there might be multiple
versions of the 'iris' dataset). Please give either name or data_id
(not both). In case a name is given, a version can also be
provided.
Read more in the :ref:`User Guide <openml>`.
.. versionadded:: 0.20
.. note:: EXPERIMENTAL
The API is experimental (particularly the return value structure),
and might have small backward-incompatible changes without notice
or warning in future releases.
Parameters
----------
name : str, default=None
String identifier of the dataset. Note that OpenML can have multiple
datasets with the same name.
version : int or 'active', default='active'
Version of the dataset. Can only be provided if also ``name`` is given.
If 'active' the oldest version that's still active is used. Since
there may be more than one active version of a dataset, and those
versions may fundamentally be different from one another, setting an
exact version is highly recommended.
data_id : int, default=None
OpenML ID of the dataset. The most specific way of retrieving a
dataset. If data_id is not given, name (and potential version) are
used to obtain a dataset.
data_home : str or path-like, default=None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
target_column : str, list or None, default='default-target'
Specify the column name in the data to use as target. If
'default-target', the standard target column as stored on the server
is used. If ``None``, all columns are returned as data and the
target is ``None``. If list (of strings), all columns with these names
are returned as multi-target (Note: not all scikit-learn classifiers
can handle all types of multi-output combinations).
cache : bool, default=True
Whether to cache the downloaded datasets into `data_home`.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object. See
below for more information about the `data` and `target` objects.
as_frame : bool or 'auto', default='auto'
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string or categorical). The target is
a pandas DataFrame or Series depending on the number of target_columns.
The Bunch will contain a ``frame`` attribute with the target and the
data. If ``return_X_y`` is True, then ``(data, target)`` will be pandas
DataFrames or Series as described above.
If `as_frame` is 'auto', the data and target will be converted to
DataFrame or Series as if `as_frame` is set to True, unless the dataset
is stored in sparse format.
If `as_frame` is False, the data and target will be NumPy arrays and
the `data` will only contain numerical values when `parser="liac-arff"`
where the categories are provided in the attribute `categories` of the
`Bunch` instance. When `parser="pandas"`, no ordinal encoding is made.
.. versionchanged:: 0.24
The default value of `as_frame` changed from `False` to `'auto'`
in 0.24.
n_retries : int, default=3
Number of retries when HTTP errors or network timeouts are encountered.
Errors with status code 412 won't be retried as they represent OpenML
generic errors.
delay : float, default=1.0
Number of seconds between retries.
parser : {"auto", "pandas", "liac-arff"}, default="auto"
Parser used to load the ARFF file. Two parsers are implemented:
- `"pandas"`: this is the most efficient parser. However, it requires
pandas to be installed and can only open dense datasets.
- `"liac-arff"`: this is a pure Python ARFF parser that is much less
memory- and CPU-efficient. It deals with sparse ARFF datasets.
If `"auto"`, the parser is chosen automatically such that `"liac-arff"`
is selected for sparse ARFF datasets, otherwise `"pandas"` is selected.
.. versionadded:: 1.2
.. versionchanged:: 1.4
The default value of `parser` changes from `"liac-arff"` to
`"auto"`.
read_csv_kwargs : dict, default=None
Keyword arguments passed to :func:`pandas.read_csv` when loading the data
from an ARFF file and using the pandas parser. It allows overwriting
some default parameters.
.. versionadded:: 1.3
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : np.array, scipy.sparse.csr_matrix of floats, or pandas DataFrame
The feature matrix. Categorical features are encoded as ordinals.
target : np.array, pandas Series or DataFrame
The regression target or classification labels, if applicable.
Dtype is float if numeric, and object if categorical. If
``as_frame`` is True, ``target`` is a pandas object.
DESCR : str
The full description of the dataset.
feature_names : list
The names of the dataset columns.
target_names: list
The names of the target columns.
.. versionadded:: 0.22
categories : dict or None
Maps each categorical feature name to a list of values, such
that the value encoded as i is ith in the list. If ``as_frame``
is True, this is None.
details : dict
More metadata from OpenML.
frame : pandas DataFrame
Only present when `as_frame=True`. DataFrame with ``data`` and
``target``.
(data, target) : tuple if ``return_X_y`` is True
.. note:: EXPERIMENTAL
This interface is **experimental** and subsequent releases may
change attributes without notice (although there should only be
minor changes to ``data`` and ``target``).
Missing values in the 'data' are represented as NaN's. Missing values
in 'target' are represented as NaN's (numerical target) or None
(categorical target).
Notes
-----
The `"pandas"` and `"liac-arff"` parsers can lead to different data types
in the output. The notable differences are the following:
- The `"liac-arff"` parser always encodes categorical features as `str` objects.
In contrast, the `"pandas"` parser infers the type while
reading, and numerical categories will be cast to integers whenever
possible.
- The `"liac-arff"` parser uses float64 to encode numerical features
tagged as 'REAL' and 'NUMERICAL' in the metadata. The `"pandas"`
parser instead infers whether these numerical features correspond
to integers and uses pandas' Integer extension dtype.
- In particular, classification datasets with integer categories are
typically loaded as such `(0, 1, ...)` with the `"pandas"` parser while
`"liac-arff"` will force the use of string encoded class labels such as
`"0"`, `"1"` and so on.
- The `"pandas"` parser will not strip single quotes - i.e. `'` - from
string columns. For instance, a string `'my string'` will be kept as is
while the `"liac-arff"` parser will strip the single quotes. For
categorical columns, the single quotes are stripped from the values.
In addition, when `as_frame=False` is used, the `"liac-arff"` parser
returns ordinally encoded data where the categories are provided in the
attribute `categories` of the `Bunch` instance. Instead, `"pandas"` returns
a NumPy array where the categories are not encoded.
Examples
--------
>>> from sklearn.datasets import fetch_openml
>>> adult = fetch_openml("adult", version=2) # doctest: +SKIP
>>> adult.frame.info() # doctest: +SKIP
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 48842 entries, 0 to 48841
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 age 48842 non-null int64
1 workclass 46043 non-null category
2 fnlwgt 48842 non-null int64
3 education 48842 non-null category
4 education-num 48842 non-null int64
5 marital-status 48842 non-null category
6 occupation 46033 non-null category
7 relationship 48842 non-null category
8 race 48842 non-null category
9 sex 48842 non-null category
10 capital-gain 48842 non-null int64
11 capital-loss 48842 non-null int64
12 hours-per-week 48842 non-null int64
13 native-country 47985 non-null category
14 class 48842 non-null category
dtypes: category(9), int64(6)
memory usage: 2.7 MB
|
fetch_openml
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_openml.py
|
BSD-3-Clause
|
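A minimal usage sketch complementing the docstring example; it needs network access on first call, and it assumes that OpenML `data_id=61` is the classic iris dataset:

```python
from sklearn.datasets import fetch_openml

# Assumption: data_id=61 is the iris dataset on OpenML; requires network access.
X, y = fetch_openml(data_id=61, as_frame=True, return_X_y=True, parser="pandas")
print(X.shape)           # (150, 4) if the assumption holds
print(y.value_counts())  # 50 samples per class
```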
def fetch_rcv1(
*,
data_home=None,
subset="all",
download_if_missing=True,
random_state=None,
shuffle=False,
return_X_y=False,
n_retries=3,
delay=1.0,
):
"""Load the RCV1 multilabel dataset (classification).
Download it if necessary.
Version: RCV1-v2, vectors, full sets, topics multilabels.
================= =====================
Classes 103
Samples total 804414
Dimensionality 47236
Features real, between 0 and 1
================= =====================
Read more in the :ref:`User Guide <rcv1_dataset>`.
.. versionadded:: 0.17
Parameters
----------
data_home : str or path-like, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
subset : {'train', 'test', 'all'}, default='all'
Select the dataset to load: 'train' for the training set
(23149 samples), 'test' for the test set (781265 samples),
'all' for both, with the training samples first if shuffle is False.
This follows the official LYRL2004 chronological split.
download_if_missing : bool, default=True
If False, raise an OSError if the data is not locally available
instead of trying to download the data from the source site.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
shuffle : bool, default=False
Whether to shuffle dataset.
return_X_y : bool, default=False
If True, returns ``(dataset.data, dataset.target)`` instead of a Bunch
object. See below for more information about the `dataset.data` and
`dataset.target` object.
.. versionadded:: 0.20
n_retries : int, default=3
Number of retries when HTTP errors are encountered.
.. versionadded:: 1.5
delay : float, default=1.0
Number of seconds between retries.
.. versionadded:: 1.5
Returns
-------
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object. Returned only if `return_X_y` is False.
`dataset` has the following attributes:
- data : sparse matrix of shape (804414, 47236), dtype=np.float64
The array has 0.16% of non zero values. Will be of CSR format.
- target : sparse matrix of shape (804414, 103), dtype=np.uint8
Each sample has a value of 1 in its categories, and 0 in others.
The array has 3.15% of non zero values. Will be of CSR format.
- sample_id : ndarray of shape (804414,), dtype=np.uint32,
Identification number of each sample, as ordered in dataset.data.
- target_names : ndarray of shape (103,), dtype=object
Names of each target (RCV1 topics), as ordered in dataset.target.
- DESCR : str
Description of the RCV1 dataset.
(data, target) : tuple
A tuple consisting of `dataset.data` and `dataset.target`, as
described above. Returned only if `return_X_y` is True.
.. versionadded:: 0.20
Examples
--------
>>> from sklearn.datasets import fetch_rcv1
>>> rcv1 = fetch_rcv1()
>>> rcv1.data.shape
(804414, 47236)
>>> rcv1.target.shape
(804414, 103)
"""
N_SAMPLES = 804414
N_FEATURES = 47236
N_CATEGORIES = 103
N_TRAIN = 23149
data_home = get_data_home(data_home=data_home)
rcv1_dir = join(data_home, "RCV1")
if download_if_missing:
if not exists(rcv1_dir):
makedirs(rcv1_dir)
samples_path = _pkl_filepath(rcv1_dir, "samples.pkl")
sample_id_path = _pkl_filepath(rcv1_dir, "sample_id.pkl")
sample_topics_path = _pkl_filepath(rcv1_dir, "sample_topics.pkl")
topics_path = _pkl_filepath(rcv1_dir, "topics_names.pkl")
# load data (X) and sample_id
if download_if_missing and (not exists(samples_path) or not exists(sample_id_path)):
files = []
for each in XY_METADATA:
logger.info("Downloading %s" % each.url)
file_path = _fetch_remote(
each, dirname=rcv1_dir, n_retries=n_retries, delay=delay
)
files.append(GzipFile(filename=file_path))
Xy = load_svmlight_files(files, n_features=N_FEATURES)
# Training data is before testing data
X = sp.vstack([Xy[8], Xy[0], Xy[2], Xy[4], Xy[6]]).tocsr()
sample_id = np.hstack((Xy[9], Xy[1], Xy[3], Xy[5], Xy[7]))
sample_id = sample_id.astype(np.uint32, copy=False)
joblib.dump(X, samples_path, compress=9)
joblib.dump(sample_id, sample_id_path, compress=9)
# delete archives
for f in files:
f.close()
remove(f.name)
else:
X = joblib.load(samples_path)
sample_id = joblib.load(sample_id_path)
# load target (y), categories, and sample_id_bis
if download_if_missing and (
not exists(sample_topics_path) or not exists(topics_path)
):
logger.info("Downloading %s" % TOPICS_METADATA.url)
topics_archive_path = _fetch_remote(
TOPICS_METADATA, dirname=rcv1_dir, n_retries=n_retries, delay=delay
)
# parse the target file
n_cat = -1
n_doc = -1
doc_previous = -1
y = np.zeros((N_SAMPLES, N_CATEGORIES), dtype=np.uint8)
sample_id_bis = np.zeros(N_SAMPLES, dtype=np.int32)
category_names = {}
with GzipFile(filename=topics_archive_path, mode="rb") as f:
for line in f:
line_components = line.decode("ascii").split(" ")
if len(line_components) == 3:
cat, doc, _ = line_components
if cat not in category_names:
n_cat += 1
category_names[cat] = n_cat
doc = int(doc)
if doc != doc_previous:
doc_previous = doc
n_doc += 1
sample_id_bis[n_doc] = doc
y[n_doc, category_names[cat]] = 1
# delete archive
remove(topics_archive_path)
# Samples in X are ordered with sample_id,
# whereas in y, they are ordered with sample_id_bis.
permutation = _find_permutation(sample_id_bis, sample_id)
y = y[permutation, :]
# save category names in a list, with same order than y
categories = np.empty(N_CATEGORIES, dtype=object)
for k in category_names.keys():
categories[category_names[k]] = k
# reorder categories in lexicographic order
order = np.argsort(categories)
categories = categories[order]
y = sp.csr_matrix(y[:, order])
joblib.dump(y, sample_topics_path, compress=9)
joblib.dump(categories, topics_path, compress=9)
else:
y = joblib.load(sample_topics_path)
categories = joblib.load(topics_path)
if subset == "all":
pass
elif subset == "train":
X = X[:N_TRAIN, :]
y = y[:N_TRAIN, :]
sample_id = sample_id[:N_TRAIN]
elif subset == "test":
X = X[N_TRAIN:, :]
y = y[N_TRAIN:, :]
sample_id = sample_id[N_TRAIN:]
else:
raise ValueError(
"Unknown subset parameter. Got '%s' instead of one"
" of ('all', 'train', 'test')" % subset
)
if shuffle:
X, y, sample_id = shuffle_(X, y, sample_id, random_state=random_state)
fdescr = load_descr("rcv1.rst")
if return_X_y:
return X, y
return Bunch(
data=X, target=y, sample_id=sample_id, target_names=categories, DESCR=fdescr
)
|
Load the RCV1 multilabel dataset (classification).
Download it if necessary.
Version: RCV1-v2, vectors, full sets, topics multilabels.
================= =====================
Classes 103
Samples total 804414
Dimensionality 47236
Features real, between 0 and 1
================= =====================
Read more in the :ref:`User Guide <rcv1_dataset>`.
.. versionadded:: 0.17
Parameters
----------
data_home : str or path-like, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
subset : {'train', 'test', 'all'}, default='all'
Select the dataset to load: 'train' for the training set
(23149 samples), 'test' for the test set (781265 samples),
'all' for both, with the training samples first if shuffle is False.
This follows the official LYRL2004 chronological split.
download_if_missing : bool, default=True
If False, raise an OSError if the data is not locally available
instead of trying to download the data from the source site.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
shuffle : bool, default=False
Whether to shuffle dataset.
return_X_y : bool, default=False
If True, returns ``(dataset.data, dataset.target)`` instead of a Bunch
object. See below for more information about the `dataset.data` and
`dataset.target` object.
.. versionadded:: 0.20
n_retries : int, default=3
Number of retries when HTTP errors are encountered.
.. versionadded:: 1.5
delay : float, default=1.0
Number of seconds between retries.
.. versionadded:: 1.5
Returns
-------
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object. Returned only if `return_X_y` is False.
`dataset` has the following attributes:
- data : sparse matrix of shape (804414, 47236), dtype=np.float64
The array has 0.16% of non zero values. Will be of CSR format.
- target : sparse matrix of shape (804414, 103), dtype=np.uint8
Each sample has a value of 1 in its categories, and 0 in others.
The array has 3.15% of non zero values. Will be of CSR format.
- sample_id : ndarray of shape (804414,), dtype=np.uint32,
Identification number of each sample, as ordered in dataset.data.
- target_names : ndarray of shape (103,), dtype=object
Names of each target (RCV1 topics), as ordered in dataset.target.
- DESCR : str
Description of the RCV1 dataset.
(data, target) : tuple
A tuple consisting of `dataset.data` and `dataset.target`, as
described above. Returned only if `return_X_y` is True.
.. versionadded:: 0.20
Examples
--------
>>> from sklearn.datasets import fetch_rcv1
>>> rcv1 = fetch_rcv1()
>>> rcv1.data.shape
(804414, 47236)
>>> rcv1.target.shape
(804414, 103)
|
fetch_rcv1
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_rcv1.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_rcv1.py
|
BSD-3-Clause
|
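A short sketch of loading only the LYRL2004 training split; the shapes follow from the counts quoted in the docstring (23149 training samples, 47236 features, 103 topics). The first call downloads and caches the data:

```python
from sklearn.datasets import fetch_rcv1

# Downloads the dataset on first call and caches it under data_home.
rcv1_train = fetch_rcv1(subset="train")
print(rcv1_train.data.shape)    # (23149, 47236)
print(rcv1_train.target.shape)  # (23149, 103)
```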
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions."""
if dimensions > 30:
return np.hstack(
[
rng.randint(2, size=(samples, dimensions - 30)),
_generate_hypercube(samples, 30, rng),
]
)
out = sample_without_replacement(2**dimensions, samples, random_state=rng).astype(
dtype=">u4", copy=False
)
out = np.unpackbits(out.view(">u1")).reshape((-1, 32))[:, -dimensions:]
return out
|
Returns distinct binary samples of length dimensions.
|
_generate_hypercube
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
|
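The bit-unpacking trick above guarantees distinct binary rows by first sampling distinct integers. A NumPy-only sketch of the same idea, substituting `RandomState.choice` for scikit-learn's `sample_without_replacement`:

```python
import numpy as np

rng = np.random.RandomState(0)
samples, dimensions = 4, 3

# Draw distinct integers < 2**dimensions, then unpack their bits into rows.
ints = rng.choice(2**dimensions, size=samples, replace=False).astype(">u4")
rows = np.unpackbits(ints.view(">u1")).reshape(-1, 32)[:, -dimensions:]
# Each row is a distinct length-3 binary vector because the integers were distinct.
print(rows)
```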
def make_classification(
n_samples=100,
n_features=20,
*,
n_informative=2,
n_redundant=2,
n_repeated=0,
n_classes=2,
n_clusters_per_class=2,
weights=None,
flip_y=0.01,
class_sep=1.0,
hypercube=True,
shift=0.0,
scale=1.0,
shuffle=True,
random_state=None,
return_X_y=True,
):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of an ``n_informative``-dimensional hypercube with sides of
length ``2*class_sep`` and assigns an equal number of clusters to each
class. It introduces interdependence between these features and adds
various types of further noise to the data.
Without shuffling, ``X`` horizontally stacks features in the following
order: the primary ``n_informative`` features, followed by ``n_redundant``
linear combinations of the informative features, followed by ``n_repeated``
duplicates, drawn randomly with replacement from the informative and
redundant features. The remaining features are filled with random noise.
Thus, without shuffling, all useful features are contained in the columns
``X[:, :n_informative + n_redundant + n_repeated]``.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
n_features : int, default=20
The total number of features. These comprise ``n_informative``
informative features, ``n_redundant`` redundant features,
``n_repeated`` duplicated features and
``n_features-n_informative-n_redundant-n_repeated`` useless features
drawn at random.
n_informative : int, default=2
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension ``n_informative``. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, default=2
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, default=0
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, default=2
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, default=2
The number of clusters per class.
weights : array-like of shape (n_classes,) or (n_classes - 1,),\
default=None
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if ``len(weights) == n_classes - 1``,
then the last class weight is automatically inferred.
More than ``n_samples`` samples may be returned if the sum of
``weights`` exceeds 1. Note that the actual class proportions will
not exactly match ``weights`` when ``flip_y`` isn't 0.
flip_y : float, default=0.01
The fraction of samples whose class is assigned randomly. Larger
values introduce noise in the labels and make the classification
task harder. Note that the default setting flip_y > 0 might lead
to less than ``n_classes`` in y in some cases.
class_sep : float, default=1.0
The factor multiplying the hypercube size. Larger values spread
out the clusters/classes and make the classification task easier.
hypercube : bool, default=True
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, ndarray of shape (n_features,) or None, default=0.0
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, ndarray of shape (n_features,) or None, default=1.0
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : bool, default=True
Shuffle the samples and the features.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
return_X_y : bool, default=True
If True, a tuple ``(X, y)`` instead of a Bunch object is returned.
.. versionadded:: 1.7
Returns
-------
data : :class:`~sklearn.utils.Bunch` if `return_X_y` is `False`.
Dictionary-like object, with the following attributes.
DESCR : str
A description of the function that generated the dataset.
parameter : dict
A dictionary that stores the values of the arguments passed to the
generator function.
feature_info : list of len(n_features)
A description for each generated feature.
X : ndarray of shape (n_samples, n_features)
The generated samples.
y : ndarray of shape (n_samples,)
An integer label for class membership of each sample.
.. versionadded:: 1.7
(X, y) : tuple if ``return_X_y`` is True
A tuple of generated samples and labels.
See Also
--------
make_blobs : Simplified variant.
make_multilabel_classification : Unrelated generator for multilabel tasks.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
Examples
--------
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(random_state=42)
>>> X.shape
(100, 20)
>>> y.shape
(100,)
>>> list(y[:5])
[np.int64(0), np.int64(0), np.int64(1), np.int64(1), np.int64(0)]
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError(
"Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features"
)
# Use log2 to avoid overflow errors
if n_informative < np.log2(n_classes * n_clusters_per_class):
msg = "n_classes({}) * n_clusters_per_class({}) must be"
msg += " smaller or equal 2**n_informative({})={}"
raise ValueError(
msg.format(n_classes, n_clusters_per_class, n_informative, 2**n_informative)
)
if weights is not None:
# we define new variable, weight_, instead of modifying user defined parameter.
if len(weights) not in [n_classes, n_classes - 1]:
raise ValueError(
"Weights specified but incompatible with number of classes."
)
if len(weights) == n_classes - 1:
if isinstance(weights, list):
weights_ = weights + [1.0 - sum(weights)]
else:
weights_ = np.resize(weights, n_classes)
weights_[-1] = 1.0 - sum(weights_[:-1])
else:
weights_ = weights.copy()
else:
weights_ = [1.0 / n_classes] * n_classes
n_random = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
# Distribute samples among clusters by weight
n_samples_per_cluster = [
int(n_samples * weights_[k % n_classes] / n_clusters_per_class)
for k in range(n_clusters)
]
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative, generator).astype(
float, copy=False
)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.uniform(size=(n_clusters, 1))
centroids *= generator.uniform(size=(1, n_informative))
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.standard_normal(size=(n_samples, n_informative))
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.uniform(size=(n_informative, n_informative)) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.uniform(size=(n_informative, n_redundant)) - 1
X[:, n_informative : n_informative + n_redundant] = np.dot(
X[:, :n_informative], B
)
# Repeat some features
n = n_informative + n_redundant
if n_repeated > 0:
indices = ((n - 1) * generator.uniform(size=n_repeated) + 0.5).astype(np.intp)
X[:, n : n + n_repeated] = X[:, indices]
# Fill useless features
if n_random > 0:
X[:, -n_random:] = generator.standard_normal(size=(n_samples, n_random))
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.uniform(size=n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.uniform(size=n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.uniform(size=n_features)
X *= scale
indices = np.arange(n_features)
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
generator.shuffle(indices)
X[:, :] = X[:, indices]
if return_X_y:
return X, y
# feat_desc describes features in X
feat_desc = ["random"] * n_features
for i, index in enumerate(indices):
if index < n_informative:
feat_desc[i] = "informative"
elif n_informative <= index < n_informative + n_redundant:
feat_desc[i] = "redundant"
elif n <= index < n + n_repeated:
feat_desc[i] = "repeated"
parameters = {
"n_samples": n_samples,
"n_features": n_features,
"n_informative": n_informative,
"n_redundant": n_redundant,
"n_repeated": n_repeated,
"n_classes": n_classes,
"n_clusters_per_class": n_clusters_per_class,
"weights": weights,
"flip_y": flip_y,
"class_sep": class_sep,
"hypercube": hypercube,
"shift": shift,
"scale": scale,
"shuffle": shuffle,
"random_state": random_state,
"return_X_y": return_X_y,
}
bunch = Bunch(
DESCR=make_classification.__doc__,
parameters=parameters,
feature_info=feat_desc,
X=X,
y=y,
)
return bunch
|
Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of an ``n_informative``-dimensional hypercube with sides of
length ``2*class_sep`` and assigns an equal number of clusters to each
class. It introduces interdependence between these features and adds
various types of further noise to the data.
Without shuffling, ``X`` horizontally stacks features in the following
order: the primary ``n_informative`` features, followed by ``n_redundant``
linear combinations of the informative features, followed by ``n_repeated``
duplicates, drawn randomly with replacement from the informative and
redundant features. The remaining features are filled with random noise.
Thus, without shuffling, all useful features are contained in the columns
``X[:, :n_informative + n_redundant + n_repeated]``.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
n_features : int, default=20
The total number of features. These comprise ``n_informative``
informative features, ``n_redundant`` redundant features,
``n_repeated`` duplicated features and
``n_features-n_informative-n_redundant-n_repeated`` useless features
drawn at random.
n_informative : int, default=2
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension ``n_informative``. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, default=2
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, default=0
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, default=2
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, default=2
The number of clusters per class.
weights : array-like of shape (n_classes,) or (n_classes - 1,), default=None
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if ``len(weights) == n_classes - 1``,
then the last class weight is automatically inferred.
More than ``n_samples`` samples may be returned if the sum of
``weights`` exceeds 1. Note that the actual class proportions will
not exactly match ``weights`` when ``flip_y`` isn't 0.
    flip_y : float, default=0.01
        The fraction of samples whose class is assigned randomly. Larger
        values introduce noise in the labels and make the classification
        task harder. Note that the default setting flip_y > 0 might lead
        to fewer than ``n_classes`` distinct labels in ``y`` in some cases.
class_sep : float, default=1.0
The factor multiplying the hypercube size. Larger values spread
out the clusters/classes and make the classification task easier.
hypercube : bool, default=True
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, ndarray of shape (n_features,) or None, default=0.0
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, ndarray of shape (n_features,) or None, default=1.0
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : bool, default=True
Shuffle the samples and the features.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
    return_X_y : bool, default=True
        If True, a tuple ``(X, y)`` is returned instead of a Bunch object.
.. versionadded:: 1.7
Returns
-------
data : :class:`~sklearn.utils.Bunch` if `return_X_y` is `False`.
Dictionary-like object, with the following attributes.
DESCR : str
A description of the function that generated the dataset.
    parameters : dict
        A dictionary that stores the values of the arguments passed to the
        generator function.
    feature_info : list of length n_features
A description for each generated feature.
X : ndarray of shape (n_samples, n_features)
The generated samples.
y : ndarray of shape (n_samples,)
An integer label for class membership of each sample.
.. versionadded:: 1.7
(X, y) : tuple if ``return_X_y`` is True
A tuple of generated samples and labels.
See Also
--------
make_blobs : Simplified variant.
make_multilabel_classification : Unrelated generator for multilabel tasks.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
Examples
--------
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(random_state=42)
>>> X.shape
(100, 20)
>>> y.shape
(100,)
>>> list(y[:5])
[np.int64(0), np.int64(0), np.int64(1), np.int64(1), np.int64(0)]
|
make_classification
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
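A minimal usage sketch of make_classification based on the record above, illustrating the documented column ordering when shuffle=False; the sample counts and feature split are illustrative, and the rank check assumes the default shift=0.0 and scale=1.0, under which the redundant columns are exact linear combinations of the informative ones.
import numpy as np
from sklearn.datasets import make_classification
X, y = make_classification(
    n_samples=200,
    n_features=8,
    n_informative=3,
    n_redundant=2,
    n_repeated=1,
    shuffle=False,  # keep the documented column order
    random_state=0,
)
# Columns 0-2 are informative, 3-4 are linear combinations of them,
# column 5 duplicates one of the first five, and 6-7 are pure noise,
# so the rank of the first five columns should equal n_informative (3).
print(np.linalg.matrix_rank(X[:, :5]))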
|
def make_multilabel_classification(
n_samples=100,
n_features=20,
*,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=True,
sparse=False,
return_indicator="dense",
return_distributions=False,
random_state=None,
):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
For an example of usage, see
:ref:`sphx_glr_auto_examples_datasets_plot_random_multilabel_dataset.py`.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
n_features : int, default=20
The total number of features.
n_classes : int, default=5
The number of classes of the classification problem.
n_labels : int, default=2
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, default=50
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, default=True
If ``True``, some instances might not belong to any class.
sparse : bool, default=False
If ``True``, return a sparse feature matrix.
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : {'dense', 'sparse'} or False, default='dense'
If ``'dense'`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, default=False
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The generated samples.
Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
The label sets. Sparse matrix should be of CSR format.
p_c : ndarray of shape (n_classes,)
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : ndarray of shape (n_features, n_classes)
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
Examples
--------
>>> from sklearn.datasets import make_multilabel_classification
>>> X, y = make_multilabel_classification(n_labels=3, random_state=42)
>>> X.shape
(100, 20)
>>> y.shape
(100, 5)
>>> list(y[:3])
[array([1, 1, 0, 1, 0]), array([0, 1, 1, 1, 0]), array([0, 1, 0, 0, 0])]
"""
generator = check_random_state(random_state)
p_c = generator.uniform(size=n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.uniform(size=(n_features, n_classes))
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c, generator.uniform(size=y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.uniform(size=n_words))
return words, y
X_indices = array.array("i")
X_indptr = array.array("i", [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr), shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, "sparse", "dense"):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == "sparse"))
Y = lb.fit([range(n_classes)]).transform(Y)
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
|
Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
For an example of usage, see
:ref:`sphx_glr_auto_examples_datasets_plot_random_multilabel_dataset.py`.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
n_features : int, default=20
The total number of features.
n_classes : int, default=5
The number of classes of the classification problem.
n_labels : int, default=2
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, default=50
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, default=True
If ``True``, some instances might not belong to any class.
sparse : bool, default=False
If ``True``, return a sparse feature matrix.
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : {'dense', 'sparse'} or False, default='dense'
If ``'dense'`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, default=False
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The generated samples.
Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
The label sets. Sparse matrix should be of CSR format.
p_c : ndarray of shape (n_classes,)
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : ndarray of shape (n_features, n_classes)
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
Examples
--------
>>> from sklearn.datasets import make_multilabel_classification
>>> X, y = make_multilabel_classification(n_labels=3, random_state=42)
>>> X.shape
(100, 20)
>>> y.shape
(100, 5)
>>> list(y[:3])
[array([1, 1, 0, 1, 0]), array([0, 1, 1, 1, 0]), array([0, 1, 0, 0, 0])]
|
make_multilabel_classification
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
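A brief sketch, assuming illustrative sizes, showing how return_distributions=True exposes the class prior and the per-class word distributions described in the generative process above.
import numpy as np
from sklearn.datasets import make_multilabel_classification
X, Y, p_c, p_w_c = make_multilabel_classification(
    n_samples=50,
    n_features=20,
    n_classes=5,
    return_distributions=True,
    random_state=0,
)
print(p_c.shape, p_w_c.shape)               # (5,) and (20, 5)
print(abs(p_c.sum() - 1.0) < 1e-9)          # the class prior is normalized
print(np.allclose(p_w_c.sum(axis=0), 1.0))  # each class's word distribution sums to 1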
|
def make_hastie_10_2(n_samples=12000, *, random_state=None):
"""Generate data for binary classification used in Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=12000
The number of samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, 10)
The input samples.
y : ndarray of shape (n_samples,)
The output values.
See Also
--------
make_gaussian_quantiles : A generalization of this dataset approach.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
Examples
--------
>>> from sklearn.datasets import make_hastie_10_2
>>> X, y = make_hastie_10_2(n_samples=24000, random_state=42)
>>> X.shape
(24000, 10)
>>> y.shape
(24000,)
>>> list(y[:5])
[np.float64(-1.0), np.float64(1.0), np.float64(-1.0), np.float64(1.0),
np.float64(-1.0)]
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X**2.0).sum(axis=1) > 9.34).astype(np.float64, copy=False)
y[y == 0.0] = -1.0
return X, y
|
Generate data for binary classification used in Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=12000
The number of samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, 10)
The input samples.
y : ndarray of shape (n_samples,)
The output values.
See Also
--------
make_gaussian_quantiles : A generalization of this dataset approach.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
Examples
--------
>>> from sklearn.datasets import make_hastie_10_2
>>> X, y = make_hastie_10_2(n_samples=24000, random_state=42)
>>> X.shape
(24000, 10)
>>> y.shape
(24000,)
>>> list(y[:5])
[np.float64(-1.0), np.float64(1.0), np.float64(-1.0), np.float64(1.0),
np.float64(-1.0)]
|
make_hastie_10_2
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
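A short sketch (sample size illustrative) verifying that the labels follow the documented threshold rule on the squared norm of the features.
import numpy as np
from sklearn.datasets import make_hastie_10_2
X, y = make_hastie_10_2(n_samples=1000, random_state=0)
y_rule = np.where((X ** 2).sum(axis=1) > 9.34, 1.0, -1.0)
print(np.array_equal(y, y_rule))  # expected True: y is the 9.34 threshold rule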
|
def make_regression(
n_samples=100,
n_features=100,
*,
n_informative=10,
n_targets=1,
bias=0.0,
effective_rank=None,
tail_strength=0.5,
noise=0.0,
shuffle=True,
coef=False,
random_state=None,
):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
n_features : int, default=100
The number of features.
n_informative : int, default=10
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, default=1
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, default=0.0
The bias term in the underlying linear model.
effective_rank : int, default=None
If not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
If None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float, default=0.5
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None. When a float, it should be
between 0 and 1.
noise : float, default=0.0
The standard deviation of the gaussian noise applied to the output.
shuffle : bool, default=True
Shuffle the samples and the features.
coef : bool, default=False
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The input samples.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
The output values.
coef : ndarray of shape (n_features,) or (n_features, n_targets)
The coefficient of the underlying linear model. It is returned only if
coef is True.
Examples
--------
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_samples=5, n_features=2, noise=1, random_state=42)
>>> X
array([[ 0.4967, -0.1382 ],
[ 0.6476, 1.523],
[-0.2341, -0.2341],
[-0.4694, 0.5425],
[ 1.579, 0.7674]])
>>> y
array([ 6.737, 37.79, -10.27, 0.4017, 42.22])
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.standard_normal(size=(n_samples, n_features))
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(
n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator,
)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
    # by sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.uniform(
size=(n_informative, n_targets)
)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
|
Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
n_features : int, default=100
The number of features.
n_informative : int, default=10
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, default=1
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, default=0.0
The bias term in the underlying linear model.
effective_rank : int, default=None
If not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
If None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float, default=0.5
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None. When a float, it should be
between 0 and 1.
noise : float, default=0.0
The standard deviation of the gaussian noise applied to the output.
shuffle : bool, default=True
Shuffle the samples and the features.
coef : bool, default=False
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The input samples.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
The output values.
coef : ndarray of shape (n_features,) or (n_features, n_targets)
The coefficient of the underlying linear model. It is returned only if
coef is True.
Examples
--------
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_samples=5, n_features=2, noise=1, random_state=42)
>>> X
array([[ 0.4967, -0.1382 ],
[ 0.6476, 1.523],
[-0.2341, -0.2341],
[-0.4694, 0.5425],
[ 1.579, 0.7674]])
>>> y
array([ 6.737, 37.79, -10.27, 0.4017, 42.22])
|
make_regression
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
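A minimal sketch, with illustrative sizes, showing that with coef=True and noise=0.0 the targets are an exact linear function of the inputs and that at most n_informative coefficients are nonzero.
import numpy as np
from sklearn.datasets import make_regression
X, y, coef = make_regression(
    n_samples=20,
    n_features=5,
    n_informative=2,
    bias=3.0,
    noise=0.0,
    coef=True,
    random_state=0,
)
print(np.allclose(y, X @ coef + 3.0))  # expected True when noise=0
print(np.count_nonzero(coef))          # at most 2 nonzero coefficients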
|
def make_circles(
n_samples=100, *, shuffle=True, noise=None, random_state=None, factor=0.8
):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int or tuple of shape (2,), dtype=int, default=100
If int, it is the total number of points generated.
For odd numbers, the inner circle will have one point more than the
outer circle.
If two-element tuple, number of points in outer circle and inner
circle.
.. versionchanged:: 0.23
Added two-element tuple.
shuffle : bool, default=True
Whether to shuffle the samples.
noise : float, default=None
Standard deviation of Gaussian noise added to the data.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling and noise.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
factor : float, default=.8
Scale factor between inner and outer circle in the range `[0, 1)`.
Returns
-------
X : ndarray of shape (n_samples, 2)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels (0 or 1) for class membership of each sample.
Examples
--------
>>> from sklearn.datasets import make_circles
>>> X, y = make_circles(random_state=42)
>>> X.shape
(100, 2)
>>> y.shape
(100,)
>>> list(y[:5])
[np.int64(1), np.int64(1), np.int64(1), np.int64(0), np.int64(0)]
"""
if isinstance(n_samples, numbers.Integral):
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
else: # n_samples is a tuple
if len(n_samples) != 2:
raise ValueError("When a tuple, n_samples must have exactly two elements.")
n_samples_out, n_samples_in = n_samples
generator = check_random_state(random_state)
# so as not to have the first point = last point, we set endpoint=False
linspace_out = np.linspace(0, 2 * np.pi, n_samples_out, endpoint=False)
linspace_in = np.linspace(0, 2 * np.pi, n_samples_in, endpoint=False)
outer_circ_x = np.cos(linspace_out)
outer_circ_y = np.sin(linspace_out)
inner_circ_x = np.cos(linspace_in) * factor
inner_circ_y = np.sin(linspace_in) * factor
X = np.vstack(
[np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y)]
).T
y = np.hstack(
[np.zeros(n_samples_out, dtype=np.intp), np.ones(n_samples_in, dtype=np.intp)]
)
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
|
Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int or tuple of shape (2,), dtype=int, default=100
If int, it is the total number of points generated.
For odd numbers, the inner circle will have one point more than the
outer circle.
If two-element tuple, number of points in outer circle and inner
circle.
.. versionchanged:: 0.23
Added two-element tuple.
shuffle : bool, default=True
Whether to shuffle the samples.
noise : float, default=None
Standard deviation of Gaussian noise added to the data.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling and noise.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
factor : float, default=.8
Scale factor between inner and outer circle in the range `[0, 1)`.
Returns
-------
X : ndarray of shape (n_samples, 2)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels (0 or 1) for class membership of each sample.
Examples
--------
>>> from sklearn.datasets import make_circles
>>> X, y = make_circles(random_state=42)
>>> X.shape
(100, 2)
>>> y.shape
(100,)
>>> list(y[:5])
[np.int64(1), np.int64(1), np.int64(1), np.int64(0), np.int64(0)]
|
make_circles
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
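A small sketch (sizes illustrative) showing that without noise the points lie exactly on two circles whose radii are 1 and factor, and that a two-element n_samples tuple fixes the per-circle counts.
import numpy as np
from sklearn.datasets import make_circles
X, y = make_circles(n_samples=(60, 40), factor=0.5, random_state=0)
r = np.hypot(X[:, 0], X[:, 1])
print(np.allclose(r[y == 0], 1.0))  # outer circle, radius 1
print(np.allclose(r[y == 1], 0.5))  # inner circle, radius factor
print(np.bincount(y))               # 60 outer and 40 inner points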
|
def make_moons(n_samples=100, *, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles.
A simple toy dataset to visualize clustering and classification
algorithms. Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int or tuple of shape (2,), dtype=int, default=100
If int, the total number of points generated.
If two-element tuple, number of points in each of two moons.
.. versionchanged:: 0.23
Added two-element tuple.
shuffle : bool, default=True
Whether to shuffle the samples.
noise : float, default=None
Standard deviation of Gaussian noise added to the data.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling and noise.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, 2)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels (0 or 1) for class membership of each sample.
Examples
--------
>>> from sklearn.datasets import make_moons
>>> X, y = make_moons(n_samples=200, noise=0.2, random_state=42)
>>> X.shape
(200, 2)
>>> y.shape
(200,)
"""
if isinstance(n_samples, numbers.Integral):
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
else:
try:
n_samples_out, n_samples_in = n_samples
except ValueError as e:
raise ValueError(
"`n_samples` can be either an int or a two-element tuple."
) from e
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - 0.5
X = np.vstack(
[np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y)]
).T
y = np.hstack(
[np.zeros(n_samples_out, dtype=np.intp), np.ones(n_samples_in, dtype=np.intp)]
)
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
|
Make two interleaving half circles.
A simple toy dataset to visualize clustering and classification
algorithms. Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int or tuple of shape (2,), dtype=int, default=100
If int, the total number of points generated.
If two-element tuple, number of points in each of two moons.
.. versionchanged:: 0.23
Added two-element tuple.
shuffle : bool, default=True
Whether to shuffle the samples.
noise : float, default=None
Standard deviation of Gaussian noise added to the data.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling and noise.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, 2)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels (0 or 1) for class membership of each sample.
Examples
--------
>>> from sklearn.datasets import make_moons
>>> X, y = make_moons(n_samples=200, noise=0.2, random_state=42)
>>> X.shape
(200, 2)
>>> y.shape
(200,)
|
make_moons
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
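A brief sketch, assuming illustrative sizes, showing that a two-element n_samples tuple fixes the number of points in each moon.
import numpy as np
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=(150, 50), noise=0.1, random_state=0)
print(X.shape)         # (200, 2)
print(np.bincount(y))  # 150 points in the first moon, 50 in the second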
|
def make_blobs(
n_samples=100,
n_features=2,
*,
centers=None,
cluster_std=1.0,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=None,
return_centers=False,
):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int or array-like, default=100
If int, it is the total number of points equally divided among
clusters.
If array-like, each element of the sequence indicates
the number of samples per cluster.
.. versionchanged:: v0.20
one can now pass an array-like to the ``n_samples`` parameter
n_features : int, default=2
The number of features for each sample.
centers : int or array-like of shape (n_centers, n_features), default=None
The number of centers to generate, or the fixed center locations.
If n_samples is an int and centers is None, 3 centers are generated.
If n_samples is array-like, centers must be
either None or an array of length equal to the length of n_samples.
cluster_std : float or array-like of float, default=1.0
The standard deviation of the clusters.
center_box : tuple of float (min, max), default=(-10.0, 10.0)
The bounding box for each cluster center when centers are
generated at random.
shuffle : bool, default=True
Shuffle the samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
return_centers : bool, default=False
If True, then return the centers of each cluster.
.. versionadded:: 0.23
Returns
-------
X : ndarray of shape (n_samples, n_features)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels for cluster membership of each sample.
centers : ndarray of shape (n_centers, n_features)
The centers of each cluster. Only returned if
``return_centers=True``.
See Also
--------
make_classification : A more intricate variant.
Examples
--------
>>> from sklearn.datasets import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
>>> X, y = make_blobs(n_samples=[3, 3, 4], centers=None, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 1, 2, 0, 2, 2, 2, 1, 1, 0])
"""
generator = check_random_state(random_state)
if isinstance(n_samples, numbers.Integral):
# Set n_centers by looking at centers arg
if centers is None:
centers = 3
if isinstance(centers, numbers.Integral):
n_centers = centers
centers = generator.uniform(
center_box[0], center_box[1], size=(n_centers, n_features)
)
else:
centers = check_array(centers)
n_features = centers.shape[1]
n_centers = centers.shape[0]
else:
# Set n_centers by looking at [n_samples] arg
n_centers = len(n_samples)
if centers is None:
centers = generator.uniform(
center_box[0], center_box[1], size=(n_centers, n_features)
)
if not isinstance(centers, Iterable):
raise ValueError(
"Parameter `centers` must be array-like. Got {!r} instead".format(
centers
)
)
if len(centers) != n_centers:
raise ValueError(
"Length of `n_samples` not consistent with number of "
f"centers. Got n_samples = {n_samples} and centers = {centers}"
)
centers = check_array(centers)
n_features = centers.shape[1]
# stds: if cluster_std is given as list, it must be consistent
# with the n_centers
if hasattr(cluster_std, "__len__") and len(cluster_std) != n_centers:
raise ValueError(
"Length of `clusters_std` not consistent with "
"number of centers. Got centers = {} "
"and cluster_std = {}".format(centers, cluster_std)
)
if isinstance(cluster_std, numbers.Real):
cluster_std = np.full(len(centers), cluster_std)
if isinstance(n_samples, Iterable):
n_samples_per_center = n_samples
else:
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
cum_sum_n_samples = np.cumsum(n_samples_per_center)
X = np.empty(shape=(sum(n_samples_per_center), n_features), dtype=np.float64)
y = np.empty(shape=(sum(n_samples_per_center),), dtype=int)
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
start_idx = cum_sum_n_samples[i - 1] if i > 0 else 0
end_idx = cum_sum_n_samples[i]
X[start_idx:end_idx] = generator.normal(
loc=centers[i], scale=std, size=(n, n_features)
)
y[start_idx:end_idx] = i
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if return_centers:
return X, y, centers
else:
return X, y
|
Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int or array-like, default=100
If int, it is the total number of points equally divided among
clusters.
If array-like, each element of the sequence indicates
the number of samples per cluster.
.. versionchanged:: v0.20
one can now pass an array-like to the ``n_samples`` parameter
n_features : int, default=2
The number of features for each sample.
centers : int or array-like of shape (n_centers, n_features), default=None
The number of centers to generate, or the fixed center locations.
If n_samples is an int and centers is None, 3 centers are generated.
If n_samples is array-like, centers must be
either None or an array of length equal to the length of n_samples.
cluster_std : float or array-like of float, default=1.0
The standard deviation of the clusters.
center_box : tuple of float (min, max), default=(-10.0, 10.0)
The bounding box for each cluster center when centers are
generated at random.
shuffle : bool, default=True
Shuffle the samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
return_centers : bool, default=False
If True, then return the centers of each cluster.
.. versionadded:: 0.23
Returns
-------
X : ndarray of shape (n_samples, n_features)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels for cluster membership of each sample.
centers : ndarray of shape (n_centers, n_features)
The centers of each cluster. Only returned if
``return_centers=True``.
See Also
--------
make_classification : A more intricate variant.
Examples
--------
>>> from sklearn.datasets import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
>>> X, y = make_blobs(n_samples=[3, 3, 4], centers=None, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 1, 2, 0, 2, 2, 2, 1, 1, 0])
|
make_blobs
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
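A minimal sketch (cluster sizes and spreads illustrative) combining an array-like n_samples, per-cluster cluster_std and return_centers=True.
import numpy as np
from sklearn.datasets import make_blobs
X, y, centers = make_blobs(
    n_samples=[20, 30, 50],
    n_features=2,
    cluster_std=[0.5, 1.0, 2.0],
    return_centers=True,
    random_state=0,
)
print(centers.shape)   # (3, 2): one center per entry of n_samples
print(np.bincount(y))  # 20, 30 and 50 samples in clusters 0, 1 and 2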
|
def make_friedman1(n_samples=100, n_features=10, *, noise=0.0, random_state=None):
"""Generate the "Friedman #1" regression problem.
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
n_features : int, default=10
The number of features. Should be at least 5.
noise : float, default=0.0
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset noise. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The input samples.
y : ndarray of shape (n_samples,)
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> X, y = make_friedman1(random_state=42)
>>> X.shape
(100, 10)
>>> y.shape
(100,)
>>> list(y[:3])
[np.float64(16.8), np.float64(5.87), np.float64(9.46)]
"""
generator = check_random_state(random_state)
X = generator.uniform(size=(n_samples, n_features))
y = (
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3]
+ 5 * X[:, 4]
+ noise * generator.standard_normal(size=(n_samples))
)
return X, y
|
Generate the "Friedman #1" regression problem.
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 + 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
n_features : int, default=10
The number of features. Should be at least 5.
noise : float, default=0.0
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset noise. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The input samples.
y : ndarray of shape (n_samples,)
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> X, y = make_friedman1(random_state=42)
>>> X.shape
(100, 10)
>>> y.shape
(100,)
>>> list(y[:3])
[np.float64(16.8), np.float64(5.87), np.float64(9.46)]
|
make_friedman1
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
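A short sketch (sizes illustrative) confirming that with noise=0.0 only the first five features enter the documented formula for y.
import numpy as np
from sklearn.datasets import make_friedman1
X, y = make_friedman1(n_samples=30, n_features=10, noise=0.0, random_state=0)
y_formula = (
    10 * np.sin(np.pi * X[:, 0] * X[:, 1])
    + 20 * (X[:, 2] - 0.5) ** 2
    + 10 * X[:, 3]
    + 5 * X[:, 4]
)
print(np.allclose(y, y_formula))  # expected True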
|
def make_friedman2(n_samples=100, *, noise=0.0, random_state=None):
"""Generate the "Friedman #2" regression problem.
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
noise : float, default=0.0
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset noise. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, 4)
The input samples.
y : ndarray of shape (n_samples,)
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> X, y = make_friedman2(random_state=42)
>>> X.shape
(100, 4)
>>> y.shape
(100,)
>>> list(y[:3])
[np.float64(1229.4), np.float64(27.0), np.float64(65.6)]
"""
generator = check_random_state(random_state)
X = generator.uniform(size=(n_samples, 4))
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (
X[:, 0] ** 2 + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2
) ** 0.5 + noise * generator.standard_normal(size=(n_samples))
return X, y
|
Generate the "Friedman #2" regression problem.
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
noise : float, default=0.0
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset noise. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, 4)
The input samples.
y : ndarray of shape (n_samples,)
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> X, y = make_friedman2(random_state=42)
>>> X.shape
(100, 4)
>>> y.shape
(100,)
>>> list(y[:3])
[np.float64(1229.4), np.float64(27.0), np.float64(65.6)]
|
make_friedman2
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
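A brief sketch (sample size illustrative) checking that the four features fall in the documented intervals.
import numpy as np
from sklearn.datasets import make_friedman2
X, y = make_friedman2(n_samples=1000, noise=0.0, random_state=0)
print(X[:, 0].min() >= 0, X[:, 0].max() <= 100)
print(X[:, 1].min() >= 40 * np.pi, X[:, 1].max() <= 560 * np.pi)
print(X[:, 2].min() >= 0, X[:, 2].max() <= 1)
print(X[:, 3].min() >= 1, X[:, 3].max() <= 11)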
|
def make_friedman3(n_samples=100, *, noise=0.0, random_state=None):
"""Generate the "Friedman #3" regression problem.
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
noise : float, default=0.0
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset noise. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, 4)
The input samples.
y : ndarray of shape (n_samples,)
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
Examples
--------
>>> from sklearn.datasets import make_friedman3
>>> X, y = make_friedman3(random_state=42)
>>> X.shape
(100, 4)
>>> y.shape
(100,)
>>> list(y[:3])
[np.float64(1.54), np.float64(0.956), np.float64(0.414)]
"""
generator = check_random_state(random_state)
X = generator.uniform(size=(n_samples, 4))
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan(
(X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]
) + noise * generator.standard_normal(size=(n_samples))
return X, y
|
Generate the "Friedman #3" regression problem.
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
noise : float, default=0.0
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset noise. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, 4)
The input samples.
y : ndarray of shape (n_samples,)
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
Examples
--------
>>> from sklearn.datasets import make_friedman3
>>> X, y = make_friedman3(random_state=42)
>>> X.shape
(100, 4)
>>> y.shape
(100,)
>>> list(y[:3])
[np.float64(1.54), np.float64(0.956), np.float64(0.414)]
|
make_friedman3
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
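A short sketch (sample size illustrative) confirming the documented arctan formula when noise=0.0.
import numpy as np
from sklearn.datasets import make_friedman3
X, y = make_friedman3(n_samples=50, noise=0.0, random_state=0)
y_formula = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0])
print(np.allclose(y, y_formula))  # expected True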
|
def make_low_rank_matrix(
n_samples=100,
n_features=100,
*,
effective_rank=10,
tail_strength=0.5,
random_state=None,
):
"""Generate a mostly low rank matrix with bell-shaped singular values.
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
n_features : int, default=100
The number of features.
effective_rank : int, default=10
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float, default=0.5
The relative importance of the fat noisy tail of the singular values
profile. The value should be between 0 and 1.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The matrix.
Examples
--------
>>> from numpy.linalg import svd
>>> from sklearn.datasets import make_low_rank_matrix
>>> X = make_low_rank_matrix(
... n_samples=50,
... n_features=25,
... effective_rank=5,
... tail_strength=0.01,
... random_state=0,
... )
>>> X.shape
(50, 25)
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(
generator.standard_normal(size=(n_samples, n)),
mode="economic",
check_finite=False,
)
v, _ = linalg.qr(
generator.standard_normal(size=(n_features, n)),
mode="economic",
check_finite=False,
)
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = (1 - tail_strength) * np.exp(-1.0 * (singular_ind / effective_rank) ** 2)
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
|
Generate a mostly low rank matrix with bell-shaped singular values.
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
n_features : int, default=100
The number of features.
effective_rank : int, default=10
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float, default=0.5
The relative importance of the fat noisy tail of the singular values
profile. The value should be between 0 and 1.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The matrix.
Examples
--------
>>> from numpy.linalg import svd
>>> from sklearn.datasets import make_low_rank_matrix
>>> X = make_low_rank_matrix(
... n_samples=50,
... n_features=25,
... effective_rank=5,
... tail_strength=0.01,
... random_state=0,
... )
>>> X.shape
(50, 25)
|
make_low_rank_matrix
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
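A minimal sketch, with illustrative sizes and a deliberately weak tail, showing that most of the spectral energy sits in the first effective_rank singular values.
import numpy as np
from sklearn.datasets import make_low_rank_matrix
X = make_low_rank_matrix(
    n_samples=100,
    n_features=50,
    effective_rank=5,
    tail_strength=0.01,
    random_state=0,
)
s = np.linalg.svd(X, compute_uv=False)
energy = (s[:5] ** 2).sum() / (s ** 2).sum()
print(energy > 0.9)  # expected True for tail_strength=0.01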
|
def make_sparse_coded_signal(
n_samples,
*,
n_components,
n_features,
n_nonzero_coefs,
random_state=None,
):
"""Generate a signal as a sparse combination of dictionary elements.
Returns matrices `Y`, `D` and `X` such that `Y = XD` where `X` is of shape
`(n_samples, n_components)`, `D` is of shape `(n_components, n_features)`, and
each row of `X` has exactly `n_nonzero_coefs` non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
Number of samples to generate.
n_components : int
Number of components in the dictionary.
n_features : int
Number of features of the dataset to generate.
n_nonzero_coefs : int
Number of active (non-zero) coefficients in each sample.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
data : ndarray of shape (n_samples, n_features)
The encoded signal (Y).
dictionary : ndarray of shape (n_components, n_features)
The dictionary with normalized components (D).
code : ndarray of shape (n_samples, n_components)
        The sparse code such that each row of this matrix has exactly
n_nonzero_coefs non-zero items (X).
Examples
--------
>>> from sklearn.datasets import make_sparse_coded_signal
>>> data, dictionary, code = make_sparse_coded_signal(
... n_samples=50,
... n_components=100,
... n_features=10,
... n_nonzero_coefs=4,
... random_state=0
... )
>>> data.shape
(50, 10)
>>> dictionary.shape
(100, 10)
>>> code.shape
(50, 100)
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.standard_normal(size=(n_features, n_components))
D /= np.sqrt(np.sum((D**2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.standard_normal(size=n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
# Transpose to have shapes consistent with the rest of the API
Y, D, X = Y.T, D.T, X.T
return map(np.squeeze, (Y, D, X))
|
Generate a signal as a sparse combination of dictionary elements.
Returns matrices `Y`, `D` and `X` such that `Y = XD` where `X` is of shape
`(n_samples, n_components)`, `D` is of shape `(n_components, n_features)`, and
each row of `X` has exactly `n_nonzero_coefs` non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
Number of samples to generate.
n_components : int
Number of components in the dictionary.
n_features : int
Number of features of the dataset to generate.
n_nonzero_coefs : int
Number of active (non-zero) coefficients in each sample.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
data : ndarray of shape (n_samples, n_features)
The encoded signal (Y).
dictionary : ndarray of shape (n_components, n_features)
The dictionary with normalized components (D).
code : ndarray of shape (n_samples, n_components)
The sparse code such that each row of this matrix has exactly
n_nonzero_coefs non-zero items (X).
Examples
--------
>>> from sklearn.datasets import make_sparse_coded_signal
>>> data, dictionary, code = make_sparse_coded_signal(
... n_samples=50,
... n_components=100,
... n_features=10,
... n_nonzero_coefs=4,
... random_state=0
... )
>>> data.shape
(50, 10)
>>> dictionary.shape
(100, 10)
>>> code.shape
(50, 100)
|
make_sparse_coded_signal
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
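A short check, not taken from the source, that the three matrices returned by `make_sparse_coded_signal` satisfy the relation `Y = XD` documented above and that the per-row sparsity constraint holds:

import numpy as np
from sklearn.datasets import make_sparse_coded_signal

Y, D, X = make_sparse_coded_signal(
    n_samples=20, n_components=30, n_features=15, n_nonzero_coefs=3, random_state=0
)
# Each row of the code carries exactly `n_nonzero_coefs` non-zero entries ...
print(np.count_nonzero(X, axis=1))  # array of 3s
# ... and the signal is exactly the sparse combination of dictionary atoms.
print(np.allclose(Y, X @ D))        # True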
|
def make_sparse_uncorrelated(n_samples=100, n_features=10, *, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design.
This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
n_features : int, default=10
The number of features.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The input samples.
y : ndarray of shape (n_samples,)
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
Examples
--------
>>> from sklearn.datasets import make_sparse_uncorrelated
>>> X, y = make_sparse_uncorrelated(random_state=0)
>>> X.shape
(100, 10)
>>> y.shape
(100,)
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(
loc=(X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]),
scale=np.ones(n_samples),
)
return X, y
|
Generate a random regression problem with sparse uncorrelated design.
This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
n_features : int, default=10
The number of features.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The input samples.
y : ndarray of shape (n_samples,)
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
Examples
--------
>>> from sklearn.datasets import make_sparse_uncorrelated
>>> X, y = make_sparse_uncorrelated(random_state=0)
>>> X.shape
(100, 10)
>>> y.shape
(100,)
|
make_sparse_uncorrelated
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
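As an illustrative sketch (not part of the source), an ordinary least-squares fit on a larger sample should recover coefficients close to the `[1, 2, -2, -1.5, 0, ...]` pattern of the generating equation above; exact values depend on the noise realisation.

import numpy as np
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.linear_model import LinearRegression

X, y = make_sparse_uncorrelated(n_samples=5000, n_features=10, random_state=0)
coef = LinearRegression().fit(X, y).coef_
# Only the first four coefficients should be far from zero.
print(np.round(coef, 1))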
|
def make_spd_matrix(n_dim, *, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_dim, n_dim)
The random symmetric, positive-definite matrix.
See Also
--------
make_sparse_spd_matrix: Generate a sparse symmetric positive definite matrix.
Examples
--------
>>> from sklearn.datasets import make_spd_matrix
>>> make_spd_matrix(n_dim=2, random_state=42)
array([[2.093, 0.346],
[0.346, 0.218]])
"""
generator = check_random_state(random_state)
A = generator.uniform(size=(n_dim, n_dim))
U, _, Vt = linalg.svd(np.dot(A.T, A), check_finite=False)
X = np.dot(np.dot(U, 1.0 + np.diag(generator.uniform(size=n_dim))), Vt)
return X
|
Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_dim, n_dim)
The random symmetric, positive-definite matrix.
See Also
--------
make_sparse_spd_matrix: Generate a sparse symmetric positive definite matrix.
Examples
--------
>>> from sklearn.datasets import make_spd_matrix
>>> make_spd_matrix(n_dim=2, random_state=42)
array([[2.093, 0.346],
[0.346, 0.218]])
|
make_spd_matrix
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
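A quick sanity sketch (illustrative only) confirming the two properties promised by the docstring of `make_spd_matrix`, symmetry and positive definiteness:

import numpy as np
from sklearn.datasets import make_spd_matrix

A = make_spd_matrix(n_dim=5, random_state=0)
print(np.allclose(A, A.T))                # symmetric
print(np.all(np.linalg.eigvalsh(A) > 0))  # all eigenvalues strictly positive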
|
def make_sparse_spd_matrix(
n_dim=1,
*,
alpha=0.95,
norm_diag=False,
smallest_coef=0.1,
largest_coef=0.9,
sparse_format=None,
random_state=None,
):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int, default=1
The size of the random matrix to generate.
.. versionchanged:: 1.4
Renamed from ``dim`` to ``n_dim``.
alpha : float, default=0.95
The probability that a coefficient is zero (see notes). Larger values
enforce more sparsity. The value should be in the range [0, 1].
norm_diag : bool, default=False
Whether to normalize the output matrix to make the leading diagonal
elements all 1.
smallest_coef : float, default=0.1
The value of the smallest coefficient between 0 and 1.
largest_coef : float, default=0.9
The value of the largest coefficient between 0 and 1.
sparse_format : str, default=None
String representing the output sparse format, such as 'csc', 'csr', etc.
If ``None``, return a dense numpy ndarray.
.. versionadded:: 1.4
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
prec : ndarray or sparse matrix of shape (n_dim, n_dim)
The generated matrix. If ``sparse_format=None``, this would be an ndarray.
Otherwise, this will be a sparse matrix of the specified format.
See Also
--------
make_spd_matrix : Generate a random symmetric, positive-definite matrix.
Notes
-----
The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
Examples
--------
>>> from sklearn.datasets import make_sparse_spd_matrix
>>> make_sparse_spd_matrix(n_dim=4, norm_diag=False, random_state=42)
array([[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
"""
random_state = check_random_state(random_state)
chol = -sp.eye(n_dim)
aux = sp.random(
m=n_dim,
n=n_dim,
density=1 - alpha,
data_rvs=lambda x: random_state.uniform(
low=smallest_coef, high=largest_coef, size=x
),
random_state=random_state,
)
# We need to avoid "coo" format because it does not support slicing
aux = sp.tril(aux, k=-1, format="csc")
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(n_dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = chol.T @ chol
if norm_diag:
# Form the diagonal vector into a row matrix
d = sp.diags(1.0 / np.sqrt(prec.diagonal()))
prec = d @ prec @ d
if sparse_format is None:
return prec.toarray()
else:
return prec.asformat(sparse_format)
|
Generate a sparse symmetric positive definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int, default=1
The size of the random matrix to generate.
.. versionchanged:: 1.4
Renamed from ``dim`` to ``n_dim``.
alpha : float, default=0.95
The probability that a coefficient is zero (see notes). Larger values
enforce more sparsity. The value should be in the range [0, 1].
norm_diag : bool, default=False
Whether to normalize the output matrix to make the leading diagonal
elements all 1.
smallest_coef : float, default=0.1
The value of the smallest coefficient between 0 and 1.
largest_coef : float, default=0.9
The value of the largest coefficient between 0 and 1.
sparse_format : str, default=None
String representing the output sparse format, such as 'csc', 'csr', etc.
If ``None``, return a dense numpy ndarray.
.. versionadded:: 1.4
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
prec : ndarray or sparse matrix of shape (n_dim, n_dim)
The generated matrix. If ``sparse_format=None``, this would be an ndarray.
Otherwise, this will be a sparse matrix of the specified format.
See Also
--------
make_spd_matrix : Generate a random symmetric, positive-definite matrix.
Notes
-----
The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
Examples
--------
>>> from sklearn.datasets import make_sparse_spd_matrix
>>> make_sparse_spd_matrix(n_dim=4, norm_diag=False, random_state=42)
array([[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
|
make_sparse_spd_matrix
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
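A hedged sketch showing the effect of `alpha` on sparsity and the `sparse_format` option (added in 1.4 per the docstring above); the exact fraction of zeros varies with the random seed.

import numpy as np
from sklearn.datasets import make_sparse_spd_matrix

prec = make_sparse_spd_matrix(n_dim=20, alpha=0.95, random_state=0)
print(np.allclose(prec, prec.T))             # symmetric
print(np.all(np.linalg.eigvalsh(prec) > 0))  # positive definite
print(np.mean(prec == 0))                    # mostly zeros for a large alpha
# A sparse container can be requested directly instead of a dense ndarray.
csr = make_sparse_spd_matrix(n_dim=20, sparse_format="csr", random_state=0)
print(csr.format)                            # 'csr'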
|
def make_swiss_roll(n_samples=100, *, noise=0.0, random_state=None, hole=False):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of sample points on the Swiss Roll.
noise : float, default=0.0
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
hole : bool, default=False
If True generates the swiss roll with hole dataset.
Returns
-------
X : ndarray of shape (n_samples, 3)
The points.
t : ndarray of shape (n_samples,)
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective", 2nd edition,
Chapter 6, 2014.
https://homepages.ecs.vuw.ac.nz/~marslast/Code/Ch6/lle.py
Examples
--------
>>> from sklearn.datasets import make_swiss_roll
>>> X, t = make_swiss_roll(noise=0.05, random_state=0)
>>> X.shape
(100, 3)
>>> t.shape
(100,)
"""
generator = check_random_state(random_state)
if not hole:
t = 1.5 * np.pi * (1 + 2 * generator.uniform(size=n_samples))
y = 21 * generator.uniform(size=n_samples)
else:
corners = np.array(
[[np.pi * (1.5 + i), j * 7] for i in range(3) for j in range(3)]
)
corners = np.delete(corners, 4, axis=0)
corner_index = generator.choice(8, n_samples)
parameters = generator.uniform(size=(2, n_samples)) * np.array([[np.pi], [7]])
t, y = corners[corner_index].T + parameters
x = t * np.cos(t)
z = t * np.sin(t)
X = np.vstack((x, y, z))
X += noise * generator.standard_normal(size=(3, n_samples))
X = X.T
t = np.squeeze(t)
return X, t
|
Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of sample points on the Swiss Roll.
noise : float, default=0.0
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
hole : bool, default=False
If True generates the swiss roll with hole dataset.
Returns
-------
X : ndarray of shape (n_samples, 3)
The points.
t : ndarray of shape (n_samples,)
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective", 2nd edition,
Chapter 6, 2014.
https://homepages.ecs.vuw.ac.nz/~marslast/Code/Ch6/lle.py
Examples
--------
>>> from sklearn.datasets import make_swiss_roll
>>> X, t = make_swiss_roll(noise=0.05, random_state=0)
>>> X.shape
(100, 3)
>>> t.shape
(100,)
|
make_swiss_roll
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
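A small verification sketch (not from the source): with `noise=0.0` the first and third coordinates of the swiss roll are exact functions of the returned parameter `t`, and the `hole=True` variant simply removes a patch of the manifold.

import numpy as np
from sklearn.datasets import make_swiss_roll

X, t = make_swiss_roll(n_samples=200, noise=0.0, random_state=0)
print(np.allclose(X[:, 0], t * np.cos(t)))  # True
print(np.allclose(X[:, 2], t * np.sin(t)))  # True
X_hole, t_hole = make_swiss_roll(n_samples=200, hole=True, random_state=0)
print(X_hole.shape, t_hole.shape)           # (200, 3) (200,)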
|
def make_s_curve(n_samples=100, *, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of sample points on the S curve.
noise : float, default=0.0
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, 3)
The points.
t : ndarray of shape (n_samples,)
The univariate position of the sample according
to the main dimension of the points in the manifold.
Examples
--------
>>> from sklearn.datasets import make_s_curve
>>> X, t = make_s_curve(noise=0.05, random_state=0)
>>> X.shape
(100, 3)
>>> t.shape
(100,)
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.uniform(size=(1, n_samples)) - 0.5)
X = np.empty(shape=(n_samples, 3), dtype=np.float64)
X[:, 0] = np.sin(t)
X[:, 1] = 2.0 * generator.uniform(size=n_samples)
X[:, 2] = np.sign(t) * (np.cos(t) - 1)
X += noise * generator.standard_normal(size=(3, n_samples)).T
t = np.squeeze(t)
return X, t
|
Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of sample points on the S curve.
noise : float, default=0.0
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, 3)
The points.
t : ndarray of shape (n_samples,)
The univariate position of the sample according
to the main dimension of the points in the manifold.
Examples
--------
>>> from sklearn.datasets import make_s_curve
>>> X, t = make_s_curve(noise=0.05, random_state=0)
>>> X.shape
(100, 3)
>>> t.shape
(100,)
|
make_s_curve
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
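Analogously, a minimal sketch checking that the S-curve coordinates follow the construction in the code above when no noise is added:

import numpy as np
from sklearn.datasets import make_s_curve

X, t = make_s_curve(n_samples=200, noise=0.0, random_state=0)
print(np.allclose(X[:, 0], np.sin(t)))                     # True
print(np.allclose(X[:, 2], np.sign(t) * (np.cos(t) - 1)))  # True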
|
def make_gaussian_quantiles(
*,
mean=None,
cov=1.0,
n_samples=100,
n_features=2,
n_classes=3,
shuffle=True,
random_state=None,
):
r"""Generate isotropic Gaussian and label samples by quantile.
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array-like of shape (n_features,), default=None
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, default=1.0
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, default=100
The total number of points equally divided among classes.
n_features : int, default=2
The number of features for each sample.
n_classes : int, default=3
The number of classes.
shuffle : bool, default=True
Shuffle the samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
Examples
--------
>>> from sklearn.datasets import make_gaussian_quantiles
>>> X, y = make_gaussian_quantiles(random_state=42)
>>> X.shape
(100, 2)
>>> y.shape
(100,)
>>> list(y[:5])
[np.int64(2), np.int64(0), np.int64(1), np.int64(0), np.int64(2)]
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features), (n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack(
[
np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes),
]
)
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
|
Generate isotropic Gaussian and label samples by quantile.
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array-like of shape (n_features,), default=None
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, default=1.0
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, default=100
The total number of points equally divided among classes.
n_features : int, default=2
The number of features for each sample.
n_classes : int, default=3
The number of classes.
shuffle : bool, default=True
Shuffle the samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
Examples
--------
>>> from sklearn.datasets import make_gaussian_quantiles
>>> X, y = make_gaussian_quantiles(random_state=42)
>>> X.shape
(100, 2)
>>> y.shape
(100,)
>>> list(y[:5])
[np.int64(2), np.int64(0), np.int64(1), np.int64(0), np.int64(2)]
|
make_gaussian_quantiles
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
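An illustrative sketch, using nothing beyond the public API above, showing that the quantile-based labelling of `make_gaussian_quantiles` yields balanced classes when `n_samples` is a multiple of `n_classes`:

import numpy as np
from sklearn.datasets import make_gaussian_quantiles

X, y = make_gaussian_quantiles(
    mean=[1.0, 2.0], cov=2.0, n_samples=600, n_features=2, n_classes=3, random_state=0
)
# 600 samples split into chi-squared quantile shells around the mean -> 200 per class.
print(np.bincount(y))  # [200 200 200]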
|
def make_biclusters(
shape,
n_clusters,
*,
noise=0.0,
minval=10,
maxval=100,
shuffle=True,
random_state=None,
):
"""Generate a constant block diagonal structure array for biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : tuple of shape (n_rows, n_cols)
The shape of the result.
n_clusters : int
The number of biclusters.
noise : float, default=0.0
The standard deviation of the gaussian noise.
minval : float, default=10
Minimum value of a bicluster.
maxval : float, default=100
Maximum value of a bicluster.
shuffle : bool, default=True
Shuffle the samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape `shape`
The generated array.
rows : ndarray of shape (n_clusters, X.shape[0])
The indicators for cluster membership of each row.
cols : ndarray of shape (n_clusters, X.shape[1])
The indicators for cluster membership of each column.
See Also
--------
make_checkerboard: Generate an array with block checkerboard structure for
biclustering.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
Examples
--------
>>> from sklearn.datasets import make_biclusters
>>> data, rows, cols = make_biclusters(
... shape=(10, 20), n_clusters=2, random_state=42
... )
>>> data.shape
(10, 20)
>>> rows.shape
(2, 10)
>>> cols.shape
(2, 20)
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows, np.repeat(1.0 / n_clusters, n_clusters))
col_sizes = generator.multinomial(n_cols, np.repeat(1.0 / n_clusters, n_clusters))
row_labels = np.hstack(
[np.repeat(val, rep) for val, rep in zip(range(n_clusters), row_sizes)]
)
col_labels = np.hstack(
[np.repeat(val, rep) for val, rep in zip(range(n_clusters), col_sizes)]
)
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack([row_labels == c for c in range(n_clusters)])
cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
|
Generate an array with constant block diagonal structure for biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : tuple of shape (n_rows, n_cols)
The shape of the result.
n_clusters : int
The number of biclusters.
noise : float, default=0.0
The standard deviation of the gaussian noise.
minval : float, default=10
Minimum value of a bicluster.
maxval : float, default=100
Maximum value of a bicluster.
shuffle : bool, default=True
Shuffle the samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape `shape`
The generated array.
rows : ndarray of shape (n_clusters, X.shape[0])
The indicators for cluster membership of each row.
cols : ndarray of shape (n_clusters, X.shape[1])
The indicators for cluster membership of each column.
See Also
--------
make_checkerboard: Generate an array with block checkerboard structure for
biclustering.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
Examples
--------
>>> from sklearn.datasets import make_biclusters
>>> data, rows, cols = make_biclusters(
... shape=(10, 20), n_clusters=2, random_state=42
... )
>>> data.shape
(10, 20)
>>> rows.shape
(2, 10)
>>> cols.shape
(2, 20)
|
make_biclusters
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
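A hedged end-to-end sketch pairing the generator with `SpectralCoclustering` and `consensus_score` (both public scikit-learn APIs); a score close to 1.0 indicates the planted biclusters were recovered.

from sklearn.cluster import SpectralCoclustering
from sklearn.datasets import make_biclusters
from sklearn.metrics import consensus_score

data, rows, cols = make_biclusters(
    shape=(100, 80), n_clusters=3, noise=5, shuffle=True, random_state=0
)
model = SpectralCoclustering(n_clusters=3, random_state=0).fit(data)
# Compare the estimated biclusters with the planted ones.
score = consensus_score(model.biclusters_, (rows, cols))
print(round(score, 2))  # expected to be close to 1.0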
|
def make_checkerboard(
shape,
n_clusters,
*,
noise=0.0,
minval=10,
maxval=100,
shuffle=True,
random_state=None,
):
"""Generate an array with block checkerboard structure for biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : tuple of shape (n_rows, n_cols)
The shape of the result.
n_clusters : int or array-like of shape (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, default=0.0
The standard deviation of the gaussian noise.
minval : float, default=10
Minimum value of a bicluster.
maxval : float, default=100
Maximum value of a bicluster.
shuffle : bool, default=True
Shuffle the samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape `shape`
The generated array.
rows : ndarray of shape (n_clusters, X.shape[0])
The indicators for cluster membership of each row.
cols : ndarray of shape (n_clusters, X.shape[1])
The indicators for cluster membership of each column.
See Also
--------
make_biclusters : Generate an array with constant block diagonal structure
for biclustering.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
Examples
--------
>>> from sklearn.datasets import make_checkerboard
>>> data, rows, columns = make_checkerboard(shape=(300, 300), n_clusters=10,
... random_state=42)
>>> data.shape
(300, 300)
>>> rows.shape
(100, 300)
>>> columns.shape
(100, 300)
>>> print(rows[0][:5], columns[0][:5])
[False False False True False] [False False False False False]
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(
n_rows, np.repeat(1.0 / n_row_clusters, n_row_clusters)
)
col_sizes = generator.multinomial(
n_cols, np.repeat(1.0 / n_col_clusters, n_col_clusters)
)
row_labels = np.hstack(
[np.repeat(val, rep) for val, rep in zip(range(n_row_clusters), row_sizes)]
)
col_labels = np.hstack(
[np.repeat(val, rep) for val, rep in zip(range(n_col_clusters), col_sizes)]
)
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(
[
row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters)
]
)
cols = np.vstack(
[
col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters)
]
)
return result, rows, cols
|
Generate an array with block checkerboard structure for biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : tuple of shape (n_rows, n_cols)
The shape of the result.
n_clusters : int or array-like of shape (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, default=0.0
The standard deviation of the gaussian noise.
minval : float, default=10
Minimum value of a bicluster.
maxval : float, default=100
Maximum value of a bicluster.
shuffle : bool, default=True
Shuffle the samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape `shape`
The generated array.
rows : ndarray of shape (n_clusters, X.shape[0])
The indicators for cluster membership of each row.
cols : ndarray of shape (n_clusters, X.shape[1])
The indicators for cluster membership of each column.
See Also
--------
make_biclusters : Generate an array with constant block diagonal structure
for biclustering.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
Examples
--------
>>> from sklearn.datasets import make_checkerboard
>>> data, rows, columns = make_checkerboard(shape=(300, 300), n_clusters=10,
... random_state=42)
>>> data.shape
(300, 300)
>>> rows.shape
(100, 300)
>>> columns.shape
(100, 300)
>>> print(rows[0][:5], columns[0][:5])
[False False False True False] [False False False False False]
|
make_checkerboard
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_samples_generator.py
|
BSD-3-Clause
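A short illustrative sketch of the tuple form of `n_clusters`, which requests different numbers of row and column clusters; the indicator arrays then have one row per (row cluster, column cluster) pair.

from sklearn.datasets import make_checkerboard

data, rows, cols = make_checkerboard(
    shape=(30, 40), n_clusters=(4, 3), noise=1.0, shuffle=False, random_state=0
)
print(data.shape)              # (30, 40)
# 4 row clusters x 3 column clusters -> 12 indicator rows.
print(rows.shape, cols.shape)  # (12, 30) (12, 40)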
|
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for _ in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = int(header[b"NODATA_value"])
if nodata != -9999:
    # Remap the file's missing-data marker to the conventional -9999 value.
    M[M == nodata] = -9999
return M
|
Load a coverage file from an open file object.
This will return a numpy array of the given dtype
|
_load_coverage
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_species_distributions.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_species_distributions.py
|
BSD-3-Clause
|
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
names = F.readline().decode("ascii").strip().split(",")
rec = np.loadtxt(F, skiprows=0, delimiter=",", dtype="S22,f4,f4")
rec.dtype.names = names
return rec
|
Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
|
_load_csv
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_species_distributions.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_species_distributions.py
|
BSD-3-Clause
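Since `_load_csv` is a private helper, the import below reaches into the internal module shown above; this is only a minimal sketch with an in-memory byte stream to illustrate the record array it produces.

from io import BytesIO
from sklearn.datasets._species_distributions import _load_csv  # private helper

csv_bytes = BytesIO(
    b"species,dd long,dd lat\n"
    b"bradypus_variegatus,-65.5,-10.4\n"
    b"microryzomys_minutus,-67.8,-16.3\n"
)
rec = _load_csv(csv_bytes)
print(rec.dtype.names)  # ('species', 'dd long', 'dd lat')
print(rec["dd lat"])    # float32 latitudes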
|
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
|
Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
|
construct_grids
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_species_distributions.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_species_distributions.py
|
BSD-3-Clause
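A hedged sketch with a hypothetical, hand-built `Bunch` standing in for the batch returned by `fetch_species_distributions`; the attribute values below are made up to keep the grids tiny, and the import reaches into the internal module shown above.

from sklearn.datasets._species_distributions import construct_grids  # internal module
from sklearn.utils import Bunch

# Hypothetical batch; real values come from fetch_species_distributions().
batch = Bunch(
    x_left_lower_corner=0.0, y_left_lower_corner=0.0, Nx=4, Ny=3, grid_size=0.5
)
xgrid, ygrid = construct_grids(batch)
print(xgrid)  # [0.5 1.  1.5 2. ]
print(ygrid)  # [0.5 1.  1.5]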
|
def fetch_species_distributions(
*,
data_home=None,
download_if_missing=True,
n_retries=3,
delay=1.0,
):
"""Loader for species distribution dataset from Phillips et. al. (2006).
Read more in the :ref:`User Guide <species_distribution_dataset>`.
Parameters
----------
data_home : str or path-like, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
If False, raise an OSError if the data is not locally available
instead of trying to download the data from the source site.
n_retries : int, default=3
Number of retries when HTTP errors are encountered.
.. versionadded:: 1.5
delay : float, default=1.0
Number of seconds between retries.
.. versionadded:: 1.5
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured
at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1624,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (620,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
-----
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Examples
--------
>>> from sklearn.datasets import fetch_species_distributions
>>> species = fetch_species_distributions()
>>> species.train[:5]
array([(b'microryzomys_minutus', -64.7 , -17.85 ),
(b'microryzomys_minutus', -67.8333, -16.3333),
(b'microryzomys_minutus', -67.8833, -16.3 ),
(b'microryzomys_minutus', -67.8 , -16.2667),
(b'microryzomys_minutus', -67.9833, -15.9 )],
dtype=[('species', 'S22'), ('dd long', '<f4'), ('dd lat', '<f4')])
For a more extended example,
see :ref:`sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py`
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(
x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05,
)
dtype = np.int16
archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)
if not exists(archive_path):
if not download_if_missing:
raise OSError("Data not found and `download_if_missing` is False")
logger.info("Downloading species data from %s to %s" % (SAMPLES.url, data_home))
samples_path = _fetch_remote(
SAMPLES, dirname=data_home, n_retries=n_retries, delay=delay
)
with np.load(samples_path) as X: # samples.zip is a valid npz
for f in X.files:
fhandle = BytesIO(X[f])
if "train" in f:
train = _load_csv(fhandle)
if "test" in f:
test = _load_csv(fhandle)
remove(samples_path)
logger.info(
"Downloading coverage data from %s to %s" % (COVERAGES.url, data_home)
)
coverages_path = _fetch_remote(
COVERAGES, dirname=data_home, n_retries=n_retries, delay=delay
)
with np.load(coverages_path) as X: # coverages.zip is a valid npz
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
logger.debug(" - converting {}".format(f))
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
remove(coverages_path)
bunch = Bunch(coverages=coverages, test=test, train=train, **extra_params)
joblib.dump(bunch, archive_path, compress=9)
else:
bunch = joblib.load(archive_path)
return bunch
|
Loader for species distribution dataset from Phillips et al. (2006).
Read more in the :ref:`User Guide <species_distribution_dataset>`.
Parameters
----------
data_home : str or path-like, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
If False, raise an OSError if the data is not locally available
instead of trying to download the data from the source site.
n_retries : int, default=3
Number of retries when HTTP errors are encountered.
.. versionadded:: 1.5
delay : float, default=1.0
Number of seconds between retries.
.. versionadded:: 1.5
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured
at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1624,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (620,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
-----
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Examples
--------
>>> from sklearn.datasets import fetch_species_distributions
>>> species = fetch_species_distributions()
>>> species.train[:5]
array([(b'microryzomys_minutus', -64.7 , -17.85 ),
(b'microryzomys_minutus', -67.8333, -16.3333),
(b'microryzomys_minutus', -67.8833, -16.3 ),
(b'microryzomys_minutus', -67.8 , -16.2667),
(b'microryzomys_minutus', -67.9833, -15.9 )],
dtype=[('species', 'S22'), ('dd long', '<f4'), ('dd lat', '<f4')])
For a more extended example,
see :ref:`sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py`
|
fetch_species_distributions
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_species_distributions.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_species_distributions.py
|
BSD-3-Clause
|
def load_svmlight_file(
f,
*,
n_features=None,
dtype=np.float64,
multilabel=False,
zero_based="auto",
query_id=False,
offset=0,
length=-1,
):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix.
This format is a text-based format, with one sample per line. It does
not store zero-valued features and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
Parsing a text based source can be expensive. When repeatedly
working on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : str, path-like, file-like or int
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
.. versionchanged:: 1.2
Path-like objects are now accepted.
n_features : int, default=None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
n_features is only required if ``offset`` or ``length`` are passed a
non-default value.
dtype : numpy data type, default=np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
multilabel : bool, default=False
Samples may have several labels each (see
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html).
zero_based : bool or "auto", default="auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe when no ``offset`` or ``length`` is passed.
If ``offset`` or ``length`` are passed, the "auto" mode falls back
to ``zero_based=True`` to avoid having the heuristic check yield
inconsistent results on different segments of the file.
query_id : bool, default=False
If True, will return the query_id array for each file.
offset : int, default=0
Ignore the offset first bytes by seeking forward, then
discarding the following bytes up until the next new line
character.
length : int, default=-1
If strictly positive, stop reading any new line of data once the
position in the file has reached the (offset + length) bytes threshold.
Returns
-------
X : scipy.sparse matrix of shape (n_samples, n_features)
The data matrix.
y : ndarray of shape (n_samples,), or a list of tuples of length n_samples
The target. It is a list of tuples when ``multilabel=True``, else a
ndarray.
query_id : array of shape (n_samples,)
The query_id for each sample. Only returned when query_id is set to
True.
See Also
--------
load_svmlight_files : Similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(
load_svmlight_files(
[f],
n_features=n_features,
dtype=dtype,
multilabel=multilabel,
zero_based=zero_based,
query_id=query_id,
offset=offset,
length=length,
)
)
|
Load datasets in the svmlight / libsvm format into sparse CSR matrix.
This format is a text-based format, with one sample per line. It does
not store zero-valued features and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
Parsing a text based source can be expensive. When repeatedly
working on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : str, path-like, file-like or int
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
.. versionchanged:: 1.2
Path-like objects are now accepted.
n_features : int, default=None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
n_features is only required if ``offset`` or ``length`` are passed a
non-default value.
dtype : numpy data type, default=np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
multilabel : bool, default=False
Samples may have several labels each (see
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html).
zero_based : bool or "auto", default="auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe when no ``offset`` or ``length`` is passed.
If ``offset`` or ``length`` are passed, the "auto" mode falls back
to ``zero_based=True`` to avoid having the heuristic check yield
inconsistent results on different segments of the file.
query_id : bool, default=False
If True, will return the query_id array for each file.
offset : int, default=0
Ignore the offset first bytes by seeking forward, then
discarding the following bytes up until the next new line
character.
length : int, default=-1
If strictly positive, stop reading any new line of data once the
position in the file has reached the (offset + length) bytes threshold.
Returns
-------
X : scipy.sparse matrix of shape (n_samples, n_features)
The data matrix.
y : ndarray of shape (n_samples,), or a list of tuples of length n_samples
The target. It is a list of tuples when ``multilabel=True``, else a
ndarray.
query_id : array of shape (n_samples,)
The query_id for each sample. Only returned when query_id is set to
True.
See Also
--------
load_svmlight_files : Similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
|
load_svmlight_file
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_svmlight_format_io.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_svmlight_format_io.py
|
BSD-3-Clause
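A self-contained round-trip sketch (illustrative) using `dump_svmlight_file` together with `load_svmlight_file` on an in-memory binary buffer, which both functions accept:

import numpy as np
from io import BytesIO
from sklearn.datasets import dump_svmlight_file, load_svmlight_file

X = np.array([[0.0, 2.5, 0.0], [1.0, 0.0, 3.0]])
y = np.array([0, 1])

buf = BytesIO()
dump_svmlight_file(X, y, buf, zero_based=True)
buf.seek(0)

# Zeros are not stored in the file; the data comes back as a sparse CSR matrix.
X_loaded, y_loaded = load_svmlight_file(buf, n_features=3, zero_based=True)
print(np.allclose(X_loaded.toarray(), X))  # True
print(y_loaded)                            # [0. 1.]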
|
def load_svmlight_files(
files,
*,
n_features=None,
dtype=np.float64,
multilabel=False,
zero_based="auto",
query_id=False,
offset=0,
length=-1,
):
"""Load dataset from multiple files in SVMlight format.
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the sample vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : array-like, dtype=str, path-like, file-like or int
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
.. versionchanged:: 1.2
Path-like objects are now accepted.
n_features : int, default=None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
dtype : numpy data type, default=np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
multilabel : bool, default=False
Samples may have several labels each (see
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html).
zero_based : bool or "auto", default="auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe when no offset or length is passed.
If offset or length are passed, the "auto" mode falls back
to zero_based=True to avoid having the heuristic check yield
inconsistent results on different segments of the file.
query_id : bool, default=False
If True, will return the query_id array for each file.
offset : int, default=0
Ignore the offset first bytes by seeking forward, then
discarding the following bytes up until the next new line
character.
length : int, default=-1
If strictly positive, stop reading any new line of data once the
position in the file has reached the (offset + length) bytes threshold.
Returns
-------
[X1, y1, ..., Xn, yn] or [X1, y1, q1, ..., Xn, yn, qn]: list of arrays
Each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead (Xi, yi, qi)
triplets.
See Also
--------
load_svmlight_file: Similar function for loading a single file in this
format.
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data_train, target_train, data_test, target_test = load_svmlight_files(
["svmlight_file_train", "svmlight_file_test"]
)
return data_train, target_train, data_test, target_test
X_train, y_train, X_test, y_test = get_data()
"""
if (offset != 0 or length > 0) and zero_based == "auto":
# disable heuristic search to avoid getting inconsistent results on
# different segments of the file
zero_based = True
if (offset != 0 or length > 0) and n_features is None:
raise ValueError("n_features is required when offset or length is specified.")
r = [
_open_and_load(
f,
dtype,
multilabel,
bool(zero_based),
bool(query_id),
offset=offset,
length=length,
)
for f in files
]
if zero_based is False or (
zero_based == "auto" and all(len(tmp[1]) and np.min(tmp[1]) > 0 for tmp in r)
):
for _, indices, _, _, _ in r:
indices -= 1
n_f = max(ind[1].max() if len(ind[1]) else 0 for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError(
"n_features was set to {}, but input file contains {} features".format(
n_features, n_f
)
)
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
|
Load dataset from multiple files in SVMlight format.
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the sample vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : array-like, dtype=str, path-like, file-like or int
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
.. versionchanged:: 1.2
Path-like objects are now accepted.
n_features : int, default=None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
dtype : numpy data type, default=np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
multilabel : bool, default=False
Samples may have several labels each (see
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html).
zero_based : bool or "auto", default="auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe when no offset or length is passed.
If offset or length are passed, the "auto" mode falls back
to zero_based=True to avoid having the heuristic check yield
inconsistent results on different segments of the file.
query_id : bool, default=False
If True, will return the query_id array for each file.
offset : int, default=0
Ignore the offset first bytes by seeking forward, then
discarding the following bytes up until the next new line
character.
length : int, default=-1
If strictly positive, stop reading any new line of data once the
position in the file has reached the (offset + length) bytes threshold.
Returns
-------
[X1, y1, ..., Xn, yn] or [X1, y1, q1, ..., Xn, yn, qn]: list of arrays
Each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead (Xi, yi, qi)
triplets.
See Also
--------
load_svmlight_file: Similar function for loading a single file in this
format.
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data_train, target_train, data_test, target_test = load_svmlight_files(
["svmlight_file_train", "svmlight_file_test"]
)
return data_train, target_train, data_test, target_test
X_train, y_train, X_test, y_test = get_data()
|
load_svmlight_files
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_svmlight_format_io.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_svmlight_format_io.py
|
BSD-3-Clause
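A minimal usage sketch for the record above (added for illustration, not part of the original source; the temporary paths are hypothetical): two toy files are written with dump_svmlight_file and then loaded back in a single call, which guarantees that the returned matrices share the same number of columns.
import os
import tempfile
import numpy as np
from sklearn.datasets import dump_svmlight_file, load_svmlight_files

# Write two small svmlight files into a temporary directory.
tmpdir = tempfile.mkdtemp()
train_path = os.path.join(tmpdir, "train.svmlight")
test_path = os.path.join(tmpdir, "test.svmlight")
rng = np.random.RandomState(0)
dump_svmlight_file(rng.rand(5, 4), rng.randint(0, 2, size=5), train_path)
dump_svmlight_file(rng.rand(3, 4), rng.randint(0, 2, size=3), test_path)

# Loading both files in one call aligns the number of features.
X_train, y_train, X_test, y_test = load_svmlight_files([train_path, test_path])
assert X_train.shape[1] == X_test.shape[1]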
|
def dump_svmlight_file(
X,
y,
f,
*,
zero_based=True,
comment=None,
query_id=None,
multilabel=False,
):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero-valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : {array-like, sparse matrix}, shape = (n_samples,) or (n_samples, n_labels)
Target values. Class labels must be an
integer or float, or array-like objects of integer or float for
multilabel classifications.
f : str or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : bool, default=True
Whether column indices should be written zero-based (True) or one-based
(False).
comment : str or bytes, default=None
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like of shape (n_samples,), default=None
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel : bool, default=False
Samples may have several labels each (see
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html).
.. versionadded:: 0.17
parameter `multilabel` to support multilabel datasets.
Examples
--------
>>> from sklearn.datasets import dump_svmlight_file, make_classification
>>> X, y = make_classification(random_state=0)
>>> output_file = "my_dataset.svmlight"
>>> dump_svmlight_file(X, y, output_file) # doctest: +SKIP
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if b"\0" in comment:
raise ValueError("comment string contains NUL byte")
yval = check_array(y, accept_sparse="csr", ensure_2d=False)
if sp.issparse(yval):
if yval.shape[1] != 1 and not multilabel:
raise ValueError(
"expected y of shape (n_samples, 1), got %r" % (yval.shape,)
)
else:
if yval.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r" % (yval.shape,))
Xval = check_array(X, accept_sparse="csr")
if Xval.shape[0] != yval.shape[0]:
raise ValueError(
"X.shape[0] and y.shape[0] should be the same, got %r and %r instead."
% (Xval.shape[0], yval.shape[0])
)
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if yval is y and hasattr(yval, "sorted_indices"):
y = yval.sorted_indices()
else:
y = yval
if hasattr(y, "sort_indices"):
y.sort_indices()
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is None:
# NOTE: query_id is passed to Cython functions using a fused type on query_id.
# Yet as of Cython>=3.0, memory views can't be None, otherwise the runtime
# would not know which concrete implementation to dispatch the Python call to.
# TODO: simplify interfaces and implementations in _svmlight_format_fast.pyx.
query_id = np.array([], dtype=np.int32)
else:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError(
"expected query_id of shape (n_samples,), got %r" % (query_id.shape,)
)
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
|
Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero-valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : {array-like, sparse matrix}, shape = (n_samples,) or (n_samples, n_labels)
Target values. Class labels must be an
integer or float, or array-like objects of integer or float for
multilabel classifications.
f : str or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : bool, default=True
Whether column indices should be written zero-based (True) or one-based
(False).
comment : str or bytes, default=None
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like of shape (n_samples,), default=None
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel : bool, default=False
Samples may have several labels each (see
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html).
.. versionadded:: 0.17
parameter `multilabel` to support multilabel datasets.
Examples
--------
>>> from sklearn.datasets import dump_svmlight_file, make_classification
>>> X, y = make_classification(random_state=0)
>>> output_file = "my_dataset.svmlight"
>>> dump_svmlight_file(X, y, output_file) # doctest: +SKIP
|
dump_svmlight_file
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_svmlight_format_io.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_svmlight_format_io.py
|
BSD-3-Clause
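A hedged round-trip sketch for the record above (illustrative, not from the source): a small sparse matrix with a comment and a qid column is dumped to an in-memory buffer and read back with load_svmlight_file.
from io import BytesIO
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import dump_svmlight_file, load_svmlight_file

X = sp.csr_matrix(np.array([[0.0, 1.5, 0.0], [2.0, 0.0, 3.0]]))
y = np.array([0, 1])
qid = np.array([1, 1])

buf = BytesIO()
dump_svmlight_file(X, y, buf, comment="toy example", query_id=qid)
buf.seek(0)

# Reading back returns the same sparse structure, labels and query ids.
X2, y2, qid2 = load_svmlight_file(buf, query_id=True, n_features=3)
assert (X2 != X).nnz == 0 and (y2 == y).all() and (qid2 == qid).all()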
|
def _download_20newsgroups(target_dir, cache_path, n_retries, delay):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
os.makedirs(target_dir, exist_ok=True)
logger.info("Downloading dataset from %s (14 MB)", ARCHIVE.url)
archive_path = _fetch_remote(
ARCHIVE, dirname=target_dir, n_retries=n_retries, delay=delay
)
logger.debug("Decompressing %s", archive_path)
with tarfile.open(archive_path, "r:gz") as fp:
# Use filter="data" to prevent the most dangerous security issues.
# For more details, see
# https://docs.python.org/3.9/library/tarfile.html#tarfile.TarFile.extractall
fp.extractall(path=target_dir, filter="data")
with suppress(FileNotFoundError):
os.remove(archive_path)
# Store a zipped pickle
cache = dict(
train=load_files(train_path, encoding="latin1"),
test=load_files(test_path, encoding="latin1"),
)
compressed_content = codecs.encode(pickle.dumps(cache), "zlib_codec")
with open(cache_path, "wb") as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
|
Download the 20 newsgroups data and store it as a zipped pickle.
|
_download_20newsgroups
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_twenty_newsgroups.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_twenty_newsgroups.py
|
BSD-3-Clause
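An illustrative sketch (not from the source) of the cache round-trip used above: a dict is pickled, compressed with the "zlib_codec" codec, written to disk, then read and decoded back unchanged.
import codecs
import os
import pickle
import tempfile

cache = {"train": ["doc one", "doc two"], "test": ["doc three"]}

# Write the compressed pickle, mirroring the caching step above.
cache_path = os.path.join(tempfile.mkdtemp(), "cache.pkz")
with open(cache_path, "wb") as f:
    f.write(codecs.encode(pickle.dumps(cache), "zlib_codec"))

# Read it back and verify the round trip.
with open(cache_path, "rb") as f:
    restored = pickle.loads(codecs.decode(f.read(), "zlib_codec"))
assert restored == cache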
|
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
Parameters
----------
text : str
The text from which to remove the signature block.
"""
lines = text.strip().split("\n")
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip("-") == "":
break
if line_num > 0:
return "\n".join(lines[:line_num])
else:
return text
|
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
Parameters
----------
text : str
The text from which to remove the signature block.
|
strip_newsgroup_footer
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_twenty_newsgroups.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_twenty_newsgroups.py
|
BSD-3-Clause
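A standalone sketch of the signature-stripping heuristic described above, applied to a toy post; it mirrors the loop in the function rather than importing the private helper.
text = (
    "Interesting point about orbital mechanics.\n"
    "I agree entirely.\n"
    "--\n"
    "John Doe\n"
    "jdoe@example.com"
)
lines = text.strip().split("\n")
for line_num in range(len(lines) - 1, -1, -1):
    # a blank line or a line made only of hyphens marks the signature start
    if lines[line_num].strip().strip("-") == "":
        break
body = "\n".join(lines[:line_num]) if line_num > 0 else text
print(body)  # prints the two content lines without the signature block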
|
def fetch_20newsgroups(
*,
data_home=None,
subset="train",
categories=None,
shuffle=True,
random_state=42,
remove=(),
download_if_missing=True,
return_X_y=False,
n_retries=3,
delay=1.0,
):
"""Load the filenames and data from the 20 newsgroups dataset \
(classification).
Download it if necessary.
================= ==========
Classes 20
Samples total 18846
Dimensionality 1
Features text
================= ==========
Read more in the :ref:`User Guide <20newsgroups_dataset>`.
Parameters
----------
data_home : str or path-like, default=None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
subset : {'train', 'test', 'all'}, default='train'
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
categories : array-like, dtype=str, default=None
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle : bool, default=True
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, default=42
Determines random number generation for dataset shuffling. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
remove : tuple, default=()
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
download_if_missing : bool, default=True
If False, raise an OSError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns `(data.data, data.target)` instead of a Bunch
object.
.. versionadded:: 0.22
n_retries : int, default=3
Number of retries when HTTP errors are encountered.
.. versionadded:: 1.5
delay : float, default=1.0
Number of seconds between retries.
.. versionadded:: 1.5
Returns
-------
bunch : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : list of shape (n_samples,)
The data list to learn.
target: ndarray of shape (n_samples,)
The target labels.
filenames: list of shape (n_samples,)
The path to the location of the data.
DESCR: str
The full description of the dataset.
target_names: list of shape (n_classes,)
The names of target classes.
(data, target) : tuple if `return_X_y=True`
A tuple of two ndarrays. The first contains a 2D array of shape
(n_samples, n_classes) with each row representing one sample and each
column representing the features. The second array of shape
(n_samples,) contains the target samples.
.. versionadded:: 0.22
Examples
--------
>>> from sklearn.datasets import fetch_20newsgroups
>>> cats = ['alt.atheism', 'sci.space']
>>> newsgroups_train = fetch_20newsgroups(subset='train', categories=cats)
>>> list(newsgroups_train.target_names)
['alt.atheism', 'sci.space']
>>> newsgroups_train.filenames.shape
(1073,)
>>> newsgroups_train.target.shape
(1073,)
>>> newsgroups_train.target[:10]
array([0, 1, 1, 1, 0, 1, 1, 0, 0, 0])
"""
data_home = get_data_home(data_home=data_home)
cache_path = _pkl_filepath(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, "rb") as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(compressed_content, "zlib_codec")
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * "_")
print("Cache loading failed")
print(80 * "_")
print(e)
if cache is None:
if download_if_missing:
logger.info("Downloading 20news dataset. This may take a few minutes.")
cache = _download_20newsgroups(
target_dir=twenty_home,
cache_path=cache_path,
n_retries=n_retries,
delay=delay,
)
else:
raise OSError("20Newsgroups dataset not found")
if subset in ("train", "test"):
data = cache[subset]
elif subset == "all":
data_lst = list()
target = list()
filenames = list()
for subset in ("train", "test"):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
fdescr = load_descr("twenty_newsgroups.rst")
data.DESCR = fdescr
if "headers" in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if "footers" in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if "quotes" in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.isin(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
if return_X_y:
return data.data, data.target
return data
|
Load the filenames and data from the 20 newsgroups dataset (classification).
Download it if necessary.
================= ==========
Classes 20
Samples total 18846
Dimensionality 1
Features text
================= ==========
Read more in the :ref:`User Guide <20newsgroups_dataset>`.
Parameters
----------
data_home : str or path-like, default=None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
subset : {'train', 'test', 'all'}, default='train'
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
categories : array-like, dtype=str, default=None
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle : bool, default=True
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, default=42
Determines random number generation for dataset shuffling. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
remove : tuple, default=()
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
download_if_missing : bool, default=True
If False, raise an OSError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns `(data.data, data.target)` instead of a Bunch
object.
.. versionadded:: 0.22
n_retries : int, default=3
Number of retries when HTTP errors are encountered.
.. versionadded:: 1.5
delay : float, default=1.0
Number of seconds between retries.
.. versionadded:: 1.5
Returns
-------
bunch : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : list of shape (n_samples,)
The data list to learn.
target: ndarray of shape (n_samples,)
The target labels.
filenames: list of shape (n_samples,)
The path to the location of the data.
DESCR: str
The full description of the dataset.
target_names: list of shape (n_classes,)
The names of target classes.
(data, target) : tuple if `return_X_y=True`
A tuple of two ndarrays. The first contains a 2D array of shape
(n_samples, n_classes) with each row representing one sample and each
column representing the features. The second array of shape
(n_samples,) contains the target samples.
.. versionadded:: 0.22
Examples
--------
>>> from sklearn.datasets import fetch_20newsgroups
>>> cats = ['alt.atheism', 'sci.space']
>>> newsgroups_train = fetch_20newsgroups(subset='train', categories=cats)
>>> list(newsgroups_train.target_names)
['alt.atheism', 'sci.space']
>>> newsgroups_train.filenames.shape
(1073,)
>>> newsgroups_train.target.shape
(1073,)
>>> newsgroups_train.target[:10]
array([0, 1, 1, 1, 0, 1, 1, 0, 0, 0])
|
fetch_20newsgroups
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_twenty_newsgroups.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_twenty_newsgroups.py
|
BSD-3-Clause
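A usage sketch for the record above (added for illustration; the call downloads roughly 14 MB on first use, so it is not meant for offline environments): fetch two categories with headers, footers and quotes stripped.
from sklearn.datasets import fetch_20newsgroups

newsgroups_train = fetch_20newsgroups(
    subset="train",
    categories=["rec.autos", "sci.med"],
    remove=("headers", "footers", "quotes"),
    random_state=0,
)
print(newsgroups_train.target_names)  # ['rec.autos', 'sci.med']
print(len(newsgroups_train.data), newsgroups_train.target.shape)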
|
def fetch_20newsgroups_vectorized(
*,
subset="train",
remove=(),
data_home=None,
download_if_missing=True,
return_X_y=False,
normalize=True,
as_frame=False,
n_retries=3,
delay=1.0,
):
"""Load and vectorize the 20 newsgroups dataset (classification).
Download it if necessary.
This is a convenience function; the transformation is done using the
default settings for
:class:`~sklearn.feature_extraction.text.CountVectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom
:class:`~sklearn.feature_extraction.text.CountVectorizer`,
:class:`~sklearn.feature_extraction.text.HashingVectorizer`,
:class:`~sklearn.feature_extraction.text.TfidfTransformer` or
:class:`~sklearn.feature_extraction.text.TfidfVectorizer`.
The resulting counts are normalized using
:func:`sklearn.preprocessing.normalize` unless normalize is set to False.
================= ==========
Classes 20
Samples total 18846
Dimensionality 130107
Features real
================= ==========
Read more in the :ref:`User Guide <20newsgroups_dataset>`.
Parameters
----------
subset : {'train', 'test', 'all'}, default='train'
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
remove : tuple, default=()
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
data_home : str or path-like, default=None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
If False, raise an OSError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns ``(data.data, data.target)`` instead of a Bunch
object.
.. versionadded:: 0.20
normalize : bool, default=True
If True, normalizes each document's feature vector to unit norm using
:func:`sklearn.preprocessing.normalize`.
.. versionadded:: 0.22
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string, or categorical). The target is
a pandas DataFrame or Series depending on the number of
`target_columns`.
.. versionadded:: 0.24
n_retries : int, default=3
Number of retries when HTTP errors are encountered.
.. versionadded:: 1.5
delay : float, default=1.0
Number of seconds between retries.
.. versionadded:: 1.5
Returns
-------
bunch : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data: {sparse matrix, dataframe} of shape (n_samples, n_features)
The input data matrix. If ``as_frame`` is `True`, ``data`` is
a pandas DataFrame with sparse columns.
target: {ndarray, series} of shape (n_samples,)
The target labels. If ``as_frame`` is `True`, ``target`` is a
pandas Series.
target_names: list of shape (n_classes,)
The names of target classes.
DESCR: str
The full description of the dataset.
frame: dataframe of shape (n_samples, n_features + 1)
Only present when `as_frame=True`. Pandas DataFrame with ``data``
and ``target``.
.. versionadded:: 0.24
(data, target) : tuple if ``return_X_y`` is True
`data` and `target` would be of the format defined in the `Bunch`
description above.
.. versionadded:: 0.20
Examples
--------
>>> from sklearn.datasets import fetch_20newsgroups_vectorized
>>> newsgroups_vectorized = fetch_20newsgroups_vectorized(subset='test')
>>> newsgroups_vectorized.data.shape
(7532, 130107)
>>> newsgroups_vectorized.target.shape
(7532,)
"""
data_home = get_data_home(data_home=data_home)
filebase = "20newsgroup_vectorized"
if remove:
filebase += "remove-" + "-".join(remove)
target_file = _pkl_filepath(data_home, filebase + ".pkl")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(
data_home=data_home,
subset="train",
categories=None,
shuffle=True,
random_state=12,
remove=remove,
download_if_missing=download_if_missing,
n_retries=n_retries,
delay=delay,
)
data_test = fetch_20newsgroups(
data_home=data_home,
subset="test",
categories=None,
shuffle=True,
random_state=12,
remove=remove,
download_if_missing=download_if_missing,
n_retries=n_retries,
delay=delay,
)
if os.path.exists(target_file):
try:
X_train, X_test, feature_names = joblib.load(target_file)
except ValueError as e:
raise ValueError(
f"The cached dataset located in {target_file} was fetched "
"with an older scikit-learn version and it is not compatible "
"with the scikit-learn version imported. You need to "
f"manually delete the file: {target_file}."
) from e
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
feature_names = vectorizer.get_feature_names_out()
joblib.dump((X_train, X_test, feature_names), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
if normalize:
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
preprocessing.normalize(X_train, copy=False)
preprocessing.normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
fdescr = load_descr("twenty_newsgroups.rst")
frame = None
target_name = ["category_class"]
if as_frame:
frame, data, target = _convert_data_dataframe(
"fetch_20newsgroups_vectorized",
data,
target,
feature_names,
target_names=target_name,
sparse_data=True,
)
if return_X_y:
return data, target
return Bunch(
data=data,
target=target,
frame=frame,
target_names=target_names,
feature_names=feature_names,
DESCR=fdescr,
)
|
Load and vectorize the 20 newsgroups dataset (classification).
Download it if necessary.
This is a convenience function; the transformation is done using the
default settings for
:class:`~sklearn.feature_extraction.text.CountVectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom
:class:`~sklearn.feature_extraction.text.CountVectorizer`,
:class:`~sklearn.feature_extraction.text.HashingVectorizer`,
:class:`~sklearn.feature_extraction.text.TfidfTransformer` or
:class:`~sklearn.feature_extraction.text.TfidfVectorizer`.
The resulting counts are normalized using
:func:`sklearn.preprocessing.normalize` unless normalize is set to False.
================= ==========
Classes 20
Samples total 18846
Dimensionality 130107
Features real
================= ==========
Read more in the :ref:`User Guide <20newsgroups_dataset>`.
Parameters
----------
subset : {'train', 'test', 'all'}, default='train'
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
remove : tuple, default=()
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
data_home : str or path-like, default=None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
If False, raise an OSError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns ``(data.data, data.target)`` instead of a Bunch
object.
.. versionadded:: 0.20
normalize : bool, default=True
If True, normalizes each document's feature vector to unit norm using
:func:`sklearn.preprocessing.normalize`.
.. versionadded:: 0.22
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string, or categorical). The target is
a pandas DataFrame or Series depending on the number of
`target_columns`.
.. versionadded:: 0.24
n_retries : int, default=3
Number of retries when HTTP errors are encountered.
.. versionadded:: 1.5
delay : float, default=1.0
Number of seconds between retries.
.. versionadded:: 1.5
Returns
-------
bunch : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data: {sparse matrix, dataframe} of shape (n_samples, n_features)
The input data matrix. If ``as_frame`` is `True`, ``data`` is
a pandas DataFrame with sparse columns.
target: {ndarray, series} of shape (n_samples,)
The target labels. If ``as_frame`` is `True`, ``target`` is a
pandas Series.
target_names: list of shape (n_classes,)
The names of target classes.
DESCR: str
The full description of the dataset.
frame: dataframe of shape (n_samples, n_features + 1)
Only present when `as_frame=True`. Pandas DataFrame with ``data``
and ``target``.
.. versionadded:: 0.24
(data, target) : tuple if ``return_X_y`` is True
`data` and `target` would be of the format defined in the `Bunch`
description above.
.. versionadded:: 0.20
Examples
--------
>>> from sklearn.datasets import fetch_20newsgroups_vectorized
>>> newsgroups_vectorized = fetch_20newsgroups_vectorized(subset='test')
>>> newsgroups_vectorized.data.shape
(7532, 130107)
>>> newsgroups_vectorized.target.shape
(7532,)
|
fetch_20newsgroups_vectorized
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/_twenty_newsgroups.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/_twenty_newsgroups.py
|
BSD-3-Clause
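An end-to-end sketch for the record above (illustrative; it downloads the dataset on first use and the printed accuracy depends on the solver, it is not a figure quoted from the source): the pre-vectorized counts are fed directly to a linear classifier.
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.linear_model import LogisticRegression

train = fetch_20newsgroups_vectorized(subset="train", remove=("headers",))
test = fetch_20newsgroups_vectorized(subset="test", remove=("headers",))

clf = LogisticRegression(max_iter=1000)
clf.fit(train.data, train.target)
print(clf.score(test.data, test.target))  # held-out accuracy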
|
def test_20news_length_consistency(fetch_20newsgroups_fxt):
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
# Extract the full dataset
data = fetch_20newsgroups_fxt(subset="all")
assert len(data["data"]) == len(data.data)
assert len(data["target"]) == len(data.target)
assert len(data["filenames"]) == len(data.filenames)
|
Checks the length consistencies within the bunch.
This is a non-regression test for a bug present in 0.16.1.
|
test_20news_length_consistency
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_20news.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_20news.py
|
BSD-3-Clause
|
def test_post_process_frame(feature_names, target_names):
"""Check the behaviour of the post-processing function for splitting a dataframe."""
pd = pytest.importorskip("pandas")
X_original = pd.DataFrame(
{
"col_int_as_integer": [1, 2, 3],
"col_int_as_numeric": [1, 2, 3],
"col_float_as_real": [1.0, 2.0, 3.0],
"col_float_as_numeric": [1.0, 2.0, 3.0],
"col_categorical": ["a", "b", "c"],
"col_string": ["a", "b", "c"],
}
)
X, y = _post_process_frame(X_original, feature_names, target_names)
assert isinstance(X, pd.DataFrame)
if len(target_names) >= 2:
assert isinstance(y, pd.DataFrame)
elif len(target_names) == 1:
assert isinstance(y, pd.Series)
else:
assert y is None
|
Check the behaviour of the post-processing function for splitting a dataframe.
|
test_post_process_frame
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_arff_parser.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_arff_parser.py
|
BSD-3-Clause
|
def test_load_arff_from_gzip_file_error_parser():
"""An error will be raised if the parser is not known."""
# None of the input parameters are required to be accurate since the check
# of the parser will be carried out first.
err_msg = "Unknown parser: 'xxx'. Should be 'liac-arff' or 'pandas'"
with pytest.raises(ValueError, match=err_msg):
load_arff_from_gzip_file("xxx", "xxx", "xxx", "xxx", "xxx", "xxx")
|
An error will be raised if the parser is not known.
|
test_load_arff_from_gzip_file_error_parser
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_arff_parser.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_arff_parser.py
|
BSD-3-Clause
|
def test_pandas_arff_parser_strip_single_quotes(parser_func):
"""Check that we properly strip single quotes from the data."""
pd = pytest.importorskip("pandas")
arff_file = BytesIO(
textwrap.dedent(
"""
@relation 'toy'
@attribute 'cat_single_quote' {'A', 'B', 'C'}
@attribute 'str_single_quote' string
@attribute 'str_nested_quote' string
@attribute 'class' numeric
@data
'A','some text','\"expect double quotes\"',0
"""
).encode("utf-8")
)
columns_info = {
"cat_single_quote": {
"data_type": "nominal",
"name": "cat_single_quote",
},
"str_single_quote": {
"data_type": "string",
"name": "str_single_quote",
},
"str_nested_quote": {
"data_type": "string",
"name": "str_nested_quote",
},
"class": {
"data_type": "numeric",
"name": "class",
},
}
feature_names = [
"cat_single_quote",
"str_single_quote",
"str_nested_quote",
]
target_names = ["class"]
# We don't strip single quotes for string columns with the pandas parser.
expected_values = {
"cat_single_quote": "A",
"str_single_quote": (
"some text" if parser_func is _liac_arff_parser else "'some text'"
),
"str_nested_quote": (
'"expect double quotes"'
if parser_func is _liac_arff_parser
else "'\"expect double quotes\"'"
),
"class": 0,
}
_, _, frame, _ = parser_func(
arff_file,
output_arrays_type="pandas",
openml_columns_info=columns_info,
feature_names_to_select=feature_names,
target_names_to_select=target_names,
)
assert frame.columns.tolist() == feature_names + target_names
pd.testing.assert_series_equal(frame.iloc[0], pd.Series(expected_values, name=0))
|
Check that we properly strip single quotes from the data.
|
test_pandas_arff_parser_strip_single_quotes
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_arff_parser.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_arff_parser.py
|
BSD-3-Clause
|
def test_pandas_arff_parser_strip_double_quotes(parser_func):
"""Check that we properly strip double quotes from the data."""
pd = pytest.importorskip("pandas")
arff_file = BytesIO(
textwrap.dedent(
"""
@relation 'toy'
@attribute 'cat_double_quote' {"A", "B", "C"}
@attribute 'str_double_quote' string
@attribute 'str_nested_quote' string
@attribute 'class' numeric
@data
"A","some text","\'expect double quotes\'",0
"""
).encode("utf-8")
)
columns_info = {
"cat_double_quote": {
"data_type": "nominal",
"name": "cat_double_quote",
},
"str_double_quote": {
"data_type": "string",
"name": "str_double_quote",
},
"str_nested_quote": {
"data_type": "string",
"name": "str_nested_quote",
},
"class": {
"data_type": "numeric",
"name": "class",
},
}
feature_names = [
"cat_double_quote",
"str_double_quote",
"str_nested_quote",
]
target_names = ["class"]
expected_values = {
"cat_double_quote": "A",
"str_double_quote": "some text",
"str_nested_quote": "'expect double quotes'",
"class": 0,
}
_, _, frame, _ = parser_func(
arff_file,
output_arrays_type="pandas",
openml_columns_info=columns_info,
feature_names_to_select=feature_names,
target_names_to_select=target_names,
)
assert frame.columns.tolist() == feature_names + target_names
pd.testing.assert_series_equal(frame.iloc[0], pd.Series(expected_values, name=0))
|
Check that we properly strip double quotes from the data.
|
test_pandas_arff_parser_strip_double_quotes
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_arff_parser.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_arff_parser.py
|
BSD-3-Clause
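A hedged illustration of the pandas quoting behaviour that the test above relies on (this uses plain pandas.read_csv, not the internal parser): the double quotes used as quote characters are stripped while nested single quotes are left untouched.
from io import StringIO
import pandas as pd

row = "\"A\",\"some text\",\"'expect double quotes'\",0\n"
frame = pd.read_csv(StringIO(row), header=None)
print(frame.iloc[0].tolist())  # ['A', 'some text', "'expect double quotes'", 0]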
|
def test_pandas_arff_parser_strip_no_quotes(parser_func):
"""Check that we properly parse with no quotes characters."""
pd = pytest.importorskip("pandas")
arff_file = BytesIO(
textwrap.dedent(
"""
@relation 'toy'
@attribute 'cat_without_quote' {A, B, C}
@attribute 'str_without_quote' string
@attribute 'str_internal_quote' string
@attribute 'class' numeric
@data
A,some text,'internal' quote,0
"""
).encode("utf-8")
)
columns_info = {
"cat_without_quote": {
"data_type": "nominal",
"name": "cat_without_quote",
},
"str_without_quote": {
"data_type": "string",
"name": "str_without_quote",
},
"str_internal_quote": {
"data_type": "string",
"name": "str_internal_quote",
},
"class": {
"data_type": "numeric",
"name": "class",
},
}
feature_names = [
"cat_without_quote",
"str_without_quote",
"str_internal_quote",
]
target_names = ["class"]
expected_values = {
"cat_without_quote": "A",
"str_without_quote": "some text",
"str_internal_quote": "'internal' quote",
"class": 0,
}
_, _, frame, _ = parser_func(
arff_file,
output_arrays_type="pandas",
openml_columns_info=columns_info,
feature_names_to_select=feature_names,
target_names_to_select=target_names,
)
assert frame.columns.tolist() == feature_names + target_names
pd.testing.assert_series_equal(frame.iloc[0], pd.Series(expected_values, name=0))
|
Check that we properly parse with no quotes characters.
|
test_pandas_arff_parser_strip_no_quotes
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_arff_parser.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_arff_parser.py
|
BSD-3-Clause
|
def test_load_diabetes_raw():
"""Test to check that we load a scaled version by default but that we can
get an unscaled version when setting `scaled=False`."""
diabetes_raw = load_diabetes(scaled=False)
assert diabetes_raw.data.shape == (442, 10)
assert diabetes_raw.target.size == 442
assert len(diabetes_raw.feature_names) == 10
assert diabetes_raw.DESCR
diabetes_default = load_diabetes()
np.testing.assert_allclose(
scale(diabetes_raw.data) / (442**0.5), diabetes_default.data, atol=1e-04
)
|
Test to check that we load a scaled version by default but that we can
get an unscaled version when setting `scaled=False`.
|
test_load_diabetes_raw
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_base.py
|
BSD-3-Clause
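A sketch of the scaling relationship the test above asserts (illustrative): the default diabetes data equals the raw data standardized and divided by the square root of the number of samples.
import numpy as np
from sklearn.datasets import load_diabetes
from sklearn.preprocessing import scale

raw = load_diabetes(scaled=False)
default = load_diabetes()
n_samples = raw.data.shape[0]  # 442
np.testing.assert_allclose(
    scale(raw.data) / np.sqrt(n_samples), default.data, atol=1e-4
)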
|
def test_load_boston_error():
"""Check that we raise the ethical warning when trying to import `load_boston`."""
msg = "The Boston housing prices dataset has an ethical problem"
with pytest.raises(ImportError, match=msg):
from sklearn.datasets import load_boston # noqa: F401
# other non-existing function should raise the usual import error
msg = "cannot import name 'non_existing_function' from 'sklearn.datasets'"
with pytest.raises(ImportError, match=msg):
from sklearn.datasets import non_existing_function # noqa: F401
|
Check that we raise the ethical warning when trying to import `load_boston`.
|
test_load_boston_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_base.py
|
BSD-3-Clause
|
def test_corrupted_file_error_message(fetch_kddcup99_fxt, tmp_path):
"""Check that a nice error message is raised when cache is corrupted."""
kddcup99_dir = tmp_path / "kddcup99_10-py3"
kddcup99_dir.mkdir()
samples_path = kddcup99_dir / "samples"
with samples_path.open("wb") as f:
f.write(b"THIS IS CORRUPTED")
msg = (
"The cache for fetch_kddcup99 is invalid, please "
f"delete {kddcup99_dir} and run the fetch_kddcup99 again"
)
with pytest.raises(OSError, match=msg):
fetch_kddcup99_fxt(data_home=str(tmp_path))
|
Check that a nice error message is raised when cache is corrupted.
|
test_corrupted_file_error_message
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_kddcup99.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_kddcup99.py
|
BSD-3-Clause
|
def mock_data_home(tmp_path_factory):
"""Test fixture run once and common to all tests of this module"""
Image = pytest.importorskip("PIL.Image")
data_dir = tmp_path_factory.mktemp("scikit_learn_lfw_test")
lfw_home = data_dir / "lfw_home"
lfw_home.mkdir(parents=True, exist_ok=True)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = lfw_home / "lfw_funneled" / name
folder_name.mkdir(parents=True, exist_ok=True)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = folder_name / (name + "_%04d.jpg" % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
img = Image.fromarray(uniface.astype(np.uint8))
img.save(file_path)
# add some random file pollution to test robustness
(lfw_home / "lfw_funneled" / ".test.swp").write_bytes(
b"Text file to be ignored by the dataset loader."
)
# generate some pairing metadata files using the same format as LFW
with open(lfw_home / "pairsDevTrain.txt", "wb") as f:
f.write(b"10\n")
more_than_two = [name for name, count in counts.items() if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(("%s\t%d\t%d\n" % (name, first, second)).encode())
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = np_rng.choice(np.arange(counts[first_name]))
second_index = np_rng.choice(np.arange(counts[second_name]))
f.write(
(
"%s\t%d\t%s\t%d\n"
% (first_name, first_index, second_name, second_index)
).encode()
)
(lfw_home / "pairsDevTest.txt").write_bytes(
b"Fake place holder that won't be tested"
)
(lfw_home / "pairs.txt").write_bytes(b"Fake place holder that won't be tested")
yield data_dir
|
Test fixture run once and common to all tests of this module
|
mock_data_home
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_lfw.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_lfw.py
|
BSD-3-Clause
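A minimal sketch of the fake-image generation used by the fixture above (requires Pillow; the output file name is hypothetical): a random RGB array is saved as a 250x250 JPEG.
import numpy as np
from PIL import Image

rng = np.random.RandomState(42)
face = rng.randint(0, 255, size=(250, 250, 3)).astype(np.uint8)
Image.fromarray(face).save("fake_face_0000.jpg")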
|
def test_fetch_lfw_people_internal_cropping(mock_data_home):
"""Check that we properly crop the images.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/24942
"""
# If cropping was not done properly and we don't resize the images, they
# would keep their original size (250x250) and would not fit in the NumPy
# array pre-allocated based on the `slice_` parameter.
slice_ = (slice(70, 195), slice(78, 172))
lfw = fetch_lfw_people(
data_home=mock_data_home,
min_faces_per_person=3,
download_if_missing=False,
resize=None,
slice_=slice_,
)
assert lfw.images[0].shape == (
slice_[0].stop - slice_[0].start,
slice_[1].stop - slice_[1].start,
)
|
Check that we properly crop the images.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/24942
|
test_fetch_lfw_people_internal_cropping
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_lfw.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_lfw.py
|
BSD-3-Clause
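A plain NumPy sketch of the crop that the `slice_` parameter above describes; the zero-filled array stands in for a real 250x250 LFW picture.
import numpy as np

image = np.zeros((250, 250, 3))
slice_ = (slice(70, 195), slice(78, 172))
cropped = image[slice_[0], slice_[1]]
print(cropped.shape)  # (125, 94, 3)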
|
def test_fetch_openml_as_frame_true(
monkeypatch,
data_id,
dataset_params,
n_samples,
n_features,
n_targets,
parser,
gzip_response,
):
"""Check the behaviour of `fetch_openml` with `as_frame=True`.
Fetch by ID and/or name (depending if the file was previously cached).
"""
pd = pytest.importorskip("pandas")
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response)
bunch = fetch_openml(
as_frame=True,
cache=False,
parser=parser,
**dataset_params,
)
assert int(bunch.details["id"]) == data_id
assert isinstance(bunch, Bunch)
assert isinstance(bunch.frame, pd.DataFrame)
assert bunch.frame.shape == (n_samples, n_features + n_targets)
assert isinstance(bunch.data, pd.DataFrame)
assert bunch.data.shape == (n_samples, n_features)
if n_targets == 1:
assert isinstance(bunch.target, pd.Series)
assert bunch.target.shape == (n_samples,)
else:
assert isinstance(bunch.target, pd.DataFrame)
assert bunch.target.shape == (n_samples, n_targets)
assert bunch.categories is None
|
Check the behaviour of `fetch_openml` with `as_frame=True`.
Fetch by ID and/or name (depending if the file was previously cached).
|
test_fetch_openml_as_frame_true
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
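A usage sketch corresponding to the test above (illustrative; it contacts openml.org unless the dataset is already cached): fetching the iris dataset by its OpenML id with the pandas parser returns DataFrame/Series outputs.
from sklearn.datasets import fetch_openml

iris = fetch_openml(data_id=61, as_frame=True, parser="pandas")
print(iris.data.shape)  # (150, 4)
print(iris.target.value_counts())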
|
def test_fetch_openml_as_frame_false(
monkeypatch,
data_id,
dataset_params,
n_samples,
n_features,
n_targets,
parser,
):
"""Check the behaviour of `fetch_openml` with `as_frame=False`.
Fetch both by ID and/or name + version.
"""
pytest.importorskip("pandas")
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
bunch = fetch_openml(
as_frame=False,
cache=False,
parser=parser,
**dataset_params,
)
assert int(bunch.details["id"]) == data_id
assert isinstance(bunch, Bunch)
assert bunch.frame is None
assert isinstance(bunch.data, np.ndarray)
assert bunch.data.shape == (n_samples, n_features)
assert isinstance(bunch.target, np.ndarray)
if n_targets == 1:
assert bunch.target.shape == (n_samples,)
else:
assert bunch.target.shape == (n_samples, n_targets)
assert isinstance(bunch.categories, dict)
|
Check the behaviour of `fetch_openml` with `as_frame=False`.
Fetch both by ID and/or name + version.
|
test_fetch_openml_as_frame_false
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
|
def test_fetch_openml_consistency_parser(monkeypatch, data_id):
"""Check the consistency of the LIAC-ARFF and pandas parsers."""
pd = pytest.importorskip("pandas")
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
bunch_liac = fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
parser="liac-arff",
)
bunch_pandas = fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
parser="pandas",
)
# The data frames for the input features should match up to some numerical
# dtype conversions (e.g. float64 <=> Int64) due to limitations of the
# LIAC-ARFF parser.
data_liac, data_pandas = bunch_liac.data, bunch_pandas.data
def convert_numerical_dtypes(series):
pandas_series = data_pandas[series.name]
if pd.api.types.is_numeric_dtype(pandas_series):
return series.astype(pandas_series.dtype)
else:
return series
data_liac_with_fixed_dtypes = data_liac.apply(convert_numerical_dtypes)
pd.testing.assert_frame_equal(data_liac_with_fixed_dtypes, data_pandas)
# Let's also check that the .frame attributes also match
frame_liac, frame_pandas = bunch_liac.frame, bunch_pandas.frame
# Note that the .frame attribute is a superset of the .data attribute:
pd.testing.assert_frame_equal(frame_pandas[bunch_pandas.feature_names], data_pandas)
# However the remaining columns, typically the target(s), are not necessarily
# dtyped similarly by both parsers due to limitations of the LIAC-ARFF parser.
# Therefore, extra dtype conversions are required for those columns:
def convert_numerical_and_categorical_dtypes(series):
pandas_series = frame_pandas[series.name]
if pd.api.types.is_numeric_dtype(pandas_series):
return series.astype(pandas_series.dtype)
elif isinstance(pandas_series.dtype, pd.CategoricalDtype):
# Compare categorical features by renaming the categories: liac-arff
# uses strings to denote the categories, so we rename them to make
# them comparable to the pandas parser. Fixing this behavior in
# LIAC-ARFF would allow checking the consistency directly, but we do
# not plan to maintain LIAC-ARFF in the long term.
return series.cat.rename_categories(pandas_series.cat.categories)
else:
return series
frame_liac_with_fixed_dtypes = frame_liac.apply(
convert_numerical_and_categorical_dtypes
)
pd.testing.assert_frame_equal(frame_liac_with_fixed_dtypes, frame_pandas)
|
Check the consistency of the LIAC-ARFF and pandas parsers.
|
test_fetch_openml_consistency_parser
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
|
def test_fetch_openml_equivalence_array_dataframe(monkeypatch, parser):
"""Check the equivalence of the dataset when using `as_frame=False` and
`as_frame=True`.
"""
pytest.importorskip("pandas")
data_id = 61
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
bunch_as_frame_true = fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
parser=parser,
)
bunch_as_frame_false = fetch_openml(
data_id=data_id,
as_frame=False,
cache=False,
parser=parser,
)
assert_allclose(bunch_as_frame_false.data, bunch_as_frame_true.data)
assert_array_equal(bunch_as_frame_false.target, bunch_as_frame_true.target)
|
Check the equivalence of the dataset when using `as_frame=False` and
`as_frame=True`.
|
test_fetch_openml_equivalence_array_dataframe
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
|
def test_fetch_openml_iris_pandas(monkeypatch, parser):
"""Check fetching on a numerical only dataset with string labels."""
pd = pytest.importorskip("pandas")
CategoricalDtype = pd.api.types.CategoricalDtype
data_id = 61
data_shape = (150, 4)
target_shape = (150,)
frame_shape = (150, 5)
target_dtype = CategoricalDtype(
["Iris-setosa", "Iris-versicolor", "Iris-virginica"]
)
data_dtypes = [np.float64] * 4
data_names = ["sepallength", "sepalwidth", "petallength", "petalwidth"]
target_name = "class"
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
bunch = fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
parser=parser,
)
data = bunch.data
target = bunch.target
frame = bunch.frame
assert isinstance(data, pd.DataFrame)
assert np.all(data.dtypes == data_dtypes)
assert data.shape == data_shape
assert np.all(data.columns == data_names)
assert np.all(bunch.feature_names == data_names)
assert bunch.target_names == [target_name]
assert isinstance(target, pd.Series)
assert target.dtype == target_dtype
assert target.shape == target_shape
assert target.name == target_name
assert target.index.is_unique
assert isinstance(frame, pd.DataFrame)
assert frame.shape == frame_shape
assert np.all(frame.dtypes == data_dtypes + [target_dtype])
assert frame.index.is_unique
|
Check fetching on a numerical only dataset with string labels.
|
test_fetch_openml_iris_pandas
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
|
def test_fetch_openml_forcing_targets(monkeypatch, parser, target_column):
"""Check that we can force the target to not be the default target."""
pd = pytest.importorskip("pandas")
data_id = 61
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
bunch_forcing_target = fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
target_column=target_column,
parser=parser,
)
bunch_default = fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
parser=parser,
)
pd.testing.assert_frame_equal(bunch_forcing_target.frame, bunch_default.frame)
if isinstance(target_column, list):
pd.testing.assert_index_equal(
bunch_forcing_target.target.columns, pd.Index(target_column)
)
assert bunch_forcing_target.data.shape == (150, 3)
else:
assert bunch_forcing_target.target.name == target_column
assert bunch_forcing_target.data.shape == (150, 4)
|
Check that we can force the target to not be the default target.
|
test_fetch_openml_forcing_targets
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
|
def test_fetch_openml_equivalence_frame_return_X_y(monkeypatch, data_id, parser):
"""Check the behaviour of `return_X_y=True` when `as_frame=True`."""
pd = pytest.importorskip("pandas")
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
bunch = fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
return_X_y=False,
parser=parser,
)
X, y = fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
return_X_y=True,
parser=parser,
)
pd.testing.assert_frame_equal(bunch.data, X)
if isinstance(y, pd.Series):
pd.testing.assert_series_equal(bunch.target, y)
else:
pd.testing.assert_frame_equal(bunch.target, y)
|
Check the behaviour of `return_X_y=True` when `as_frame=True`.
|
test_fetch_openml_equivalence_frame_return_X_y
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
|
def test_fetch_openml_equivalence_array_return_X_y(monkeypatch, data_id, parser):
"""Check the behaviour of `return_X_y=True` when `as_frame=False`."""
pytest.importorskip("pandas")
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
bunch = fetch_openml(
data_id=data_id,
as_frame=False,
cache=False,
return_X_y=False,
parser=parser,
)
X, y = fetch_openml(
data_id=data_id,
as_frame=False,
cache=False,
return_X_y=True,
parser=parser,
)
assert_array_equal(bunch.data, X)
assert_array_equal(bunch.target, y)
|
Check the behaviour of `return_X_y=True` when `as_frame=False`.
|
test_fetch_openml_equivalence_array_return_X_y
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
|
def test_fetch_openml_difference_parsers(monkeypatch):
"""Check the difference between liac-arff and pandas parser."""
pytest.importorskip("pandas")
data_id = 1119
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
# When `as_frame=False`, the categories will be ordinally encoded with
# liac-arff parser while this is not the case with pandas parser.
as_frame = False
bunch_liac_arff = fetch_openml(
data_id=data_id,
as_frame=as_frame,
cache=False,
parser="liac-arff",
)
bunch_pandas = fetch_openml(
data_id=data_id,
as_frame=as_frame,
cache=False,
parser="pandas",
)
assert bunch_liac_arff.data.dtype.kind == "f"
assert bunch_pandas.data.dtype == "O"
|
Check the difference between liac-arff and pandas parser.
|
test_fetch_openml_difference_parsers
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
|
def datasets_column_names():
"""Returns the columns names for each dataset."""
return {
61: ["sepallength", "sepalwidth", "petallength", "petalwidth", "class"],
2: [
"family",
"product-type",
"steel",
"carbon",
"hardness",
"temper_rolling",
"condition",
"formability",
"strength",
"non-ageing",
"surface-finish",
"surface-quality",
"enamelability",
"bc",
"bf",
"bt",
"bw%2Fme",
"bl",
"m",
"chrom",
"phos",
"cbond",
"marvi",
"exptl",
"ferro",
"corr",
"blue%2Fbright%2Fvarn%2Fclean",
"lustre",
"jurofm",
"s",
"p",
"shape",
"thick",
"width",
"len",
"oil",
"bore",
"packing",
"class",
],
561: ["vendor", "MYCT", "MMIN", "MMAX", "CACH", "CHMIN", "CHMAX", "class"],
40589: [
"Mean_Acc1298_Mean_Mem40_Centroid",
"Mean_Acc1298_Mean_Mem40_Rolloff",
"Mean_Acc1298_Mean_Mem40_Flux",
"Mean_Acc1298_Mean_Mem40_MFCC_0",
"Mean_Acc1298_Mean_Mem40_MFCC_1",
"Mean_Acc1298_Mean_Mem40_MFCC_2",
"Mean_Acc1298_Mean_Mem40_MFCC_3",
"Mean_Acc1298_Mean_Mem40_MFCC_4",
"Mean_Acc1298_Mean_Mem40_MFCC_5",
"Mean_Acc1298_Mean_Mem40_MFCC_6",
"Mean_Acc1298_Mean_Mem40_MFCC_7",
"Mean_Acc1298_Mean_Mem40_MFCC_8",
"Mean_Acc1298_Mean_Mem40_MFCC_9",
"Mean_Acc1298_Mean_Mem40_MFCC_10",
"Mean_Acc1298_Mean_Mem40_MFCC_11",
"Mean_Acc1298_Mean_Mem40_MFCC_12",
"Mean_Acc1298_Std_Mem40_Centroid",
"Mean_Acc1298_Std_Mem40_Rolloff",
"Mean_Acc1298_Std_Mem40_Flux",
"Mean_Acc1298_Std_Mem40_MFCC_0",
"Mean_Acc1298_Std_Mem40_MFCC_1",
"Mean_Acc1298_Std_Mem40_MFCC_2",
"Mean_Acc1298_Std_Mem40_MFCC_3",
"Mean_Acc1298_Std_Mem40_MFCC_4",
"Mean_Acc1298_Std_Mem40_MFCC_5",
"Mean_Acc1298_Std_Mem40_MFCC_6",
"Mean_Acc1298_Std_Mem40_MFCC_7",
"Mean_Acc1298_Std_Mem40_MFCC_8",
"Mean_Acc1298_Std_Mem40_MFCC_9",
"Mean_Acc1298_Std_Mem40_MFCC_10",
"Mean_Acc1298_Std_Mem40_MFCC_11",
"Mean_Acc1298_Std_Mem40_MFCC_12",
"Std_Acc1298_Mean_Mem40_Centroid",
"Std_Acc1298_Mean_Mem40_Rolloff",
"Std_Acc1298_Mean_Mem40_Flux",
"Std_Acc1298_Mean_Mem40_MFCC_0",
"Std_Acc1298_Mean_Mem40_MFCC_1",
"Std_Acc1298_Mean_Mem40_MFCC_2",
"Std_Acc1298_Mean_Mem40_MFCC_3",
"Std_Acc1298_Mean_Mem40_MFCC_4",
"Std_Acc1298_Mean_Mem40_MFCC_5",
"Std_Acc1298_Mean_Mem40_MFCC_6",
"Std_Acc1298_Mean_Mem40_MFCC_7",
"Std_Acc1298_Mean_Mem40_MFCC_8",
"Std_Acc1298_Mean_Mem40_MFCC_9",
"Std_Acc1298_Mean_Mem40_MFCC_10",
"Std_Acc1298_Mean_Mem40_MFCC_11",
"Std_Acc1298_Mean_Mem40_MFCC_12",
"Std_Acc1298_Std_Mem40_Centroid",
"Std_Acc1298_Std_Mem40_Rolloff",
"Std_Acc1298_Std_Mem40_Flux",
"Std_Acc1298_Std_Mem40_MFCC_0",
"Std_Acc1298_Std_Mem40_MFCC_1",
"Std_Acc1298_Std_Mem40_MFCC_2",
"Std_Acc1298_Std_Mem40_MFCC_3",
"Std_Acc1298_Std_Mem40_MFCC_4",
"Std_Acc1298_Std_Mem40_MFCC_5",
"Std_Acc1298_Std_Mem40_MFCC_6",
"Std_Acc1298_Std_Mem40_MFCC_7",
"Std_Acc1298_Std_Mem40_MFCC_8",
"Std_Acc1298_Std_Mem40_MFCC_9",
"Std_Acc1298_Std_Mem40_MFCC_10",
"Std_Acc1298_Std_Mem40_MFCC_11",
"Std_Acc1298_Std_Mem40_MFCC_12",
"BH_LowPeakAmp",
"BH_LowPeakBPM",
"BH_HighPeakAmp",
"BH_HighPeakBPM",
"BH_HighLowRatio",
"BHSUM1",
"BHSUM2",
"BHSUM3",
"amazed.suprised",
"happy.pleased",
"relaxing.calm",
"quiet.still",
"sad.lonely",
"angry.aggresive",
],
1119: [
"age",
"workclass",
"fnlwgt:",
"education:",
"education-num:",
"marital-status:",
"occupation:",
"relationship:",
"race:",
"sex:",
"capital-gain:",
"capital-loss:",
"hours-per-week:",
"native-country:",
"class",
],
40966: [
"DYRK1A_N",
"ITSN1_N",
"BDNF_N",
"NR1_N",
"NR2A_N",
"pAKT_N",
"pBRAF_N",
"pCAMKII_N",
"pCREB_N",
"pELK_N",
"pERK_N",
"pJNK_N",
"PKCA_N",
"pMEK_N",
"pNR1_N",
"pNR2A_N",
"pNR2B_N",
"pPKCAB_N",
"pRSK_N",
"AKT_N",
"BRAF_N",
"CAMKII_N",
"CREB_N",
"ELK_N",
"ERK_N",
"GSK3B_N",
"JNK_N",
"MEK_N",
"TRKA_N",
"RSK_N",
"APP_N",
"Bcatenin_N",
"SOD1_N",
"MTOR_N",
"P38_N",
"pMTOR_N",
"DSCR1_N",
"AMPKA_N",
"NR2B_N",
"pNUMB_N",
"RAPTOR_N",
"TIAM1_N",
"pP70S6_N",
"NUMB_N",
"P70S6_N",
"pGSK3B_N",
"pPKCG_N",
"CDK5_N",
"S6_N",
"ADARB1_N",
"AcetylH3K9_N",
"RRP1_N",
"BAX_N",
"ARC_N",
"ERBB4_N",
"nNOS_N",
"Tau_N",
"GFAP_N",
"GluR3_N",
"GluR4_N",
"IL1B_N",
"P3525_N",
"pCASP9_N",
"PSD95_N",
"SNCA_N",
"Ubiquitin_N",
"pGSK3B_Tyr216_N",
"SHH_N",
"BAD_N",
"BCL2_N",
"pS6_N",
"pCFOS_N",
"SYP_N",
"H3AcK18_N",
"EGR1_N",
"H3MeK4_N",
"CaNA_N",
"class",
],
40945: [
"pclass",
"survived",
"name",
"sex",
"age",
"sibsp",
"parch",
"ticket",
"fare",
"cabin",
"embarked",
"boat",
"body",
"home.dest",
],
}
|
Returns the column names for each dataset.
|
datasets_column_names
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
|
def test_fetch_openml_types_inference(
monkeypatch,
data_id,
parser,
expected_n_categories,
expected_n_floats,
expected_n_ints,
gzip_response,
datasets_column_names,
datasets_missing_values,
):
"""Check that `fetch_openml` infer the right number of categories, integers, and
floats."""
pd = pytest.importorskip("pandas")
CategoricalDtype = pd.api.types.CategoricalDtype
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response)
bunch = fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
parser=parser,
)
frame = bunch.frame
n_categories = len(
[dtype for dtype in frame.dtypes if isinstance(dtype, CategoricalDtype)]
)
n_floats = len([dtype for dtype in frame.dtypes if dtype.kind == "f"])
n_ints = len([dtype for dtype in frame.dtypes if dtype.kind == "i"])
assert n_categories == expected_n_categories
assert n_floats == expected_n_floats
assert n_ints == expected_n_ints
assert frame.columns.tolist() == datasets_column_names[data_id]
frame_feature_to_n_nan = frame.isna().sum().to_dict()
for name, n_missing in frame_feature_to_n_nan.items():
expected_missing = datasets_missing_values[data_id].get(name, 0)
assert n_missing == expected_missing
|
Check that `fetch_openml` infers the right number of categories, integers, and
floats.
|
test_fetch_openml_types_inference
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
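The dtype bookkeeping in the test above can be reproduced on any DataFrame; a self-contained sketch with hypothetical columns, assuming pandas is installed:
import pandas as pd
frame = pd.DataFrame(
    {
        "color": pd.Categorical(["red", "blue", "red"]),
        "length": [1.0, 2.5, 3.0],
        "count": [1, 2, 3],
    }
)
# Count columns by dtype, mirroring the assertions in the test above.
n_categories = sum(isinstance(dt, pd.CategoricalDtype) for dt in frame.dtypes)
n_floats = sum(dt.kind == "f" for dt in frame.dtypes)
n_ints = sum(dt.kind == "i" for dt in frame.dtypes)
print(n_categories, n_floats, n_ints)  # 1 1 1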
|
def test_fetch_openml_requires_pandas_error(monkeypatch, params):
"""Check that we raise the proper errors when we require pandas."""
data_id = 1119
try:
check_pandas_support("test_fetch_openml_requires_pandas")
except ImportError:
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
err_msg = "requires pandas to be installed. Alternatively, explicitly"
with pytest.raises(ImportError, match=err_msg):
fetch_openml(data_id=data_id, **params)
else:
raise SkipTest("This test requires pandas to not be installed.")
|
Check that we raise the proper errors when we require pandas.
|
test_fetch_openml_requires_pandas_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
|
def test_fetch_openml_sparse_arff_error(monkeypatch, params, err_msg):
"""Check that we raise the expected error for sparse ARFF datasets and
    an incompatible set of parameters.
"""
pytest.importorskip("pandas")
data_id = 292
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
with pytest.raises(ValueError, match=err_msg):
fetch_openml(
data_id=data_id,
cache=False,
**params,
)
|
Check that we raise the expected error for sparse ARFF datasets and
an incompatible set of parameters.
|
test_fetch_openml_sparse_arff_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
|
def test_convert_arff_data_dataframe_warning_low_memory_pandas(monkeypatch):
"""Check that we raise a warning regarding the working memory when using
LIAC-ARFF parser."""
pytest.importorskip("pandas")
data_id = 1119
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
msg = "Could not adhere to working_memory config."
with pytest.warns(UserWarning, match=msg):
with config_context(working_memory=1e-6):
fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
parser="liac-arff",
)
|
Check that we raise a warning regarding the working memory when using
the LIAC-ARFF parser.
|
test_convert_arff_data_dataframe_warning_low_memory_pandas
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
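The `working_memory` option that the test above shrinks to force the warning is a global scikit-learn setting (in MiB); a quick sketch of how it is read and temporarily overridden:
from sklearn import config_context, get_config
print(get_config()["working_memory"])      # default is 1024 (MiB)
with config_context(working_memory=64):
    print(get_config()["working_memory"])  # 64 inside the context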
|
def test_fetch_openml_iris_warn_multiple_version(monkeypatch, gzip_response):
"""Check that a warning is raised when multiple versions exist and no version is
requested."""
data_id = 61
data_name = "iris"
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
msg = re.escape(
"Multiple active versions of the dataset matching the name"
" iris exist. Versions may be fundamentally different, "
"returning version 1. Available versions:\n"
"- version 1, status: active\n"
" url: https://www.openml.org/search?type=data&id=61\n"
"- version 3, status: active\n"
" url: https://www.openml.org/search?type=data&id=969\n"
)
with pytest.warns(UserWarning, match=msg):
fetch_openml(
name=data_name,
as_frame=False,
cache=False,
parser="liac-arff",
)
|
Check that a warning is raised when multiple versions exist and no version is
requested.
|
test_fetch_openml_iris_warn_multiple_version
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
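Because `pytest.warns(..., match=...)` interprets `match` as a regular expression, the URLs in the expected message (containing `?` and `&`) are wrapped in `re.escape` above. A minimal sketch of the same pattern with a made-up warning message:
import re
import warnings
import pytest
msg = "see https://www.openml.org/search?type=data&id=61"
with pytest.warns(UserWarning, match=re.escape(msg)):
    warnings.warn(msg, UserWarning)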
|
def test_fetch_openml_no_target(monkeypatch, gzip_response):
"""Check that we can get a dataset without target."""
data_id = 61
target_column = None
expected_observations = 150
expected_features = 5
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
data = fetch_openml(
data_id=data_id,
target_column=target_column,
cache=False,
as_frame=False,
parser="liac-arff",
)
assert data.data.shape == (expected_observations, expected_features)
assert data.target is None
|
Check that we can get a dataset without target.
|
test_fetch_openml_no_target
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
|
def test_missing_values_pandas(monkeypatch, gzip_response, parser):
"""check that missing values in categories are compatible with pandas
categorical"""
pytest.importorskip("pandas")
data_id = 42585
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response)
penguins = fetch_openml(
data_id=data_id,
cache=False,
as_frame=True,
parser=parser,
)
cat_dtype = penguins.data.dtypes["sex"]
# there are nans in the categorical
assert penguins.data["sex"].isna().any()
assert_array_equal(cat_dtype.categories, ["FEMALE", "MALE", "_"])
|
Check that missing values in categories are compatible with pandas
categoricals.
|
test_missing_values_pandas
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
|
def test_fetch_openml_inactive(monkeypatch, gzip_response, dataset_params):
"""Check that we raise a warning when the dataset is inactive."""
data_id = 40675
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
msg = "Version 1 of dataset glass2 is inactive,"
with pytest.warns(UserWarning, match=msg):
glass2 = fetch_openml(
cache=False, as_frame=False, parser="liac-arff", **dataset_params
)
assert glass2.data.shape == (163, 9)
assert glass2.details["id"] == "40675"
|
Check that we raise a warning when the dataset is inactive.
|
test_fetch_openml_inactive
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
|
def test_fetch_openml_overwrite_default_params_read_csv(monkeypatch):
"""Check that we can overwrite the default parameters of `read_csv`."""
pytest.importorskip("pandas")
data_id = 1590
_monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False)
common_params = {
"data_id": data_id,
"as_frame": True,
"cache": False,
"parser": "pandas",
}
# By default, the initial spaces are skipped. We checked that setting the parameter
# `skipinitialspace` to False will have an effect.
adult_without_spaces = fetch_openml(**common_params)
adult_with_spaces = fetch_openml(
**common_params, read_csv_kwargs={"skipinitialspace": False}
)
assert all(
cat.startswith(" ") for cat in adult_with_spaces.frame["class"].cat.categories
)
assert not any(
cat.startswith(" ")
for cat in adult_without_spaces.frame["class"].cat.categories
)
|
Check that we can overwrite the default parameters of `read_csv`.
|
test_fetch_openml_overwrite_default_params_read_csv
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
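The `skipinitialspace` behaviour checked above is plain `pandas.read_csv` behaviour; a self-contained sketch with hypothetical CSV data:
from io import StringIO
import pandas as pd
csv = "age,class\n39, >50K\n27, <=50K\n"
with_spaces = pd.read_csv(StringIO(csv), skipinitialspace=False)
without_spaces = pd.read_csv(StringIO(csv), skipinitialspace=True)
print(with_spaces["class"].tolist())     # [' >50K', ' <=50K']
print(without_spaces["class"].tolist())  # ['>50K', '<=50K']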
|
def test_fetch_openml_verify_checksum(monkeypatch, as_frame, tmpdir, parser):
"""Check that the checksum is working as expected."""
if as_frame or parser == "pandas":
pytest.importorskip("pandas")
data_id = 2
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
# create a temporary modified arff file
original_data_module = OPENML_TEST_DATA_MODULE + "." + f"id_{data_id}"
original_data_file_name = "data-v1-dl-1666876.arff.gz"
original_data_path = resources.files(original_data_module) / original_data_file_name
corrupt_copy_path = tmpdir / "test_invalid_checksum.arff"
with original_data_path.open("rb") as orig_file:
orig_gzip = gzip.open(orig_file, "rb")
data = bytearray(orig_gzip.read())
data[len(data) - 1] = 37
with gzip.GzipFile(corrupt_copy_path, "wb") as modified_gzip:
modified_gzip.write(data)
# Requests are already mocked by monkey_patch_webbased_functions.
# We want to reuse that mock for all requests except file download,
# hence creating a thin mock over the original mock
mocked_openml_url = sklearn.datasets._openml.urlopen
def swap_file_mock(request, *args, **kwargs):
url = request.get_full_url()
if url.endswith("data/v1/download/1666876/anneal.arff"):
with open(corrupt_copy_path, "rb") as f:
corrupted_data = f.read()
return _MockHTTPResponse(BytesIO(corrupted_data), is_gzip=True)
else:
return mocked_openml_url(request)
monkeypatch.setattr(sklearn.datasets._openml, "urlopen", swap_file_mock)
# validate failed checksum
with pytest.raises(ValueError) as exc:
sklearn.datasets.fetch_openml(
data_id=data_id, cache=False, as_frame=as_frame, parser=parser
)
# exception message should have file-path
assert exc.match("1666876")
|
Check that the checksum is working as expected.
|
test_fetch_openml_verify_checksum
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
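The corruption above flips a single byte of the payload, which is enough to change its checksum and make the comparison against the value published in the OpenML metadata fail. A tiny sketch of that property using hashlib directly, with a hypothetical payload:
import hashlib
data = bytearray(b"@RELATION anneal\n@DATA\n1,2,3\n")  # hypothetical payload
before = hashlib.md5(bytes(data)).hexdigest()
data[-1] = 37  # the same one-byte corruption as in the test above
after = hashlib.md5(bytes(data)).hexdigest()
print(before != after)  # True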
|
def test_fetch_openml_with_ignored_feature(monkeypatch, gzip_response, parser):
"""Check that we can load the "zoo" dataset.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/14340
"""
if parser == "pandas":
pytest.importorskip("pandas")
data_id = 62
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
dataset = sklearn.datasets.fetch_openml(
data_id=data_id, cache=False, as_frame=False, parser=parser
)
assert dataset is not None
# The dataset has 17 features, including 1 ignored (animal),
# so we assert that we don't have the ignored feature in the final Bunch
assert dataset["data"].shape == (101, 16)
assert "animal" not in dataset["feature_names"]
|
Check that we can load the "zoo" dataset.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/14340
|
test_fetch_openml_with_ignored_feature
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
|
def test_fetch_openml_leading_whitespace(monkeypatch):
"""Check that we can strip leading whitespace in pandas parser.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/25311
"""
pd = pytest.importorskip("pandas")
data_id = 1590
_monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False)
common_params = {"as_frame": True, "cache": False, "data_id": data_id}
adult_pandas = fetch_openml(parser="pandas", **common_params)
adult_liac_arff = fetch_openml(parser="liac-arff", **common_params)
pd.testing.assert_series_equal(
adult_pandas.frame["class"], adult_liac_arff.frame["class"]
)
|
Check that we can strip leading whitespace in pandas parser.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/25311
|
test_fetch_openml_leading_whitespace
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
|
def test_fetch_openml_quotechar_escapechar(monkeypatch):
"""Check that we can handle escapechar and single/double quotechar.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/25478
"""
pd = pytest.importorskip("pandas")
data_id = 42074
_monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False)
common_params = {"as_frame": True, "cache": False, "data_id": data_id}
adult_pandas = fetch_openml(parser="pandas", **common_params)
adult_liac_arff = fetch_openml(parser="liac-arff", **common_params)
pd.testing.assert_frame_equal(adult_pandas.frame, adult_liac_arff.frame)
|
Check that we can handle escapechar and single/double quotechar.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/25478
|
test_fetch_openml_quotechar_escapechar
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_openml.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_openml.py
|
BSD-3-Clause
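The two CSV features exercised above are handled by `pandas.read_csv`; a minimal sketch with hypothetical data (a non-default quote character and a backslash-escaped quote inside a quoted field):
from io import StringIO
import pandas as pd
csv = StringIO("id,text\n1,'with, comma'\n2,'it\\'s quoted'\n")
df = pd.read_csv(csv, quotechar="'", escapechar="\\")
print(df["text"].tolist())  # ['with, comma', "it's quoted"]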
|
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(
make_classification,
class_sep=class_sep,
n_redundant=0,
n_repeated=0,
flip_y=0,
shift=0,
scale=1,
shuffle=False,
)
for n_informative, weights, n_clusters_per_class in [
(2, [1], 1),
(2, [1 / 3] * 3, 1),
(2, [1 / 4] * 4, 1),
(2, [1 / 2] * 2, 2),
(2, [3 / 4, 1 / 4], 2),
(10, [1 / 3] * 3, 10),
(64, [1], 1),
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(
n_samples=n_samples,
n_classes=n_classes,
weights=weights,
n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube,
random_state=0,
)
assert X.shape == (n_samples, n_informative)
assert y.shape == (n_samples,)
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype="|S{0}".format(signs.strides[0])).ravel()
unique_signs, cluster_index = np.unique(signs, return_inverse=True)
assert len(unique_signs) == n_clusters, (
"Wrong number of clusters, or not in distinct quadrants"
)
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert len(clusters) == n_clusters_per_class, (
"Wrong number of clusters per class"
)
assert len(clusters_by_class) == n_classes, "Wrong number of classes"
assert_array_almost_equal(
np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples per class",
)
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(
np.abs(centroid) / class_sep,
np.ones(n_informative),
decimal=5,
err_msg="Clusters are not centered on hypercube vertices",
)
else:
with pytest.raises(AssertionError):
assert_array_almost_equal(
np.abs(centroid) / class_sep,
np.ones(n_informative),
decimal=5,
err_msg=(
"Clusters should not be centered on hypercube vertices"
),
)
with pytest.raises(ValueError):
make(n_features=2, n_informative=2, n_classes=5, n_clusters_per_class=1)
with pytest.raises(ValueError):
make(n_features=2, n_informative=2, n_classes=3, n_clusters_per_class=2)
|
Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
|
test_make_classification_informative_features
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_samples_generator.py
|
BSD-3-Clause
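The sign-view trick above works because, with a huge `class_sep` and no shift/scale, every cluster sits near a distinct hypercube vertex, so the sign pattern of a sample identifies its cluster. A smaller sketch of the same idea:
import numpy as np
from sklearn.datasets import make_classification
X, y = make_classification(
    n_samples=200,
    n_features=2,
    n_informative=2,
    n_redundant=0,
    n_repeated=0,
    n_classes=2,
    n_clusters_per_class=1,
    class_sep=1e6,
    flip_y=0,
    shift=0,
    scale=1,
    shuffle=False,
    random_state=0,
)
# Each row's sign pattern identifies its cluster (one cluster per class here).
print(len(np.unique(np.sign(X), axis=0)))  # 2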
|
def test_make_classification_return_x_y():
"""
Test that make_classification returns a Bunch when return_X_y is False.
    Also check that bunch.X is the same as X.
"""
kwargs = {
"n_samples": 100,
"n_features": 20,
"n_informative": 5,
"n_redundant": 1,
"n_repeated": 1,
"n_classes": 3,
"n_clusters_per_class": 2,
"weights": None,
"flip_y": 0.01,
"class_sep": 1.0,
"hypercube": True,
"shift": 0.0,
"scale": 1.0,
"shuffle": True,
"random_state": 42,
"return_X_y": True,
}
X, y = make_classification(**kwargs)
kwargs["return_X_y"] = False
bunch = make_classification(**kwargs)
assert (
hasattr(bunch, "DESCR")
and hasattr(bunch, "parameters")
and hasattr(bunch, "feature_info")
and hasattr(bunch, "X")
and hasattr(bunch, "y")
)
def count(str_):
return bunch.feature_info.count(str_)
assert np.array_equal(X, bunch.X)
assert np.array_equal(y, bunch.y)
assert bunch.DESCR == make_classification.__doc__
assert bunch.parameters == kwargs
assert count("informative") == kwargs["n_informative"]
assert count("redundant") == kwargs["n_redundant"]
assert count("repeated") == kwargs["n_repeated"]
|
Test that make_classification returns a Bunch when return_X_y is False.
Also check that bunch.X is the same as X.
|
test_make_classification_return_x_y
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_samples_generator.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_samples_generator.py
|
BSD-3-Clause
|
def _load_svmlight_local_test_file(filename, **kwargs):
"""
Helper to load resource `filename` with `importlib.resources`
"""
data_path = _svmlight_local_test_file_path(filename)
with data_path.open("rb") as f:
return load_svmlight_file(f, **kwargs)
|
Helper to load resource `filename` with `importlib.resources`
|
_load_svmlight_local_test_file
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_svmlight_format.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_svmlight_format.py
|
BSD-3-Clause
|
def test_load_large_qid():
"""
    Load a large libsvm / svmlight file with a qid attribute. Tests 64-bit query IDs.
"""
data = b"\n".join(
(
"3 qid:{0} 1:0.53 2:0.12\n2 qid:{0} 1:0.13 2:0.1".format(i).encode()
for i in range(1, 40 * 1000 * 1000)
)
)
X, y, qid = load_svmlight_file(BytesIO(data), query_id=True)
assert_array_equal(y[-4:], [3, 2, 3, 2])
assert_array_equal(np.unique(qid), np.arange(1, 40 * 1000 * 1000))
|
Load a large libsvm / svmlight file with a qid attribute. Tests 64-bit query IDs.
|
test_load_large_qid
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_svmlight_format.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_svmlight_format.py
|
BSD-3-Clause
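A much smaller file in the same format shows the shape of the `query_id=True` return value without building the 40-million-line payload above:
from io import BytesIO
from sklearn.datasets import load_svmlight_file
data = b"3 qid:1 1:0.53 2:0.12\n2 qid:1 1:0.13 2:0.10\n1 qid:2 2:0.95\n"
X, y, qid = load_svmlight_file(BytesIO(data), query_id=True)
print(y)        # [3. 2. 1.]
print(qid)      # [1 1 2]
print(X.shape)  # (3, 2)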
|
def test_multilabel_y_explicit_zeros(tmp_path, csr_container):
"""
Ensure that if y contains explicit zeros (i.e. elements of y.data equal to
0) then those explicit zeros are not encoded.
"""
save_path = str(tmp_path / "svm_explicit_zero")
rng = np.random.RandomState(42)
X = rng.randn(3, 5).astype(np.float64)
indptr = np.array([0, 2, 3, 6])
indices = np.array([0, 2, 2, 0, 1, 2])
# The first and last element are explicit zeros.
data = np.array([0, 1, 1, 1, 1, 0])
y = csr_container((data, indices, indptr), shape=(3, 3))
# y as a dense array would look like
# [[0, 0, 1],
# [0, 0, 1],
# [1, 1, 0]]
dump_svmlight_file(X, y, save_path, multilabel=True)
_, y_load = load_svmlight_file(save_path, multilabel=True)
y_true = [(2.0,), (2.0,), (0.0, 1.0)]
assert y_load == y_true
|
Ensure that if y contains explicit zeros (i.e. elements of y.data equal to
0) then those explicit zeros are not encoded.
|
test_multilabel_y_explicit_zeros
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_svmlight_format.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_svmlight_format.py
|
BSD-3-Clause
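For reference, the multilabel round trip also works with `y` given as a list of label tuples rather than a sparse matrix; a self-contained sketch with hypothetical data:
from io import BytesIO
import numpy as np
from sklearn.datasets import dump_svmlight_file, load_svmlight_file
X = np.arange(6, dtype=np.float64).reshape(3, 2)
y = [(2,), (0, 1), (1, 2)]
buf = BytesIO()
dump_svmlight_file(X, y, buf, multilabel=True)
buf.seek(0)
X_loaded, y_loaded = load_svmlight_file(buf, multilabel=True)
print(y_loaded)  # [(2.0,), (0.0, 1.0), (1.0, 2.0)]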
|
def test_dump_read_only(tmp_path):
"""Ensure that there is no ValueError when dumping a read-only `X`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28026
"""
rng = np.random.RandomState(42)
X = rng.randn(5, 2)
y = rng.randn(5)
# Convert to memmap-backed which are read-only
X, y = create_memmap_backed_data([X, y])
save_path = str(tmp_path / "svm_read_only")
dump_svmlight_file(X, y, save_path)
|
Ensure that there is no ValueError when dumping a read-only `X`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28026
|
test_dump_read_only
|
python
|
scikit-learn/scikit-learn
|
sklearn/datasets/tests/test_svmlight_format.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/tests/test_svmlight_format.py
|
BSD-3-Clause
|
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array of shape=(n_features, n_features)
Estimated covariance of data.
"""
xp, _ = get_namespace(self.components_)
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * xp.sqrt(exp_var[:, np.newaxis])
exp_var_diff = exp_var - self.noise_variance_
exp_var_diff = xp.where(
exp_var > self.noise_variance_,
exp_var_diff,
xp.asarray(0.0, device=device(exp_var), dtype=exp_var.dtype),
)
cov = (components_.T * exp_var_diff) @ components_
_fill_or_add_to_diagonal(cov, self.noise_variance_, xp)
return cov
|
Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array of shape=(n_features, n_features)
Estimated covariance of data.
|
get_covariance
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_base.py
|
BSD-3-Clause
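As implemented above, the explained variances have the noise variance subtracted before the outer product, so the reconstructed covariance is `W.T @ diag(explained_variance_ - noise_variance_) @ W + noise_variance_ * I`. A quick sketch checking that on a fitted PCA (random data, default solver assumed):
import numpy as np
from sklearn.decomposition import PCA
rng = np.random.RandomState(0)
X = rng.randn(100, 5)
pca = PCA(n_components=2).fit(X)
W = pca.components_
cov_manual = (
    W.T @ np.diag(pca.explained_variance_ - pca.noise_variance_) @ W
    + pca.noise_variance_ * np.eye(X.shape[1])
)
np.testing.assert_allclose(cov_manual, pca.get_covariance(), rtol=1e-10, atol=1e-12)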
|
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
xp, is_array_api_compliant = get_namespace(self.components_)
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return xp.eye(n_features) / self.noise_variance_
if is_array_api_compliant:
linalg_inv = xp.linalg.inv
else:
linalg_inv = linalg.inv
if self.noise_variance_ == 0.0:
return linalg_inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * xp.sqrt(exp_var[:, np.newaxis])
exp_var_diff = exp_var - self.noise_variance_
exp_var_diff = xp.where(
exp_var > self.noise_variance_,
exp_var_diff,
xp.asarray(0.0, device=device(exp_var)),
)
precision = components_ @ components_.T / self.noise_variance_
_fill_or_add_to_diagonal(precision, 1.0 / exp_var_diff, xp)
precision = components_.T @ linalg_inv(precision) @ components_
precision /= -(self.noise_variance_**2)
_fill_or_add_to_diagonal(precision, 1.0 / self.noise_variance_, xp)
return precision
|
Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
|
get_precision
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_base.py
|
BSD-3-Clause
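Since the matrix-inversion-lemma route above is only an efficiency device, its result should agree with a direct inverse of `get_covariance()`; a quick self-contained check on random data of hypothetical size:
import numpy as np
from sklearn.decomposition import PCA
rng = np.random.RandomState(0)
X = rng.randn(100, 5)
pca = PCA(n_components=2).fit(X)
np.testing.assert_allclose(
    pca.get_precision(),
    np.linalg.inv(pca.get_covariance()),
    rtol=1e-6,
    atol=1e-8,
)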
|
def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
|
Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
self : object
Returns the instance itself.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_base.py
|
BSD-3-Clause
|
def transform(self, X):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : array-like of shape (n_samples, n_components)
Projection of X in the first principal components, where `n_samples`
is the number of samples and `n_components` is the number of the components.
"""
xp, _ = get_namespace(X, self.components_, self.explained_variance_)
check_is_fitted(self)
X = validate_data(
self,
X,
dtype=[xp.float64, xp.float32],
accept_sparse=("csr", "csc"),
reset=False,
)
return self._transform(X, xp=xp, x_is_centered=False)
|
Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : array-like of shape (n_samples, n_components)
Projection of X in the first principal components, where `n_samples`
is the number of samples and `n_components` is the number of the components.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_base.py
|
BSD-3-Clause
|
def inverse_transform(self, X):
"""Transform data back to its original space.
In other words, return an input `X_original` whose transform would be X.
Parameters
----------
X : array-like of shape (n_samples, n_components)
New data, where `n_samples` is the number of samples
and `n_components` is the number of components.
Returns
-------
X_original : array-like of shape (n_samples, n_features)
Original data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
xp, _ = get_namespace(X)
if self.whiten:
scaled_components = (
xp.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_
)
return X @ scaled_components + self.mean_
else:
return X @ self.components_ + self.mean_
|
Transform data back to its original space.
In other words, return an input `X_original` whose transform would be X.
Parameters
----------
X : array-like of shape (n_samples, n_components)
New data, where `n_samples` is the number of samples
and `n_components` is the number of components.
Returns
-------
X_original : array-like of shape (n_samples, n_features)
Original data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
|
inverse_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_base.py
|
BSD-3-Clause
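When every component is kept, the whitening and un-whitening steps cancel exactly, so `inverse_transform(transform(X))` recovers `X`; a short sketch:
import numpy as np
from sklearn.decomposition import PCA
rng = np.random.RandomState(0)
X = rng.randn(50, 3)
pca = PCA(n_components=3, whiten=True).fit(X)
X_back = pca.inverse_transform(pca.transform(X))
np.testing.assert_allclose(X_back, X, atol=1e-10)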
|
def _sparse_encode_precomputed(
X,
dictionary,
*,
gram=None,
cov=None,
algorithm="lasso_lars",
regularization=None,
copy_cov=True,
init=None,
max_iter=1000,
verbose=0,
positive=False,
):
"""Generic sparse coding with precomputed Gram and/or covariance matrices.
Each row of the result is the solution to a Lasso problem.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data matrix.
dictionary : ndarray of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram : ndarray of shape (n_components, n_components), default=None
Precomputed Gram matrix, `dictionary * dictionary'`
gram can be `None` if method is 'threshold'.
cov : ndarray of shape (n_components, n_samples), default=None
Precomputed covariance, `dictionary * X'`.
algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \
default='lasso_lars'
The algorithm used:
* `'lars'`: uses the least angle regression method
(`linear_model.lars_path`);
* `'lasso_lars'`: uses Lars to compute the Lasso solution;
* `'lasso_cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
the estimated components are sparse;
* `'omp'`: uses orthogonal matching pursuit to estimate the sparse
solution;
* `'threshold'`: squashes to zero all coefficients less than
regularization from the projection `dictionary * data'`.
regularization : int or float, default=None
The regularization parameter. It corresponds to alpha when
algorithm is `'lasso_lars'`, `'lasso_cd'` or `'threshold'`.
Otherwise it corresponds to `n_nonzero_coefs`.
init : ndarray of shape (n_samples, n_components), default=None
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter : int, default=1000
Maximum number of iterations to perform if `algorithm='lasso_cd'` or
`'lasso_lars'`.
copy_cov : bool, default=True
Whether to copy the precomputed covariance matrix; if `False`, it may
be overwritten.
verbose : int, default=0
Controls the verbosity; the higher, the more messages.
    positive : bool, default=False
Whether to enforce a positivity constraint on the sparse code.
.. versionadded:: 0.20
Returns
-------
code : ndarray of shape (n_components, n_features)
The sparse codes.
"""
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if algorithm == "lasso_lars":
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all="ignore")
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lasso_lars = LassoLars(
alpha=alpha,
fit_intercept=False,
verbose=verbose,
precompute=gram,
fit_path=False,
positive=positive,
max_iter=max_iter,
)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == "lasso_cd":
alpha = float(regularization) / n_features # account for scaling
# TODO: Make verbosity argument for Lasso?
# sklearn.linear_model.coordinate_descent.enet_path has a verbosity
# argument that we could pass in from Lasso.
clf = Lasso(
alpha=alpha,
fit_intercept=False,
precompute=gram,
max_iter=max_iter,
warm_start=True,
positive=positive,
)
if init is not None:
# In some workflows using coordinate descent algorithms:
# - users might provide NumPy arrays with read-only buffers
# - `joblib` might memmap arrays making their buffer read-only
# TODO: move this handling (which is currently too broad)
# closer to the actual private function which need buffers to be writable.
if not init.flags["WRITEABLE"]:
init = np.array(init)
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=False)
new_code = clf.coef_
elif algorithm == "lars":
try:
err_mgt = np.seterr(all="ignore")
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lars = Lars(
fit_intercept=False,
verbose=verbose,
precompute=gram,
n_nonzero_coefs=int(regularization),
fit_path=False,
)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == "threshold":
new_code = (np.sign(cov) * np.maximum(np.abs(cov) - regularization, 0)).T
if positive:
np.clip(new_code, 0, None, out=new_code)
elif algorithm == "omp":
new_code = orthogonal_mp_gram(
Gram=gram,
Xy=cov,
n_nonzero_coefs=int(regularization),
tol=None,
norms_squared=row_norms(X, squared=True),
copy_Xy=copy_cov,
).T
return new_code.reshape(n_samples, n_components)
|
Generic sparse coding with precomputed Gram and/or covariance matrices.
Each row of the result is the solution to a Lasso problem.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data matrix.
dictionary : ndarray of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram : ndarray of shape (n_components, n_components), default=None
Precomputed Gram matrix, `dictionary * dictionary'`
gram can be `None` if method is 'threshold'.
cov : ndarray of shape (n_components, n_samples), default=None
Precomputed covariance, `dictionary * X'`.
algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, default='lasso_lars'
The algorithm used:
* `'lars'`: uses the least angle regression method
(`linear_model.lars_path`);
* `'lasso_lars'`: uses Lars to compute the Lasso solution;
* `'lasso_cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
the estimated components are sparse;
* `'omp'`: uses orthogonal matching pursuit to estimate the sparse
solution;
* `'threshold'`: squashes to zero all coefficients less than
regularization from the projection `dictionary * data'`.
regularization : int or float, default=None
The regularization parameter. It corresponds to alpha when
algorithm is `'lasso_lars'`, `'lasso_cd'` or `'threshold'`.
Otherwise it corresponds to `n_nonzero_coefs`.
init : ndarray of shape (n_samples, n_components), default=None
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter : int, default=1000
Maximum number of iterations to perform if `algorithm='lasso_cd'` or
`'lasso_lars'`.
copy_cov : bool, default=True
Whether to copy the precomputed covariance matrix; if `False`, it may
be overwritten.
verbose : int, default=0
Controls the verbosity; the higher, the more messages.
positive : bool, default=False
Whether to enforce a positivity constraint on the sparse code.
.. versionadded:: 0.20
Returns
-------
code : ndarray of shape (n_components, n_features)
The sparse codes.
|
_sparse_encode_precomputed
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_dict_learning.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_dict_learning.py
|
BSD-3-Clause
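The `'threshold'` branch above is plain soft-thresholding of the projections `dictionary @ X.T`; a self-contained sketch comparing it against the public `sparse_encode` helper (random data, unit-norm dictionary rows):
import numpy as np
from sklearn.decomposition import sparse_encode
rng = np.random.RandomState(0)
X = rng.randn(4, 6)
dictionary = rng.randn(3, 6)
dictionary /= np.linalg.norm(dictionary, axis=1, keepdims=True)
alpha = 0.5
cov = dictionary @ X.T
manual = (np.sign(cov) * np.maximum(np.abs(cov) - alpha, 0)).T
code = sparse_encode(X, dictionary, algorithm="threshold", alpha=alpha)
np.testing.assert_allclose(code, manual)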
|
def sparse_encode(
X,
dictionary,
*,
gram=None,
cov=None,
algorithm="lasso_lars",
n_nonzero_coefs=None,
alpha=None,
copy_cov=True,
init=None,
max_iter=1000,
n_jobs=None,
check_input=True,
verbose=0,
positive=False,
):
"""Sparse coding.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data matrix.
dictionary : array-like of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram : array-like of shape (n_components, n_components), default=None
Precomputed Gram matrix, `dictionary * dictionary'`.
cov : array-like of shape (n_components, n_samples), default=None
Precomputed covariance, `dictionary' * X`.
algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \
default='lasso_lars'
The algorithm used:
* `'lars'`: uses the least angle regression method
(`linear_model.lars_path`);
* `'lasso_lars'`: uses Lars to compute the Lasso solution;
* `'lasso_cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
the estimated components are sparse;
* `'omp'`: uses orthogonal matching pursuit to estimate the sparse
solution;
* `'threshold'`: squashes to zero all coefficients less than
regularization from the projection `dictionary * data'`.
n_nonzero_coefs : int, default=None
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case. If `None`, then
`n_nonzero_coefs=int(n_features / 10)`.
alpha : float, default=None
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
If `None`, default to 1.
copy_cov : bool, default=True
Whether to copy the precomputed covariance matrix; if `False`, it may
be overwritten.
init : ndarray of shape (n_samples, n_components), default=None
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter : int, default=1000
Maximum number of iterations to perform if `algorithm='lasso_cd'` or
`'lasso_lars'`.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
check_input : bool, default=True
If `False`, the input arrays X and dictionary will not be checked.
verbose : int, default=0
Controls the verbosity; the higher, the more messages.
positive : bool, default=False
Whether to enforce positivity when finding the encoding.
.. versionadded:: 0.20
Returns
-------
code : ndarray of shape (n_samples, n_components)
The sparse codes.
See Also
--------
sklearn.linear_model.lars_path : Compute Least Angle Regression or Lasso
path using LARS algorithm.
sklearn.linear_model.orthogonal_mp : Solves Orthogonal Matching Pursuit problems.
sklearn.linear_model.Lasso : Train Linear Model with L1 prior as regularizer.
SparseCoder : Find a sparse representation of data from a fixed precomputed
dictionary.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import sparse_encode
>>> X = np.array([[-1, -1, -1], [0, 0, 3]])
>>> dictionary = np.array(
... [[0, 1, 0],
... [-1, -1, 2],
... [1, 1, 1],
... [0, 1, 1],
... [0, 2, 1]],
... dtype=np.float64
... )
>>> sparse_encode(X, dictionary, alpha=1e-10)
array([[ 0., 0., -1., 0., 0.],
[ 0., 1., 1., 0., 0.]])
"""
if check_input:
if algorithm == "lasso_cd":
dictionary = check_array(
dictionary, order="C", dtype=[np.float64, np.float32]
)
X = check_array(X, order="C", dtype=[np.float64, np.float32])
else:
dictionary = check_array(dictionary)
X = check_array(X)
if dictionary.shape[1] != X.shape[1]:
raise ValueError(
"Dictionary and X have different numbers of features:"
"dictionary.shape: {} X.shape{}".format(dictionary.shape, X.shape)
)
_check_positive_coding(algorithm, positive)
return _sparse_encode(
X,
dictionary,
gram=gram,
cov=cov,
algorithm=algorithm,
n_nonzero_coefs=n_nonzero_coefs,
alpha=alpha,
copy_cov=copy_cov,
init=init,
max_iter=max_iter,
n_jobs=n_jobs,
verbose=verbose,
positive=positive,
)
|
Sparse coding.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data matrix.
dictionary : array-like of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram : array-like of shape (n_components, n_components), default=None
Precomputed Gram matrix, `dictionary * dictionary'`.
cov : array-like of shape (n_components, n_samples), default=None
Precomputed covariance, `dictionary' * X`.
algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, default='lasso_lars'
The algorithm used:
* `'lars'`: uses the least angle regression method
(`linear_model.lars_path`);
* `'lasso_lars'`: uses Lars to compute the Lasso solution;
* `'lasso_cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
the estimated components are sparse;
* `'omp'`: uses orthogonal matching pursuit to estimate the sparse
solution;
* `'threshold'`: squashes to zero all coefficients less than
regularization from the projection `dictionary * data'`.
n_nonzero_coefs : int, default=None
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case. If `None`, then
`n_nonzero_coefs=int(n_features / 10)`.
alpha : float, default=None
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
If `None`, default to 1.
copy_cov : bool, default=True
Whether to copy the precomputed covariance matrix; if `False`, it may
be overwritten.
init : ndarray of shape (n_samples, n_components), default=None
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter : int, default=1000
Maximum number of iterations to perform if `algorithm='lasso_cd'` or
`'lasso_lars'`.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
check_input : bool, default=True
If `False`, the input arrays X and dictionary will not be checked.
verbose : int, default=0
Controls the verbosity; the higher, the more messages.
positive : bool, default=False
Whether to enforce positivity when finding the encoding.
.. versionadded:: 0.20
Returns
-------
code : ndarray of shape (n_samples, n_components)
The sparse codes.
See Also
--------
sklearn.linear_model.lars_path : Compute Least Angle Regression or Lasso
path using LARS algorithm.
sklearn.linear_model.orthogonal_mp : Solves Orthogonal Matching Pursuit problems.
sklearn.linear_model.Lasso : Train Linear Model with L1 prior as regularizer.
SparseCoder : Find a sparse representation of data from a fixed precomputed
dictionary.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import sparse_encode
>>> X = np.array([[-1, -1, -1], [0, 0, 3]])
>>> dictionary = np.array(
... [[0, 1, 0],
... [-1, -1, 2],
... [1, 1, 1],
... [0, 1, 1],
... [0, 2, 1]],
... dtype=np.float64
... )
>>> sparse_encode(X, dictionary, alpha=1e-10)
array([[ 0., 0., -1., 0., 0.],
[ 0., 1., 1., 0., 0.]])
|
sparse_encode
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_dict_learning.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_dict_learning.py
|
BSD-3-Clause
|
def _sparse_encode(
X,
dictionary,
*,
gram=None,
cov=None,
algorithm="lasso_lars",
n_nonzero_coefs=None,
alpha=None,
copy_cov=True,
init=None,
max_iter=1000,
n_jobs=None,
verbose=0,
positive=False,
):
"""Sparse coding without input/parameter validation."""
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if algorithm in ("lars", "omp"):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.0
if gram is None and algorithm != "threshold":
gram = np.dot(dictionary, dictionary.T)
if cov is None and algorithm != "lasso_cd":
copy_cov = False
cov = np.dot(dictionary, X.T)
if effective_n_jobs(n_jobs) == 1 or algorithm == "threshold":
code = _sparse_encode_precomputed(
X,
dictionary,
gram=gram,
cov=cov,
algorithm=algorithm,
regularization=regularization,
copy_cov=copy_cov,
init=init,
max_iter=max_iter,
verbose=verbose,
positive=positive,
)
return code
# Enter parallel code block
n_samples = X.shape[0]
n_components = dictionary.shape[0]
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, effective_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_sparse_encode_precomputed)(
X[this_slice],
dictionary,
gram=gram,
cov=cov[:, this_slice] if cov is not None else None,
algorithm=algorithm,
regularization=regularization,
copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter,
verbose=verbose,
positive=positive,
)
for this_slice in slices
)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
|
Sparse coding without input/parameter validation.
|
_sparse_encode
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_dict_learning.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_dict_learning.py
|
BSD-3-Clause
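The parallel path above hands each worker a contiguous chunk of samples produced by `gen_even_slices`; a one-liner showing how the slicing behaves:
from sklearn.utils import gen_even_slices
print(list(gen_even_slices(10, 3)))
# [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]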
|
def _update_dict(
dictionary,
Y,
code,
A=None,
B=None,
verbose=False,
random_state=None,
positive=False,
):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary : ndarray of shape (n_components, n_features)
Value of the dictionary at the previous iteration.
Y : ndarray of shape (n_samples, n_features)
Data matrix.
code : ndarray of shape (n_samples, n_components)
Sparse coding of the data against which to optimize the dictionary.
A : ndarray of shape (n_components, n_components), default=None
Together with `B`, sufficient stats of the online model to update the
dictionary.
B : ndarray of shape (n_features, n_components), default=None
Together with `A`, sufficient stats of the online model to update the
dictionary.
    verbose : bool, default=False
Degree of output the procedure will print.
random_state : int, RandomState instance or None, default=None
Used for randomly initializing the dictionary. Pass an int for
reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
positive : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
"""
n_samples, n_components = code.shape
random_state = check_random_state(random_state)
if A is None:
A = code.T @ code
if B is None:
B = Y.T @ code
n_unused = 0
for k in range(n_components):
if A[k, k] > 1e-6:
# 1e-6 is arbitrary but consistent with the spams implementation
dictionary[k] += (B[:, k] - A[k] @ dictionary) / A[k, k]
else:
# kth atom is almost never used -> sample a new one from the data
newd = Y[random_state.choice(n_samples)]
# add small noise to avoid making the sparse coding ill conditioned
noise_level = 0.01 * (newd.std() or 1) # avoid 0 std
noise = random_state.normal(0, noise_level, size=len(newd))
dictionary[k] = newd + noise
code[:, k] = 0
n_unused += 1
if positive:
np.clip(dictionary[k], 0, None, out=dictionary[k])
# Projection on the constraint set ||V_k|| <= 1
dictionary[k] /= max(linalg.norm(dictionary[k]), 1)
if verbose and n_unused > 0:
print(f"{n_unused} unused atoms resampled.")
|
Update the dense dictionary factor in place.
Parameters
----------
dictionary : ndarray of shape (n_components, n_features)
Value of the dictionary at the previous iteration.
Y : ndarray of shape (n_samples, n_features)
Data matrix.
code : ndarray of shape (n_samples, n_components)
Sparse coding of the data against which to optimize the dictionary.
A : ndarray of shape (n_components, n_components), default=None
Together with `B`, sufficient stats of the online model to update the
dictionary.
B : ndarray of shape (n_features, n_components), default=None
Together with `A`, sufficient stats of the online model to update the
dictionary.
verbose : bool, default=False
Degree of output the procedure will print.
random_state : int, RandomState instance or None, default=None
Used for randomly initializing the dictionary. Pass an int for
reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
positive : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
|
_update_dict
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_dict_learning.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_dict_learning.py
|
BSD-3-Clause
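For a well-used atom `k` the update above is a rank-one correction followed by a projection onto the unit ball; a minimal sketch of that single step with random data (not the full online loop):
import numpy as np
rng = np.random.RandomState(0)
n_samples, n_components, n_features = 20, 3, 5
Y = rng.randn(n_samples, n_features)
code = rng.randn(n_samples, n_components)
D = rng.randn(n_components, n_features)
A = code.T @ code  # (n_components, n_components)
B = Y.T @ code     # (n_features, n_components)
k = 0
D[k] += (B[:, k] - A[k] @ D) / A[k, k]
D[k] /= max(np.linalg.norm(D[k]), 1)  # projection onto ||d_k|| <= 1
print(np.linalg.norm(D[k]) <= 1 + 1e-12)  # True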
|
def dict_learning_online(
X,
n_components=2,
*,
alpha=1,
max_iter=100,
return_code=True,
dict_init=None,
callback=None,
batch_size=256,
verbose=False,
shuffle=True,
n_jobs=None,
method="lars",
random_state=None,
positive_dict=False,
positive_code=False,
method_max_iter=1000,
tol=1e-3,
max_no_improvement=10,
):
"""Solve a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. ||.||_Fro stands for
the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm
which is the sum of the absolute values of all the entries in the matrix.
This is accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data matrix.
n_components : int or None, default=2
Number of dictionary atoms to extract. If None, then ``n_components``
is set to ``n_features``.
alpha : float, default=1
Sparsity controlling parameter.
max_iter : int, default=100
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
.. versionadded:: 1.1
return_code : bool, default=True
Whether to also return the code U or just the dictionary `V`.
dict_init : ndarray of shape (n_components, n_features), default=None
Initial values for the dictionary for warm restart scenarios.
If `None`, the initial values for the dictionary are created
with an SVD decomposition of the data via
:func:`~sklearn.utils.extmath.randomized_svd`.
callback : callable, default=None
A callable that gets invoked at the end of each iteration.
batch_size : int, default=256
The number of samples to take in each batch.
.. versionchanged:: 1.3
The default value of `batch_size` changed from 3 to 256 in version 1.3.
verbose : bool, default=False
To control the verbosity of the procedure.
shuffle : bool, default=True
Whether to shuffle the data before splitting it in batches.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
method : {'lars', 'cd'}, default='lars'
* `'lars'`: uses the least angle regression method to solve the lasso
problem (`linear_model.lars_path`);
* `'cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). Lars will be faster if
the estimated components are sparse.
random_state : int, RandomState instance or None, default=None
Used for initializing the dictionary when ``dict_init`` is not
specified, randomly shuffling the data when ``shuffle`` is set to
``True``, and updating the dictionary. Pass an int for reproducible
results across multiple function calls.
See :term:`Glossary <random_state>`.
positive_dict : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
positive_code : bool, default=False
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
method_max_iter : int, default=1000
Maximum number of iterations to perform when solving the lasso problem.
.. versionadded:: 0.22
tol : float, default=1e-3
Control early stopping based on the norm of the differences in the
dictionary between 2 steps.
To disable early stopping based on changes in the dictionary, set
`tol` to 0.0.
.. versionadded:: 1.1
max_no_improvement : int, default=10
Control early stopping based on the consecutive number of mini batches
that does not yield an improvement on the smoothed cost function.
To disable convergence detection based on cost function, set
`max_no_improvement` to None.
.. versionadded:: 1.1
Returns
-------
code : ndarray of shape (n_samples, n_components),
The sparse code (only returned if `return_code=True`).
dictionary : ndarray of shape (n_components, n_features),
The solutions to the dictionary learning problem.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See Also
--------
dict_learning : Solve a dictionary learning matrix factorization problem.
DictionaryLearning : Find a dictionary that sparsely encodes data.
MiniBatchDictionaryLearning : A faster, less accurate, version of the dictionary
learning algorithm.
SparsePCA : Sparse Principal Components Analysis.
MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_sparse_coded_signal
>>> from sklearn.decomposition import dict_learning_online
>>> X, _, _ = make_sparse_coded_signal(
... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
... random_state=42,
... )
>>> U, V = dict_learning_online(
... X, n_components=15, alpha=0.2, max_iter=20, batch_size=3, random_state=42
... )
We can check the level of sparsity of `U`:
>>> np.mean(U == 0)
np.float64(0.53)
We can compare the average squared euclidean norm of the reconstruction
error of the sparse coded signal relative to the squared euclidean norm of
the original signal:
>>> X_hat = U @ V
>>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
np.float64(0.053)
"""
transform_algorithm = "lasso_" + method
est = MiniBatchDictionaryLearning(
n_components=n_components,
alpha=alpha,
max_iter=max_iter,
n_jobs=n_jobs,
fit_algorithm=method,
batch_size=batch_size,
shuffle=shuffle,
dict_init=dict_init,
random_state=random_state,
transform_algorithm=transform_algorithm,
transform_alpha=alpha,
positive_code=positive_code,
positive_dict=positive_dict,
transform_max_iter=method_max_iter,
verbose=verbose,
callback=callback,
tol=tol,
max_no_improvement=max_no_improvement,
).fit(X)
if not return_code:
return est.components_
else:
code = est.transform(X)
return code, est.components_
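A minimal usage sketch to complement the wrapper above: since dict_learning_online simply fits a MiniBatchDictionaryLearning estimator with forwarded parameters, calling the function and fitting the estimator directly with matching settings should give the same dictionary. The parameter values below (alpha=0.2, batch_size=3, etc.) are illustrative, not prescriptive.

import numpy as np
from sklearn.datasets import make_sparse_coded_signal
from sklearn.decomposition import MiniBatchDictionaryLearning, dict_learning_online

X, _, _ = make_sparse_coded_signal(
    n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10, random_state=42
)

# Functional interface.
U_func, V_func = dict_learning_online(
    X, n_components=15, alpha=0.2, max_iter=20, batch_size=3, random_state=42
)

# Equivalent estimator interface, spelling out the settings the wrapper forwards.
est = MiniBatchDictionaryLearning(
    n_components=15, alpha=0.2, max_iter=20, batch_size=3, random_state=42,
    fit_algorithm="lars", transform_algorithm="lasso_lars", transform_alpha=0.2,
).fit(X)
U_est = est.transform(X)

# With the same random_state both paths run the same computation, so the
# dictionaries are expected to match.
print(np.allclose(V_func, est.components_), np.allclose(U_func, U_est))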
|
Solve a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. ||.||_Fro stands for
the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm
which is the sum of the absolute values of all the entries in the matrix.
This is accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data matrix.
n_components : int or None, default=2
Number of dictionary atoms to extract. If None, then ``n_components``
is set to ``n_features``.
alpha : float, default=1
Sparsity controlling parameter.
max_iter : int, default=100
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
.. versionadded:: 1.1
return_code : bool, default=True
Whether to also return the code U or just the dictionary `V`.
dict_init : ndarray of shape (n_components, n_features), default=None
Initial values for the dictionary for warm restart scenarios.
If `None`, the initial values for the dictionary are created
with an SVD decomposition of the data via
:func:`~sklearn.utils.extmath.randomized_svd`.
callback : callable, default=None
A callable that gets invoked at the end of each iteration.
batch_size : int, default=256
The number of samples to take in each batch.
.. versionchanged:: 1.3
The default value of `batch_size` changed from 3 to 256 in version 1.3.
verbose : bool, default=False
To control the verbosity of the procedure.
shuffle : bool, default=True
Whether to shuffle the data before splitting it in batches.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
method : {'lars', 'cd'}, default='lars'
* `'lars'`: uses the least angle regression method to solve the lasso
problem (`linear_model.lars_path`);
* `'cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). Lars will be faster if
the estimated components are sparse.
random_state : int, RandomState instance or None, default=None
Used for initializing the dictionary when ``dict_init`` is not
specified, randomly shuffling the data when ``shuffle`` is set to
``True``, and updating the dictionary. Pass an int for reproducible
results across multiple function calls.
See :term:`Glossary <random_state>`.
positive_dict : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
positive_code : bool, default=False
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
method_max_iter : int, default=1000
Maximum number of iterations to perform when solving the lasso problem.
.. versionadded:: 0.22
tol : float, default=1e-3
Control early stopping based on the norm of the differences in the
dictionary between 2 steps.
To disable early stopping based on changes in the dictionary, set
`tol` to 0.0.
.. versionadded:: 1.1
max_no_improvement : int, default=10
    Control early stopping based on the number of consecutive mini batches
    that do not yield an improvement on the smoothed cost function.
To disable convergence detection based on cost function, set
`max_no_improvement` to None.
.. versionadded:: 1.1
Returns
-------
code : ndarray of shape (n_samples, n_components),
The sparse code (only returned if `return_code=True`).
dictionary : ndarray of shape (n_components, n_features),
The solutions to the dictionary learning problem.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See Also
--------
dict_learning : Solve a dictionary learning matrix factorization problem.
DictionaryLearning : Find a dictionary that sparsely encodes data.
MiniBatchDictionaryLearning : A faster, less accurate version of the dictionary
learning algorithm.
SparsePCA : Sparse Principal Components Analysis.
MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_sparse_coded_signal
>>> from sklearn.decomposition import dict_learning_online
>>> X, _, _ = make_sparse_coded_signal(
... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
... random_state=42,
... )
>>> U, V = dict_learning_online(
... X, n_components=15, alpha=0.2, max_iter=20, batch_size=3, random_state=42
... )
We can check the level of sparsity of `U`:
>>> np.mean(U == 0)
np.float64(0.53)
We can compare the average squared euclidean norm of the reconstruction
error of the sparse coded signal relative to the squared euclidean norm of
the original signal:
>>> X_hat = U @ V
>>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
np.float64(0.053)
|
dict_learning_online
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_dict_learning.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_dict_learning.py
|
BSD-3-Clause
|
def dict_learning(
X,
n_components,
*,
alpha,
max_iter=100,
tol=1e-8,
method="lars",
n_jobs=None,
dict_init=None,
code_init=None,
callback=None,
verbose=False,
random_state=None,
return_n_iter=False,
positive_dict=False,
positive_code=False,
method_max_iter=1000,
):
"""Solve a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. ||.||_Fro stands for
the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm
which is the sum of the absolute values of all the entries in the matrix.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data matrix.
n_components : int
Number of dictionary atoms to extract.
alpha : int or float
Sparsity controlling parameter.
max_iter : int, default=100
Maximum number of iterations to perform.
tol : float, default=1e-8
Tolerance for the stopping condition.
method : {'lars', 'cd'}, default='lars'
The method used:
* `'lars'`: uses the least angle regression method to solve the lasso
problem (`linear_model.lars_path`);
* `'cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). Lars will be faster if
the estimated components are sparse.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
dict_init : ndarray of shape (n_components, n_features), default=None
Initial value for the dictionary for warm restart scenarios. Only used
if `code_init` and `dict_init` are not None.
code_init : ndarray of shape (n_samples, n_components), default=None
Initial value for the sparse code for warm restart scenarios. Only used
if `code_init` and `dict_init` are not None.
callback : callable, default=None
Callable that gets invoked every five iterations.
verbose : bool, default=False
To control the verbosity of the procedure.
random_state : int, RandomState instance or None, default=None
Used for randomly initializing the dictionary. Pass an int for
reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
positive_dict : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
positive_code : bool, default=False
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
method_max_iter : int, default=1000
Maximum number of iterations to perform.
.. versionadded:: 0.22
Returns
-------
code : ndarray of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary : ndarray of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors : array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See Also
--------
dict_learning_online : Solve a dictionary learning matrix factorization
problem online.
DictionaryLearning : Find a dictionary that sparsely encodes data.
MiniBatchDictionaryLearning : A faster, less accurate version
of the dictionary learning algorithm.
SparsePCA : Sparse Principal Components Analysis.
MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_sparse_coded_signal
>>> from sklearn.decomposition import dict_learning
>>> X, _, _ = make_sparse_coded_signal(
... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
... random_state=42,
... )
>>> U, V, errors = dict_learning(X, n_components=15, alpha=0.1, random_state=42)
We can check the level of sparsity of `U`:
>>> np.mean(U == 0)
np.float64(0.62)
We can compare the average squared euclidean norm of the reconstruction
error of the sparse coded signal relative to the squared euclidean norm of
the original signal:
>>> X_hat = U @ V
>>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
np.float64(0.0192)
"""
estimator = DictionaryLearning(
n_components=n_components,
alpha=alpha,
max_iter=max_iter,
tol=tol,
fit_algorithm=method,
n_jobs=n_jobs,
dict_init=dict_init,
callback=callback,
code_init=code_init,
verbose=verbose,
random_state=random_state,
positive_code=positive_code,
positive_dict=positive_dict,
transform_max_iter=method_max_iter,
).set_output(transform="default")
code = estimator.fit_transform(X)
if return_n_iter:
return (
code,
estimator.components_,
estimator.error_,
estimator.n_iter_,
)
return code, estimator.components_, estimator.error_
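A short sketch mirroring the delegation above: dict_learning builds a DictionaryLearning estimator, so fitting the estimator directly with matching parameters (note that max_iter must be set to 100 to match the function's default) should reproduce the code, dictionary, and error trace. Values are illustrative.

import numpy as np
from sklearn.datasets import make_sparse_coded_signal
from sklearn.decomposition import DictionaryLearning, dict_learning

X, _, _ = make_sparse_coded_signal(
    n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10, random_state=42
)

# Functional interface.
U, V, errors = dict_learning(X, n_components=15, alpha=0.1, random_state=42)

# Equivalent estimator interface; max_iter=100 matches the function's default.
est = DictionaryLearning(
    n_components=15, alpha=0.1, max_iter=100, fit_algorithm="lars", random_state=42
)
U_est = est.fit_transform(X)

# Same computation under the hood, so the factors are expected to coincide.
print(np.allclose(U, U_est), np.allclose(V, est.components_))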
|
Solve a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. ||.||_Fro stands for
the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm
which is the sum of the absolute values of all the entries in the matrix.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data matrix.
n_components : int
Number of dictionary atoms to extract.
alpha : int or float
Sparsity controlling parameter.
max_iter : int, default=100
Maximum number of iterations to perform.
tol : float, default=1e-8
Tolerance for the stopping condition.
method : {'lars', 'cd'}, default='lars'
The method used:
* `'lars'`: uses the least angle regression method to solve the lasso
problem (`linear_model.lars_path`);
* `'cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). Lars will be faster if
the estimated components are sparse.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
dict_init : ndarray of shape (n_components, n_features), default=None
Initial value for the dictionary for warm restart scenarios. Only used
if `code_init` and `dict_init` are not None.
code_init : ndarray of shape (n_samples, n_components), default=None
Initial value for the sparse code for warm restart scenarios. Only used
if `code_init` and `dict_init` are not None.
callback : callable, default=None
Callable that gets invoked every five iterations.
verbose : bool, default=False
To control the verbosity of the procedure.
random_state : int, RandomState instance or None, default=None
Used for randomly initializing the dictionary. Pass an int for
reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
positive_dict : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
positive_code : bool, default=False
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
method_max_iter : int, default=1000
Maximum number of iterations to perform.
.. versionadded:: 0.22
Returns
-------
code : ndarray of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary : ndarray of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors : array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See Also
--------
dict_learning_online : Solve a dictionary learning matrix factorization
problem online.
DictionaryLearning : Find a dictionary that sparsely encodes data.
MiniBatchDictionaryLearning : A faster, less accurate version
of the dictionary learning algorithm.
SparsePCA : Sparse Principal Components Analysis.
MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_sparse_coded_signal
>>> from sklearn.decomposition import dict_learning
>>> X, _, _ = make_sparse_coded_signal(
... n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
... random_state=42,
... )
>>> U, V, errors = dict_learning(X, n_components=15, alpha=0.1, random_state=42)
We can check the level of sparsity of `U`:
>>> np.mean(U == 0)
np.float64(0.62)
We can compare the average squared euclidean norm of the reconstruction
error of the sparse coded signal relative to the squared euclidean norm of
the original signal:
>>> X_hat = U @ V
>>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
np.float64(0.0192)
|
dict_learning
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_dict_learning.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_dict_learning.py
|
BSD-3-Clause
|
def _transform(self, X, dictionary):
"""Private method allowing to accommodate both DictionaryLearning and
SparseCoder."""
X = validate_data(self, X, reset=False)
if hasattr(self, "alpha") and self.transform_alpha is None:
transform_alpha = self.alpha
else:
transform_alpha = self.transform_alpha
code = sparse_encode(
X,
dictionary,
algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=transform_alpha,
max_iter=self.transform_max_iter,
n_jobs=self.n_jobs,
positive=self.positive_code,
)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
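An illustrative numpy sketch of the split_sign post-processing applied at the end of _transform: each coded sample is expanded into its positive part followed by the magnitude of its negative part. The small code matrix is made up for the example.

import numpy as np

code = np.array([[1.5, -2.0, 0.0],
                 [-0.5, 3.0, -1.0]])
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)   # positive side
split_code[:, n_features:] = -np.minimum(code, 0)  # magnitude of the negative side
print(split_code)
# [[1.5 0.  0.  0.  2.  0. ]
#  [0.  3.  0.  0.5 0.  1. ]]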
|
Private method that accommodates both DictionaryLearning and
SparseCoder.
|
_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_dict_learning.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_dict_learning.py
|
BSD-3-Clause
|
def transform(self, X):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Transformed data.
"""
check_is_fitted(self)
return self._transform(X, self.components_)
|
Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Transformed data.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_dict_learning.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_dict_learning.py
|
BSD-3-Clause
|
def _inverse_transform(self, code, dictionary):
"""Private method allowing to accommodate both DictionaryLearning and
SparseCoder."""
code = check_array(code)
# compute number of expected features in code
expected_n_components = dictionary.shape[0]
if self.split_sign:
expected_n_components += expected_n_components
if not code.shape[1] == expected_n_components:
raise ValueError(
"The number of components in the code is different from the "
"number of components in the dictionary."
f"Expected {expected_n_components}, got {code.shape[1]}."
)
if self.split_sign:
n_samples, n_features = code.shape
n_features //= 2
code = code[:, :n_features] - code[:, n_features:]
return code @ dictionary
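A companion sketch for the split-sign branch of _inverse_transform: the two halves of a split code are recombined before multiplying by the dictionary. The dictionary and the split code below are made-up values continuing the previous example.

import numpy as np

dictionary = np.array([[1.0, 0.0],
                       [0.0, 1.0],
                       [1.0, 1.0]])                      # (n_components, n_features)
split_code = np.array([[1.5, 0.0, 0.0, 0.0, 2.0, 0.0]])  # positive half, then negative half
n_components = split_code.shape[1] // 2
code = split_code[:, :n_components] - split_code[:, n_components:]
print(code @ dictionary)  # [[ 1.5 -2. ]]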
|
Private method that accommodates both DictionaryLearning and
SparseCoder.
|
_inverse_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_dict_learning.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_dict_learning.py
|
BSD-3-Clause
|
def inverse_transform(self, X):
"""Transform data back to its original space.
Parameters
----------
X : array-like of shape (n_samples, n_components)
Data to be transformed back. Must have the same number of
components as the data used to train the model.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
Transformed data.
"""
check_is_fitted(self)
return self._inverse_transform(X, self.components_)
|
Transform data back to its original space.
Parameters
----------
X : array-like of shape (n_samples, n_components)
Data to be transformed back. Must have the same number of
components as the data used to train the model.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
Transformed data.
|
inverse_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_dict_learning.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_dict_learning.py
|
BSD-3-Clause
|
def fit_transform(self, X, y=None):
"""Fit the model from data in X and return the transformed data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
V : ndarray of shape (n_samples, n_components)
Transformed data.
"""
_check_positive_coding(method=self.fit_algorithm, positive=self.positive_code)
method = "lasso_" + self.fit_algorithm
random_state = check_random_state(self.random_state)
X = validate_data(self, X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = _dict_learning(
X,
n_components,
alpha=self.alpha,
tol=self.tol,
max_iter=self.max_iter,
method=method,
method_max_iter=self.transform_max_iter,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
callback=self.callback,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True,
positive_dict=self.positive_dict,
positive_code=self.positive_code,
)
self.components_ = U
self.error_ = E
return V
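A brief usage sketch for the fit_transform shown above, with illustrative parameter values: the learned dictionary, the per-iteration error trace, and the iteration count are exposed as fitted attributes.

import numpy as np
from sklearn.datasets import make_sparse_coded_signal
from sklearn.decomposition import DictionaryLearning

X, _, _ = make_sparse_coded_signal(
    n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10, random_state=0
)
dl = DictionaryLearning(n_components=15, alpha=0.1, max_iter=50, random_state=0)
code = dl.fit_transform(X)
print(code.shape)              # (30, 15)
print(dl.components_.shape)    # (15, 20)
print(dl.n_iter_, len(dl.error_))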
|
Fit the model from data in X and return the transformed data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
V : ndarray of shape (n_samples, n_components)
Transformed data.
|
fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_dict_learning.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_dict_learning.py
|
BSD-3-Clause
|
def _minibatch_step(self, X, dictionary, random_state, step):
"""Perform the update on the dictionary for one minibatch."""
batch_size = X.shape[0]
# Compute code for this batch
code = _sparse_encode(
X,
dictionary,
algorithm=self._fit_algorithm,
alpha=self.alpha,
n_jobs=self.n_jobs,
positive=self.positive_code,
max_iter=self.transform_max_iter,
verbose=self.verbose,
)
batch_cost = (
0.5 * ((X - code @ dictionary) ** 2).sum()
+ self.alpha * np.sum(np.abs(code))
) / batch_size
# Update inner stats
self._update_inner_stats(X, code, batch_size, step)
# Update dictionary
_update_dict(
dictionary,
X,
code,
self._A,
self._B,
verbose=self.verbose,
random_state=random_state,
positive=self.positive_dict,
)
return batch_cost
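A standalone sketch of the per-batch objective evaluated above, 0.5 * ||X_batch - code @ dictionary||_F^2 + alpha * ||code||_1 averaged over the batch; the arrays are random stand-ins rather than a real fit.

import numpy as np

rng = np.random.RandomState(0)
X_batch = rng.randn(8, 20)      # (batch_size, n_features)
dictionary = rng.randn(5, 20)   # (n_components, n_features)
code = rng.randn(8, 5)          # (batch_size, n_components)
alpha = 1.0

batch_cost = (
    0.5 * ((X_batch - code @ dictionary) ** 2).sum() + alpha * np.abs(code).sum()
) / X_batch.shape[0]
print(batch_cost)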
|
Perform the update on the dictionary for one minibatch.
|
_minibatch_step
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_dict_learning.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_dict_learning.py
|
BSD-3-Clause
|
def _check_convergence(
self, X, batch_cost, new_dict, old_dict, n_samples, step, n_steps
):
"""Helper function to encapsulate the early stopping logic.
Early stopping is based on two factors:
- A small change of the dictionary between two minibatch updates. This is
controlled by the tol parameter.
        - No more improvement on a smoothed estimate of the objective function for a
        certain number of consecutive minibatch updates. This is controlled by
the max_no_improvement parameter.
"""
batch_size = X.shape[0]
# counts steps starting from 1 for user friendly verbose mode.
step = step + 1
        # Ignore the first 100 steps or 1 epoch to avoid initializing the ewa_cost
        # with a value that is too poor.
if step <= min(100, n_samples / batch_size):
if self.verbose:
print(f"Minibatch step {step}/{n_steps}: mean batch cost: {batch_cost}")
return False
# Compute an Exponentially Weighted Average of the cost function to
# monitor the convergence while discarding minibatch-local stochastic
# variability: https://en.wikipedia.org/wiki/Moving_average
if self._ewa_cost is None:
self._ewa_cost = batch_cost
else:
alpha = batch_size / (n_samples + 1)
alpha = min(alpha, 1)
self._ewa_cost = self._ewa_cost * (1 - alpha) + batch_cost * alpha
if self.verbose:
print(
f"Minibatch step {step}/{n_steps}: mean batch cost: "
f"{batch_cost}, ewa cost: {self._ewa_cost}"
)
# Early stopping based on change of dictionary
dict_diff = linalg.norm(new_dict - old_dict) / self._n_components
if self.tol > 0 and dict_diff <= self.tol:
if self.verbose:
print(f"Converged (small dictionary change) at step {step}/{n_steps}")
return True
# Early stopping heuristic due to lack of improvement on smoothed
# cost function
if self._ewa_cost_min is None or self._ewa_cost < self._ewa_cost_min:
self._no_improvement = 0
self._ewa_cost_min = self._ewa_cost
else:
self._no_improvement += 1
if (
self.max_no_improvement is not None
and self._no_improvement >= self.max_no_improvement
):
if self.verbose:
print(
"Converged (lack of improvement in objective function) "
f"at step {step}/{n_steps}"
)
return True
return False
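A small helper sketch of the exponentially weighted average used for convergence monitoring above: each new batch cost is blended in with weight batch_size / (n_samples + 1), capped at 1. The cost sequence below is invented for illustration.

def update_ewa_cost(ewa_cost, batch_cost, batch_size, n_samples):
    """Blend a new batch cost into the running EWA, as in _check_convergence."""
    if ewa_cost is None:
        return batch_cost
    alpha = min(batch_size / (n_samples + 1), 1.0)
    return ewa_cost * (1 - alpha) + batch_cost * alpha

ewa = None
for cost in [10.0, 8.0, 7.5, 7.4]:
    ewa = update_ewa_cost(ewa, cost, batch_size=256, n_samples=10_000)
    print(round(ewa, 4))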
|
Helper function to encapsulate the early stopping logic.
Early stopping is based on two factors:
- A small change of the dictionary between two minibatch updates. This is
controlled by the tol parameter.
- No more improvement on a smoothed estimate of the objective function for a
certain number of consecutive minibatch updates. This is controlled by
the max_no_improvement parameter.
|
_check_convergence
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_dict_learning.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_dict_learning.py
|
BSD-3-Clause
|
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(
self, X, dtype=[np.float64, np.float32], order="C", copy=False
)
self._check_params(X)
self._random_state = check_random_state(self.random_state)
dictionary = self._initialize_dict(X, self._random_state)
old_dict = dictionary.copy()
if self.shuffle:
X_train = X.copy()
self._random_state.shuffle(X_train)
else:
X_train = X
n_samples, n_features = X_train.shape
if self.verbose:
print("[dict_learning]")
# Inner stats
self._A = np.zeros(
(self._n_components, self._n_components), dtype=X_train.dtype
)
self._B = np.zeros((n_features, self._n_components), dtype=X_train.dtype)
# Attributes to monitor the convergence
self._ewa_cost = None
self._ewa_cost_min = None
self._no_improvement = 0
batches = gen_batches(n_samples, self._batch_size)
batches = itertools.cycle(batches)
n_steps_per_iter = int(np.ceil(n_samples / self._batch_size))
n_steps = self.max_iter * n_steps_per_iter
i = -1 # to allow max_iter = 0
for i, batch in zip(range(n_steps), batches):
X_batch = X_train[batch]
batch_cost = self._minibatch_step(
X_batch, dictionary, self._random_state, i
)
if self._check_convergence(
X_batch, batch_cost, dictionary, old_dict, n_samples, i, n_steps
):
break
# XXX callback param added for backward compat in #18975 but a common
# unified callback API should be preferred
if self.callback is not None:
self.callback(locals())
old_dict[:] = dictionary
self.n_steps_ = i + 1
self.n_iter_ = np.ceil(self.n_steps_ / n_steps_per_iter)
self.components_ = dictionary
return self
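A hedged usage sketch for the fit loop above: as noted in the inline comment, the callback receives the step's local variables, which can be used for lightweight progress tracking such as recording the batch cost. Parameter values are illustrative.

import numpy as np
from sklearn.datasets import make_sparse_coded_signal
from sklearn.decomposition import MiniBatchDictionaryLearning

X, _, _ = make_sparse_coded_signal(
    n_samples=100, n_components=15, n_features=20, n_nonzero_coefs=10, random_state=0
)

costs = []

def record_cost(local_vars):
    # `batch_cost` is among the locals of the fit loop shown above.
    costs.append(local_vars["batch_cost"])

mbdl = MiniBatchDictionaryLearning(
    n_components=15, batch_size=10, max_iter=5, random_state=0, callback=record_cost
).fit(X)
print(mbdl.n_steps_, mbdl.n_iter_, len(costs))  # one recorded cost per step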
|
Fit the model from data in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_dict_learning.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_dict_learning.py
|
BSD-3-Clause
|
def partial_fit(self, X, y=None):
"""Update the model using the data in X as a mini-batch.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Return the instance itself.
"""
has_components = hasattr(self, "components_")
X = validate_data(
self, X, dtype=[np.float64, np.float32], order="C", reset=not has_components
)
if not has_components:
# This instance has not been fitted yet (fit or partial_fit)
self._check_params(X)
self._random_state = check_random_state(self.random_state)
dictionary = self._initialize_dict(X, self._random_state)
self.n_steps_ = 0
self._A = np.zeros((self._n_components, self._n_components), dtype=X.dtype)
self._B = np.zeros((X.shape[1], self._n_components), dtype=X.dtype)
else:
dictionary = self.components_
self._minibatch_step(X, dictionary, self._random_state, self.n_steps_)
self.components_ = dictionary
self.n_steps_ += 1
return self
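A sketch of online training with the partial_fit method above: the model is updated one mini-batch at a time, as it would be with streamed data; batch boundaries here are produced with sklearn.utils.gen_batches and the sizes are illustrative.

import numpy as np
from sklearn.datasets import make_sparse_coded_signal
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.utils import gen_batches

X, _, _ = make_sparse_coded_signal(
    n_samples=100, n_components=15, n_features=20, n_nonzero_coefs=10, random_state=0
)

mbdl = MiniBatchDictionaryLearning(n_components=15, random_state=0)
for batch in gen_batches(X.shape[0], 20):  # five mini-batches of 20 samples
    mbdl.partial_fit(X[batch])
print(mbdl.n_steps_, mbdl.components_.shape)  # 5 (15, 20)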
|
Update the model using the data in X as a mini-batch.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Return the instance itself.
|
partial_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_dict_learning.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_dict_learning.py
|
BSD-3-Clause
|
def fit(self, X, y=None):
"""Fit the FactorAnalysis model to X using SVD based approach.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : Ignored
Ignored parameter.
Returns
-------
self : object
FactorAnalysis class instance.
"""
X = validate_data(
self, X, copy=self.copy, dtype=np.float64, force_writeable=True
)
n_samples, n_features = X.shape
n_components = self.n_components
if n_components is None:
n_components = n_features
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
# some constant terms
nsqrt = sqrt(n_samples)
llconst = n_features * log(2.0 * np.pi) + n_components
var = np.var(X, axis=0)
if self.noise_variance_init is None:
psi = np.ones(n_features, dtype=X.dtype)
else:
if len(self.noise_variance_init) != n_features:
raise ValueError(
"noise_variance_init dimension does not "
"with number of features : %d != %d"
% (len(self.noise_variance_init), n_features)
)
psi = np.array(self.noise_variance_init)
loglike = []
old_ll = -np.inf
SMALL = 1e-12
# we'll modify svd outputs to return unexplained variance
# to allow for unified computation of loglikelihood
if self.svd_method == "lapack":
def my_svd(X):
_, s, Vt = linalg.svd(X, full_matrices=False, check_finite=False)
return (
s[:n_components],
Vt[:n_components],
squared_norm(s[n_components:]),
)
else: # svd_method == "randomized"
random_state = check_random_state(self.random_state)
def my_svd(X):
_, s, Vt = _randomized_svd(
X,
n_components,
random_state=random_state,
n_iter=self.iterated_power,
)
return s, Vt, squared_norm(X) - squared_norm(s)
for i in range(self.max_iter):
# SMALL helps numerics
sqrt_psi = np.sqrt(psi) + SMALL
s, Vt, unexp_var = my_svd(X / (sqrt_psi * nsqrt))
s **= 2
# Use 'maximum' here to avoid sqrt problems.
W = np.sqrt(np.maximum(s - 1.0, 0.0))[:, np.newaxis] * Vt
del Vt
W *= sqrt_psi
# loglikelihood
ll = llconst + np.sum(np.log(s))
ll += unexp_var + np.sum(np.log(psi))
ll *= -n_samples / 2.0
loglike.append(ll)
if (ll - old_ll) < self.tol:
break
old_ll = ll
psi = np.maximum(var - np.sum(W**2, axis=0), SMALL)
else:
warnings.warn(
"FactorAnalysis did not converge."
" You might want"
" to increase the number of iterations.",
ConvergenceWarning,
)
self.components_ = W
if self.rotation is not None:
self.components_ = self._rotate(W)
self.noise_variance_ = psi
self.loglike_ = loglike
self.n_iter_ = i + 1
return self
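A brief usage sketch for the SVD-based EM fit above, on a small toy dataset: after fitting, the loadings, the estimated per-feature noise variances, the per-iteration log-likelihoods, and the iteration count are available as attributes.

from sklearn.datasets import load_iris
from sklearn.decomposition import FactorAnalysis

X = load_iris().data
fa = FactorAnalysis(n_components=2, random_state=0).fit(X)
print(fa.components_.shape)      # (2, 4)
print(fa.noise_variance_.shape)  # (4,)
print(fa.n_iter_, len(fa.loglike_))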
|
Fit the FactorAnalysis model to X using SVD based approach.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : Ignored
Ignored parameter.
Returns
-------
self : object
FactorAnalysis class instance.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_factor_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_factor_analysis.py
|
BSD-3-Clause
|
def transform(self, X):
"""Apply dimensionality reduction to X using the model.
Compute the expected mean of the latent variables.
See Barber, 21.2.33 (or Bishop, 12.66).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
The latent variables of X.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
Ih = np.eye(len(self.components_))
X_transformed = X - self.mean_
Wpsi = self.components_ / self.noise_variance_
cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T))
tmp = np.dot(X_transformed, Wpsi.T)
X_transformed = np.dot(tmp, cov_z)
return X_transformed
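A verification sketch of the latent posterior mean computed by transform above, E[z | x] = (I + W Psi^{-1} W^T)^{-1} W Psi^{-1} (x - mean), reproduced with plain numpy/scipy on a toy dataset; it is expected to match the method's output.

import numpy as np
from scipy import linalg
from sklearn.datasets import load_iris
from sklearn.decomposition import FactorAnalysis

X = load_iris().data
fa = FactorAnalysis(n_components=2, random_state=0).fit(X)

W = fa.components_                    # (n_components, n_features)
Wpsi = W / fa.noise_variance_         # W Psi^{-1}
cov_z = linalg.inv(np.eye(W.shape[0]) + Wpsi @ W.T)
Z_manual = (X - fa.mean_) @ Wpsi.T @ cov_z
print(np.allclose(Z_manual, fa.transform(X)))  # expected: True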
|
Apply dimensionality reduction to X using the model.
Compute the expected mean of the latent variables.
See Barber, 21.2.33 (or Bishop, 12.66).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
The latent variables of X.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/_factor_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/_factor_analysis.py
|
BSD-3-Clause
|