---
dataset_info:
- config_name: default
  features:
  - name: utterance
    dtype: string
  - name: label
    sequence: int64
  splits:
  - name: train
    num_bytes: 380277965
    num_examples: 53203
  - name: validation
    num_bytes: 40200731
    num_examples: 4834
  - name: test
    num_bytes: 57450762
    num_examples: 4774
  download_size: 389851249
  dataset_size: 477929458
- config_name: intents
  features:
  - name: id
    dtype: int64
  - name: name
    dtype: 'null'
  - name: tags
    sequence: 'null'
  - name: regex_full_match
    sequence: 'null'
  - name: regex_partial_match
    sequence: 'null'
  - name: description
    dtype: 'null'
  splits:
  - name: intents
    num_bytes: 220
    num_examples: 11
  download_size: 3329
  dataset_size: 220
- config_name: intentsqwen3-32b
  features:
  - name: id
    dtype: int64
  - name: name
    dtype: 'null'
  - name: tags
    sequence: 'null'
  - name: regex_full_match
    sequence: 'null'
  - name: regex_partial_match
    sequence: 'null'
  - name: description
    dtype: string
  splits:
  - name: intents
    num_bytes: 1560
    num_examples: 11
  download_size: 4185
  dataset_size: 1560
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
  - split: test
    path: data/test-*
  - split: validation
    path: data/validation-*
- config_name: intents
  data_files:
  - split: intents
    path: intents/intents-*
- config_name: intentsqwen3-32b
  data_files:
  - split: intents
    path: intentsqwen3-32b/intents-*
task_categories:
- text-classification
language:
- en
---
|
|
|
# eurlex
|
|
|
This is a multi-label text classification dataset intended for machine learning research and experimentation.
|
|
|
This dataset was obtained by reformatting another publicly available dataset to be compatible with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html).
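
Each sample pairs an `utterance` string with a multi-hot `label` vector, one indicator per class (the `intents` config in the metadata above lists 11 classes). A hypothetical record, for illustration only:

```python
# Shape of a single record, per the features declared in this card's
# metadata; the values themselves are made up for illustration.
sample = {
    "utterance": "Commission Regulation (EC) concerning ...",
    "label": [0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],  # multi-hot over 11 classes
}
```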
|
|
|
## Usage
|
|
|
It is intended to be used with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html):
|
|
|
```python
from autointent import Dataset

eurlex = Dataset.from_hub("AutoIntent/eurlex")
```
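
Once loaded, the splits can be inspected; a minimal sketch, assuming the returned `Dataset` exposes splits by name like a mapping (as in the AutoIntent documentation):

```python
from autointent import Dataset

eurlex = Dataset.from_hub("AutoIntent/eurlex")

# Peek at one training sample; split names follow this card's metadata.
print(eurlex["train"][0])
```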
|
|
|
## Source
|
|
|
This dataset is taken from `coastalcph/multi_eurlex` and formatted with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html):
|
|
|
```python
import datasets

from autointent import Dataset


def get_number_of_classes(ds: datasets.Dataset) -> int:
    """Count distinct label ids across the split."""
    return len(set(i for example in ds for i in example["labels"]))


def parse(ds: datasets.Dataset, n_classes: int) -> datasets.Dataset:
    """Convert raw (text, label ids) records into (utterance, multi-hot label) records."""
    def transform(example: dict):
        return {"utterance": example["text"], "label": [int(i in example["labels"]) for i in range(n_classes)]}
    return ds.map(transform, remove_columns=ds.column_names)


def get_low_resource_classes_mask(ds: datasets.Dataset, n_classes: int, fraction_thresh: float = 0.01) -> list[bool]:
    """Mark classes that occur in fewer than `fraction_thresh` of the samples."""
    res = [0] * n_classes
    for sample in ds:
        for i, indicator in enumerate(sample["label"]):
            res[i] += indicator
    for i in range(n_classes):
        res[i] /= len(ds)
    return [(frac < fraction_thresh) for frac in res]


def remove_low_resource_classes(ds: datasets.Dataset, mask: list[bool]) -> list[dict]:
    """Drop low-resource label columns, and drop samples whose only label is low-resource."""
    res = []
    for sample in ds:
        if sum(sample["label"]) == 1 and mask[sample["label"].index(1)]:
            continue
        sample["label"] = [
            indicator for indicator, low_resource in
            zip(sample["label"], mask, strict=True) if not low_resource
        ]
        res.append(sample)
    return res


def remove_oos(ds: list[dict]) -> list[dict]:
    """Remove samples left with no positive labels."""
    return [sample for sample in ds if sum(sample["label"]) != 0]


if __name__ == "__main__":
    eurlex = datasets.load_dataset("coastalcph/multi_eurlex", "en", trust_remote_code=True)

    n_classes = get_number_of_classes(eurlex["train"])

    train = parse(eurlex["train"], n_classes)
    test = parse(eurlex["test"], n_classes)
    validation = parse(eurlex["validation"], n_classes)

    mask = get_low_resource_classes_mask(train, n_classes)
    train = remove_oos(remove_low_resource_classes(train, mask))
    test = remove_oos(remove_low_resource_classes(test, mask))
    validation = remove_oos(remove_low_resource_classes(validation, mask))

    eurlex_converted = Dataset.from_dict({
        "train": train,
        "test": test,
        "validation": validation,
    })
```
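
For intuition, the `transform` closure inside `parse` turns the raw list of label ids into a fixed-length indicator vector. A toy example with made-up values:

```python
# Hypothetical values illustrating the multi-hot encoding done in `parse`.
n_classes = 5
raw_label_ids = [1, 3]
label = [int(i in raw_label_ids) for i in range(n_classes)]
assert label == [0, 1, 0, 1, 0]
```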
|
|