Datasets:
Update README.md
Browse files
README.md
CHANGED
@@ -78,32 +78,37 @@ massive_ru = Dataset.from_datasets("AutoIntent/massive_ru")
|
|
78 |
This dataset is taken from `mteb/amazon_massive_intent` and formatted with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html):
|
79 |
|
80 |
```python
|
|
|
81 |
from datasets import load_dataset
|
82 |
|
83 |
-
|
84 |
-
|
85 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
86 |
n_classes = len(intent_names)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
87 |
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
for batch in massive_train.iter(batch_size=16, drop_last_batch=False):
|
99 |
-
for txt, name in zip(batch["text"], batch["label"], strict=False):
|
100 |
-
intent_id = name_to_id[name]
|
101 |
-
target_list = classwise_utterance_records[intent_id]
|
102 |
-
target_list.append({"utterance": txt, "label": intent_id})
|
103 |
-
|
104 |
-
utterances = [rec for lst in classwise_utterance_records for rec in lst]
|
105 |
-
return Dataset.from_dict({"intents": intents, "train": utterances})
|
106 |
-
|
107 |
-
massive = load_dataset("mteb/amazon_massive_intent", "ru")
|
108 |
-
massive_converted = convert_massive(massive["train"])
|
109 |
```
|
|
|
78 |
This dataset is taken from `mteb/amazon_massive_intent` and formatted with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html):
|
79 |
|
80 |
```python
|
81 |
+
from datasets import Dataset as HFDataset
|
82 |
from datasets import load_dataset
|
83 |
|
84 |
+
from autointent import Dataset
|
85 |
+
from autointent.schemas import Intent, Sample
|
86 |
+
|
87 |
+
|
88 |
+
def extract_intents_info(split: HFDataset) -> tuple[list[Intent], dict[str, int]]:
    """Collect intent metadata from a raw MASSIVE split.

    Builds the sorted list of intent names (two labels are deliberately
    excluded from the published dataset), then derives both the ``Intent``
    records and a name -> integer-id lookup table.

    :param split: raw HF split exposing a string ``label`` column
    :return: (intent records, mapping from intent name to class id)
    """
    names = sorted(split.unique("label"))
    # These two intents are intentionally dropped from the final dataset.
    for excluded in ("cooking_query", "audio_volume_other"):
        names.remove(excluded)
    name_to_id = {name: idx for idx, name in enumerate(names)}
    intents_data = [Intent(id=idx, name=name) for name, idx in name_to_id.items()]
    return intents_data, name_to_id
|
97 |
+
|
98 |
+
|
99 |
+
def convert_massive(split: HFDataset, name_to_id: dict[str, int]) -> list[Sample]:
    """Turn raw rows into ``Sample`` objects.

    Rows whose label is absent from ``name_to_id`` (the excluded intents)
    are silently skipped.

    :param split: raw HF split with ``text`` and ``label`` columns
    :param name_to_id: mapping from intent name to class id
    :return: converted samples with integer labels
    """
    samples: list[Sample] = []
    for row in split:
        label = row["label"]
        if label in name_to_id:
            samples.append(Sample(utterance=row["text"], label=name_to_id[label]))
    return samples
|
102 |
+
|
103 |
|
104 |
+
if __name__ == "__main__":
|
105 |
+
massive = load_dataset("mteb/amazon_massive_intent", "ru")
|
106 |
+
intents, name_to_id = extract_intents_info(massive["train"])
|
107 |
+
train_samples = convert_massive(massive["train"], name_to_id)
|
108 |
+
test_samples = convert_massive(massive["test"], name_to_id)
|
109 |
+
validation_samples = convert_massive(massive["validation"], name_to_id)
|
110 |
+
dataset = Dataset.from_dict(
|
111 |
+
{"intents": intents, "train": train_samples, "test": test_samples, "validation": validation_samples}
|
112 |
+
)
|
113 |
+
dataset.to_json("data/massive_ru.json")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
114 |
```
|