Update readme
Browse files- README.md +12 -8
- natural_unit_conversion-custom.py +63 -0
README.md
CHANGED
@@ -57,17 +57,21 @@ The data is structured with the following fields:
|
|
57 |
[14, 18, "UNIT_VALUE"],
|
58 |
[19, 22, "FROM_UNIT"]
|
59 |
]
|
60 |
-
},
|
61 |
-
{
|
62 |
-
"text": "Transform 2562 kwh into ft lb",
|
63 |
-
"entities": [
|
64 |
-
[24, 29, "TO_UNIT"],
|
65 |
-
[10, 14, "UNIT_VALUE"],
|
66 |
-
[15, 18, "FROM_UNIT"]]
|
67 |
-
}
|
68 |
]
|
69 |
```
|
70 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
71 |
### Entity Types
|
72 |
- **UNIT_VALUE**: Represents the value to be converted.
|
73 |
- **FROM_UNIT**: The unit from which the conversion is being made.
|
|
|
57 |
[14, 18, "UNIT_VALUE"],
|
58 |
[19, 22, "FROM_UNIT"]
|
59 |
]
|
60 |
+
},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
61 |
]
|
62 |
```
|
63 |
|
64 |
+
### Loading Dataset
|
65 |
+
You can load the dataset `maliknaik/natural_unit_conversion` using the following code:
|
66 |
+
|
67 |
+
```python
|
68 |
+
from datasets import load_dataset
|
69 |
+
|
70 |
+
dataset_name = "maliknaik/natural_unit_conversion"
|
71 |
+
dataset = load_dataset(dataset_name)
|
72 |
+
```
|
73 |
+
|
74 |
+
|
75 |
### Entity Types
|
76 |
- **UNIT_VALUE**: Represents the value to be converted.
|
77 |
- **FROM_UNIT**: The unit from which the conversion is being made.
|
natural_unit_conversion-custom.py
ADDED
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
import datasets
|
3 |
+
import json
|
4 |
+
|
5 |
+
|
6 |
+
class CustomUnitConversionDataset(datasets.GeneratorBasedBuilder):
    """Builder for the ``maliknaik/natural_unit_conversion`` NER dataset.

    Each example pairs a natural-language conversion request (``text``)
    with character-offset entity annotations (``entities``) tagged as
    UNIT_VALUE, FROM_UNIT or TO_UNIT. Splits are served as JSON files
    hosted on the Hugging Face Hub.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # One remote JSON file per split; keys match the names used in
        # _split_generators below.
        self.urls = {
            "train": "https://huggingface.co/datasets/maliknaik/natural_unit_conversion/resolve/main/train.json",
            "test": "https://huggingface.co/datasets/maliknaik/natural_unit_conversion/resolve/main/test.json",
            "val": "https://huggingface.co/datasets/maliknaik/natural_unit_conversion/resolve/main/val.json",
        }

    def _info(self):
        """Return dataset metadata: description, features schema, citation."""
        _DESCRIPTION = """This dataset contains unit conversion requests, where each example includes a sentence with associated entities (in spaCy-supported format) for Named-Entity Recognition (NER) modeling. The entities represent the values and units being converted. The goal is to aid in developing systems capable of extracting unit conversion data from natural language for natural language understanding.
        """
        # NOTE: a comma was missing after the `journal` field, which made
        # the BibTeX entry unparseable; fixed here.
        _CITATION = '''@misc{unit-conversion-dataset,
          author = {Malik N. Mohammed},
          title = {Natural Language Unit Conversion Dataset for Named-Entity Recognition},
          year = {2025},
          publisher = {HuggingFace},
          journal = {HuggingFace repository},
          howpublished = {\\url{https://huggingface.co/datasets/maliknaik/natural_unit_conversion}}
        }
        '''

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "entities": datasets.features.Sequence(
                        {
                            "start": datasets.Value("int32"),
                            "end": datasets.Value("int32"),
                            "tag": datasets.Value("string")
                        }
                    )
                }
            ),
            supervised_keys=None,
            homepage='https://huggingface.co/datasets/maliknaik/natural_unit_conversion',
            citation=_CITATION
        )

    def _split_generators(self, dl_manager):
        """Download the per-split JSON files and map them to split generators."""
        downloaded_files = dl_manager.download_and_extract(self.urls)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files['train']}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files['test']}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files['val']}),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from one downloaded split file.

        The file is expected to be a JSON array of example objects.
        """
        # JSON is UTF-8 by definition; be explicit so the read does not
        # depend on the platform's locale encoding.
        with open(filepath, "r", encoding="utf-8") as f:
            data = json.load(f)

        # NOTE(review): the declared features expect `entities` as
        # {start, end, tag} dicts, but the README's raw JSON shows
        # [start, end, tag] lists — verify the hosted files match the
        # schema, or add a conversion step here.
        for i, example in enumerate(data):
            yield i, example