"""Hugging Face `datasets` loading script for the natural_unit_conversion dataset."""

import json

import datasets

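# Each split file is expected to hold a JSON array of records matching the
# features declared in `_info` below (field names from the schema; values are
# placeholders):
#
#   {"text": "<sentence>", "entities": [{"start": <int>, "end": <int>, "tag": "<label>"}]}
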
class CustomUnitConversionDataset(datasets.GeneratorBasedBuilder):
    """Builder for the natural_unit_conversion dataset splits."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # One JSON file per split, hosted in the dataset repository.
        self.urls = {
            "train": "https://huggingface.co/datasets/maliknaik/natural_unit_conversion/resolve/main/train.json",
            "test": "https://huggingface.co/datasets/maliknaik/natural_unit_conversion/resolve/main/test.json",
            "val": "https://huggingface.co/datasets/maliknaik/natural_unit_conversion/resolve/main/val.json",
        }

    def _info(self):
        _DESCRIPTION = """This dataset contains natural-language unit conversion requests. Each example pairs a sentence with entity annotations (in a spaCy-supported format) for Named-Entity Recognition (NER) modeling; the entities mark the values and units being converted. The goal is to support systems that extract unit-conversion parameters from natural language.
        """
        _CITATION = '''@misc{unit-conversion-dataset,
            author = {Malik N. Mohammed},
            title = {Natural Language Unit Conversion Dataset for Named-Entity Recognition},
            year = {2025},
            publisher = {HuggingFace},
            journal = {HuggingFace repository},
            howpublished = {\\url{https://huggingface.co/datasets/maliknaik/natural_unit_conversion}}
        }
        '''

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "entities": datasets.features.Sequence(
                        {
                            "start": datasets.Value("int32"),
                            "end": datasets.Value("int32"),
                            "tag": datasets.Value("string"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/maliknaik/natural_unit_conversion",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download (and cache) the per-split JSON files.
        downloaded_files = dl_manager.download_and_extract(self.urls)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["val"]}),
        ]

    def _generate_examples(self, filepath):
        # Each split file holds a JSON array of records; yield them with a
        # running integer as the example key.
        with open(filepath, "r", encoding="utf-8") as f:
            data = json.load(f)

        for i, example in enumerate(data):
            yield i, example
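
# A minimal usage sketch: load the splits by pointing `datasets.load_dataset`
# at this loading script. The filename below is an assumption; substitute the
# actual path where this file is saved. Recent `datasets` releases may also
# require passing trust_remote_code=True.
if __name__ == "__main__":
    dataset = datasets.load_dataset("natural_unit_conversion.py")
    # A `Sequence` of a dict is exposed as a dict of parallel lists, e.g.
    # {"start": [...], "end": [...], "tag": [...]}.
    print(dataset["train"][0])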