|
"""TermITH-Eval benchmark dataset for keyphrase extraction and generation."""
|
|
|
import csv |
|
import json |
|
import os |
|
import datasets |
|
|
|
|
|
|
|
_CITATION = """\ |
|
@inproceedings{bougouin-etal-2016-termith, |
|
title = "{T}erm{ITH}-Eval: a {F}rench Standard-Based Resource for Keyphrase Extraction Evaluation", |
|
author = "Bougouin, Adrien and |
|
Barreaux, Sabine and |
|
Romary, Laurent and |
|
Boudin, Florian and |
|
Daille, B{\'e}atrice", |
|
booktitle = "Proceedings of the Tenth International Conference on Language Resources and Evaluation ({LREC}'16)", |
|
month = may, |
|
year = "2016", |
|
address = "Portoro{\v{z}}, Slovenia", |
|
publisher = "European Language Resources Association (ELRA)", |
|
url = "https://aclanthology.org/L16-1304", |
|
pages = "1924--1927", |
|
abstract = "Keyphrase extraction is the task of finding phrases that represent the important content of a document. The main aim of keyphrase extraction is to propose textual units that represent the most important topics developed in a document. The output keyphrases of automatic keyphrase extraction methods for test documents are typically evaluated by comparing them to manually assigned reference keyphrases. Each output keyphrase is considered correct if it matches one of the reference keyphrases. However, the choice of the appropriate textual unit (keyphrase) for a topic is sometimes subjective and evaluating by exact matching underestimates the performance. This paper presents a dataset of evaluation scores assigned to automatically extracted keyphrases by human evaluators. Along with the reference keyphrases, the manual evaluations can be used to validate new evaluation measures. Indeed, an evaluation measure that is highly correlated to the manual evaluation is appropriate for the evaluation of automatic keyphrase extraction methods.", |
|
} |
|
""" |
|
|
|
|
|
_DESCRIPTION = """\ |
|
TermITH-Eval benchmark dataset for keyphrase extraction an generation. |
|
""" |
|
|
|
|
|
_HOMEPAGE = "https://aclanthology.org/L16-1304.pdf" |
|
|
|
|
|
_LICENSE = "Apache 2.0 License" |
|
|
|
|
|
|
|
|
|
_URLS = { |
|
"test": "test.jsonl" |
|
} |
|
|
|
|
|
class Wikinews(datasets.GeneratorBasedBuilder):
    """TermITH-Eval benchmark dataset for keyphrase extraction and generation.

    Reads a JSON-lines test file where each row carries an id, a title, an
    abstract, the gold keyphrases, and their PRMU categories, and exposes
    them as a single ``test`` split.

    NOTE(review): the class name ``Wikinews`` does not match the
    TermITH-Eval data this script loads — presumably copied from another
    loading script. Kept unchanged so external references keep working.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="raw",
            version=VERSION,
            description="This part of my dataset covers the raw data.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "raw"

    def _info(self):
        """Return the dataset metadata (features, citation, homepage, license)."""
        # Only the "raw" configuration exists, so the features are defined
        # unconditionally. The original guarded this behind
        # `if self.config.name == "raw"` but still used `features` below,
        # which would raise UnboundLocalError for any other config name.
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "abstract": datasets.Value("string"),
                "keyphrases": datasets.features.Sequence(datasets.Value("string")),
                "prmu": datasets.features.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/resolve the data files and declare the single test split."""
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    # download_and_extract already returns a usable local
                    # path; the original single-argument os.path.join(...)
                    # around it was a no-op.
                    "filepath": data_dir["test"],
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` pairs from a JSON-lines file.

        Args:
            filepath: Path to the ``.jsonl`` file, one JSON object per line.
            split: Split name; unused here but supplied by
                ``_split_generators`` via ``gen_kwargs``.
        """
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                yield key, {
                    "id": data["id"],
                    "title": data["title"],
                    "abstract": data["abstract"],
                    "keyphrases": data["keyphrases"],
                    "prmu": data["prmu"],
                }
|
|
|
|