import json
import os

import datasets


class ERRNewsConfig(datasets.BuilderConfig):
    """BuilderConfig holding the feature list and the base URL of the data files."""

    def __init__(self, data_url, features, **kwargs):
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = features
        self.data_url = data_url


class ERRNews(datasets.GeneratorBasedBuilder):
    """Builder for the AMI Summarization Dataset (AMIsum): meeting transcripts paired with abstract summaries."""

    features = ["transcript", "summary", "id"]
    data_url = "https://cs.taltech.ee/staff/heharm/AMIsum/"

    BUILDER_CONFIGS = [
        ERRNewsConfig(
            name="full",
            features=features,
            data_url=data_url,
        )
    ]

    DEFAULT_CONFIG_NAME = "full"

    def _info(self):
        features = datasets.Features(
            {
                "transcript": datasets.Value("string"),
                "summary": datasets.Value("string"),
                "id": datasets.Value("string"),
            }
        )
        description = """\
AMI Summarization Dataset (AMIsum) is a meeting summarization dataset, consisting of meeting transcripts \
and abstract summaries from the AMI Corpus: https://groups.inf.ed.ac.uk/ami/corpus/.
"""
        return datasets.DatasetInfo(
            features=features,
            description=description,
            supervised_keys=None,
            version=self.config.version,
        )
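
    # Note (inferred from `_generate_examples` below, not stated explicitly in the
    # script): each split file is read column-wise, i.e. "transcript", "summary"
    # and "id" are assumed to be parallel lists of equal length, roughly:
    #   {"transcript": ["...", ...], "summary": ["...", ...], "id": ["...", ...]}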

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        train = "train.json"
        test = "test.json"
        val = "val.json"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "file_path": dl_manager.download(self.config.data_url + train),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "file_path": dl_manager.download(self.config.data_url + val),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "file_path": dl_manager.download(self.config.data_url + test),
                },
            ),
        ]

    def create_dict(self, data):
        # Helper that keeps only the configured feature keys of a raw data dict.
        res = dict()
        for key in self.config.features:
            res[key] = data[key]
        return res

    def _generate_examples(self, file_path):
        """Yields (key, example) pairs read from one column-oriented split file."""
        with open(file_path, encoding="utf-8") as f:
            data = json.load(f)
        for idx, transcript in enumerate(data["transcript"]):
            id_ = data["id"][idx]
            yield id_, {
                "transcript": transcript,
                "summary": data["summary"][idx],
                "id": id_,
            }
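

# Minimal usage sketch (illustration only, not part of the loading script):
# `datasets.load_dataset` can be pointed at this script file, which downloads
# the train/val/test JSON files from `data_url` and builds the three splits.
# Recent versions of `datasets` may additionally require `trust_remote_code=True`.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset(__file__, name="full")
    print(dataset)
    print(dataset["train"][0]["id"])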