import datasets
import pandas as pd

# Root directory holding one sub-directory of CSV splits per configuration.
BASE_DATA_PATH = "./data"


class CSCOMMConfig(datasets.BuilderConfig):
    """BuilderConfig for CSCOMM."""

    def __init__(self, key, pretraining=False, **kwargs):
        """BuilderConfig for CSCOMM.

        Args:
          key: `string`, name of the sub-directory under `data/` that holds this
            configuration's CSV splits.
          pretraining: `bool`, whether this is a pretraining configuration
            (pretraining examples carry no `commentary` field).
          **kwargs: keyword arguments forwarded to super.
        """
        # Version history:
        # 0.0.1: Initial version.
        super(CSCOMMConfig, self).__init__(version=datasets.Version("0.0.1"), **kwargs)
        self.key = key
        self.pretraining = pretraining


class CSCOMM(datasets.GeneratorBasedBuilder):
    # Each configuration maps to one sub-directory of `data/` via its `key`;
    # the pretraining configurations expose source text only (no commentary).
    BUILDER_CONFIGS = [
CSCOMMConfig(
name="AP",
key="ap"
),
CSCOMMConfig(
name="AP+P",
key="ap_p"
),
CSCOMMConfig(
name="AP+J",
key="ap_j"
),
CSCOMMConfig(
name="AP+PJ",
key="ap_pj"
),
CSCOMMConfig(
name="BA",
key="ba"
),
CSCOMMConfig(
name="BA+P",
key="ba_p"
),
CSCOMMConfig(
name="BA+J",
key="ba_j"
),
CSCOMMConfig(
name="BA+PJ",
key="ba_pj"
),
CSCOMMConfig(
name="pretrain-unlabeled",
key="pt_un",
pretraining=True
),
CSCOMMConfig(
name="pretrain-labeled",
key="pt_la",
pretraining=True
),
CSCOMMConfig(
name="pretrain-both",
key="pt_unla",
pretraining=True
),
]

    def _info(self):
        # Every config exposes a round identifier and the source text; the
        # commentary target is only present for non-pretraining configs.
        features = {
            "round_id": datasets.Value("string"),
            "source": datasets.Value("string"),
        }
        if not self.config.pretraining:
            features["commentary"] = datasets.Value("string")
        return datasets.DatasetInfo(
            features=datasets.Features(features),
        )

    def _split_generators(self, dl_manager):
        # Resolve (and, in a hosted repository, download) the per-split CSV
        # files for the selected configuration.
        dl_dir = dl_manager.download_and_extract({
            "train": f"{BASE_DATA_PATH}/{self.config.key}/train.csv",
            "valid": f"{BASE_DATA_PATH}/{self.config.key}/valid.csv",
            "test": f"{BASE_DATA_PATH}/{self.config.key}/test.csv",
        })
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"data_file": dl_dir["train"],
"split": datasets.Split.TRAIN,
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"data_file": dl_dir["valid"],
"split": datasets.Split.VALIDATION,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"data_file": dl_dir["test"],
"split": datasets.Split.TEST,
},
),
]

    def _generate_examples(self, data_file, split):
        # Stream one example per CSV row; pretraining configs omit the
        # commentary column.
        df = pd.read_csv(data_file)
        for i, row in enumerate(df.itertuples()):
            example = {"round_id": row.round_id, "source": row.source}
            if not self.config.pretraining:
                example["commentary"] = row.commentary
            yield i, example
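

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original script). It assumes this
    # file sits next to the `data/` directory and that the dataset is loaded
    # from the local repository; "AP" is one of the config names defined above.
    # Depending on your `datasets` version you may also need to pass
    # `trust_remote_code=True` to allow running this loading script.
    ds = datasets.load_dataset("cscomm.py", name="AP")
    print(ds)
    print(ds["train"][0])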