Commit 90c562e
1 Parent(s): 3ab7660
feat

VieGLUE.py  +7 -75
VieGLUE.py  CHANGED
@@ -99,80 +99,7 @@ _DOWNLOAD_URL = {
         "validation": [os.path.join("data", "wnli", "validation.tar.gz")],
     },
 }
-
-# name="cola",
-# description=textwrap.dedent(
-#     """\
-#     The Corpus of Linguistic Acceptability consists of English
-#     acceptability judgments drawn from books and journal articles on
-#     linguistic theory. Each example is a sequence of words annotated
-#     with whether it is a grammatical English sentence."""
-# ),
-# text_features={"sentence": "sentence"},
-# label_classes=["unacceptable", "acceptable"],
-# label_column="is_acceptable",
-# data_url="https://dl.fbaipublicfiles.com/glue/data/CoLA.zip",
-# data_dir="CoLA",
-# citation=textwrap.dedent(
-#     """\
-#     @article{warstadt2018neural,
-#       title={Neural Network Acceptability Judgments},
-#       author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},
-#       journal={arXiv preprint arXiv:1805.12471},
-#       year={2018}
-#     }"""
-# ),
-# url="https://nyu-mll.github.io/CoLA/",
-# ),
-# VieGLUEConfig(
-#     name="mnli",
-#     description=textwrap.dedent(
-#         """\
-#         The Multi-Genre Natural Language Inference Corpus is a crowdsourced
-#         collection of sentence pairs with textual entailment annotations. Given a premise sentence
-#         and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
-#         (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
-#         gathered from ten different sources, including transcribed speech, fiction, and government reports.
-#         We use the standard test set, for which we obtained private labels from the authors, and evaluate
-#         on both the matched (in-domain) and mismatched (cross-domain) section. We also use and recommend
-#         the SNLI corpus as 550k examples of auxiliary training data."""
-#     ),
-#     text_features={
-#         "premise": "sentence1",
-#         "hypothesis": "sentence2",
-#     },
-#     label_classes=["entailment", "neutral", "contradiction"],
-#     label_column="gold_label",
-#     data_url="https://dl.fbaipublicfiles.com/glue/data/MNLI.zip",
-#     data_dir="MNLI",
-#     citation=textwrap.dedent(
-#         """\
-#         @InProceedings{N18-1101,
-#           author = "Williams, Adina
-#                     and Nangia, Nikita
-#                     and Bowman, Samuel",
-#           title = "A Broad-Coverage Challenge Corpus for
-#                    Sentence Understanding through Inference",
-#           booktitle = "Proceedings of the 2018 Conference of
-#                        the North American Chapter of the
-#                        Association for Computational Linguistics:
-#                        Human Language Technologies, Volume 1 (Long
-#                        Papers)",
-#           year = "2018",
-#           publisher = "Association for Computational Linguistics",
-#           pages = "1112--1122",
-#           location = "New Orleans, Louisiana",
-#           url = "http://aclweb.org/anthology/N18-1101"
-#         }
-#         @article{bowman2015large,
-#           title={A large annotated corpus for learning natural language inference},
-#           author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},
-#           journal={arXiv preprint arXiv:1508.05326},
-#           year={2015}
-#         }"""
-#     ),
-#     url="http://www.nyu.edu/projects/bowman/multinli/",
-#     ),
+
 SUBSET_KWARGS = {
     "ax": {
         "name": "ax",
@@ -564,6 +491,7 @@ class VNExpress(datasets.GeneratorBasedBuilder):
 
     def _generate_examples(self, files, urls, stage, config):
         # id_ = 0
+        features = config.text_features
 
         if not isinstance(files, list):
            files = [files]
@@ -580,4 +508,8 @@ class VNExpress(datasets.GeneratorBasedBuilder):
         print(f"Loaded {len(all_samples)} samples from {file_path}")
         print(f"Sample: {all_samples[0]}")
         for sample in all_samples:
-            yield
+            yield {
+                "idx": sample["idx"],
+                "label": sample["label"],
+                **{f: sample[f] for f in features},
+            }
|