Tasks: Question Answering
Sub-tasks: extractive-qa
Modalities: Text
Languages: code
Size: 100K - 1M
Update loading script with download manager

codequeries.py  CHANGED  (+40, -41)
@@ -41,9 +41,6 @@ CodeQueries Ideal setup.
 _PREFIX_DESCRIPTION = """\
 CodeQueries Prefix setup."""
 
-_SLIDING_WINDOW_DESCRIPTION = """\
-CodeQueries Sliding window setup."""
-
 _FILE_IDEAL_DESCRIPTION = """\
 CodeQueries File level Ideal setup."""
 
@@ -82,28 +79,25 @@ class Codequeries(datasets.GeneratorBasedBuilder):
                       "supporting_fact_spans", "code_file_path", "example_type",
                       "subtokenized_input_sequence", "label_sequence"],
             citation=_CODEQUERIES_CITATION,
-
-
+            data_url={
+                "train": "ideal_train.json",
+                "dev": "ideal_val.json",
+                "test": "ideal_test.json"
+            },
+            url="",
+        ),
+        CodequeriesConfig(
+            name="prefix",
+            description=_PREFIX_DESCRIPTION,
+            features=["query_name", "answer_spans",
+                      "supporting_fact_spans", "code_file_path", "example_type",
+                      "subtokenized_input_sequence", "label_sequence"],
+            citation=_CODEQUERIES_CITATION,
+            data_url={
+                "test": "prefix_test.json"
+            },
             url="",
         ),
-        # CodequeriesConfig(
-        #     name="prefix",
-        #     description=_PREFIX_DESCRIPTION,
-        #     features=["query_name", "context_blocks", "answer_spans",
-        #               "supporting_fact_spans", "code_file_path", "example_type",
-        #               "subtokenized_input_sequence", "label_sequence"],
-        #     citation=_CODEQUERIES_CITATION,
-        #     url="",
-        # ),
-        # CodequeriesConfig(
-        #     name="sliding_window",
-        #     description=_SLIDING_WINDOW_DESCRIPTION,
-        #     features=["query_name", "context_blocks", "answer_spans",
-        #               "supporting_fact_spans", "code_file_path", "example_type",
-        #               "subtokenized_input_sequence", "label_sequence"],
-        #     citation=_CODEQUERIES_CITATION,
-        #     url="",
-        # ),
         CodequeriesConfig(
             name="file_ideal",
             description=_FILE_IDEAL_DESCRIPTION,
@@ -111,23 +105,28 @@ class Codequeries(datasets.GeneratorBasedBuilder):
                       "supporting_fact_spans", "code_file_path", "example_type",
                       "subtokenized_input_sequence", "label_sequence"],
             citation=_CODEQUERIES_CITATION,
+            data_url={
+                "test": "file_ideal_test.json"
+            },
+            url="",
+        ),
+        CodequeriesConfig(
+            name="twostep",
+            description=_TWOSTEP_DESCRIPTION,
+            features=["query_name", "context_blocks", "answer_spans",
+                      "supporting_fact_spans", "code_file_path", "example_type",
+                      "subtokenized_input_sequence", "label_sequence"],
+            citation=_CODEQUERIES_CITATION,
+            data_url={
+                "test": "twostep_relevance"
+            },
             url="",
         ),
-        # CodequeriesConfig(
-        #     name="twostep",
-        #     description=_TWOSTEP_DESCRIPTION,
-        #     features=["query_name", "context_blocks", "answer_spans",
-        #               "supporting_fact_spans", "code_file_path", "example_type",
-        #               "subtokenized_input_sequence", "label_sequence"],
-        #     citation=_CODEQUERIES_CITATION,
-        #     url="",
-        # ),
     ]
 
-
+    DEFAULT_CONFIG_NAME = "ideal"
 
     def _info(self):
-        # features = {feature: datasets.Value("string") for feature in self.config.features}
         features = {}
         features["query_name"] = datasets.Value("string")
         features["context_blocks"] = [
@@ -168,13 +167,13 @@ class Codequeries(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        dl_dir =
-        if self.config.name in ["prefix", "
+        dl_dir = dl_manager.download_and_extract(self.config.data_url)
+        if self.config.name in ["prefix", "file_ideal", "twostep"]:
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
-                        "filepath": os.path.join(dl_dir
+                        "filepath": os.path.join(dl_dir["test"]),
                         "split": datasets.Split.TEST,
                     },
                 ),
@@ -184,28 +183,28 @@ class Codequeries(datasets.GeneratorBasedBuilder):
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     gen_kwargs={
-                        "filepath": os.path.join(dl_dir
+                        "filepath": os.path.join(dl_dir["train"]),
                         "split": datasets.Split.TRAIN,
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     gen_kwargs={
-                        "filepath": os.path.join(dl_dir
+                        "filepath": os.path.join(dl_dir["dev"]),
                         "split": datasets.Split.VALIDATION,
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
-                        "filepath": os.path.join(dl_dir
+                        "filepath": os.path.join(dl_dir["test"]),
                         "split": datasets.Split.TEST,
                     },
                 ),
             ]
 
     def _generate_examples(self, filepath, split):
-        if self.config.name in ["prefix", "
+        if self.config.name in ["prefix", "file_ideal", "twostep"]:
             assert split == datasets.Split.TEST
             logger.info("generating examples from = %s", filepath)
 
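As a usage sketch (not part of this commit): with the data_url wiring above, dl_manager.download_and_extract receives each config's dict of file names and returns a matching dict of local paths that _split_generators indexes by split key. The repository id below is a placeholder assumption; only the config names and available splits come from the script.

# Minimal sketch, assuming this script is hosted as a Hugging Face dataset repo.
# "<namespace>/codequeries" is a placeholder id, not taken from this commit.
# (Newer versions of the datasets library may also require trust_remote_code=True.)
from datasets import load_dataset

# The "ideal" config declares train/dev/test files in data_url,
# so all three splits are generated.
ideal_train = load_dataset("<namespace>/codequeries", "ideal", split="train")

# "prefix", "file_ideal" and "twostep" declare only a test file,
# so _split_generators exposes just the TEST split for them.
prefix_test = load_dataset("<namespace>/codequeries", "prefix", split="test")

print(ideal_train[0]["query_name"])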