howey committed
Commit 39b18fc · Parent: 8903e26

remove config script

Files changed (1)
  1. super_scirep.py +0 -191
super_scirep.py DELETED
@@ -1,191 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # TODO: Address all TODOs and remove all explanatory comments
- """TODO: Add a description here."""
-
- import csv
- import json
-
- import datasets
- from datasets.data_files import DataFilesDict
- from .super_scirep_config import SUPERSCIREPEVAL_CONFIGS
-
- # from datasets.packaged_modules.json import json
-
-
- # TODO: Add BibTeX citation
- # Find for instance the citation on arxiv or on the dataset repo/website
- _CITATION = """\
- @InProceedings{huggingface:dataset,
- title = {A great new dataset},
- author={huggingface, Inc.
- },
- year={2021}
- }
- """
-
- # TODO: Add description of the dataset here
- # You can copy an official description
- _DESCRIPTION = """\
- This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
- """
-
- # TODO: Add a link to an official homepage for the dataset here
- _HOMEPAGE = ""
-
- # TODO: Add the licence for the dataset here if you can find it
- _LICENSE = ""
-
- # TODO: Add link to the official dataset URLs here
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
- _URLS = {
-     "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
-     "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
- }
-
-
- # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
- class SuperSciRep(datasets.GeneratorBasedBuilder):
-     """TODO: Short description of my dataset."""
-
-     VERSION = datasets.Version("1.1.0")
-
-     # This is an example of a dataset with multiple configurations.
-     # If you don't want/need to define several sub-sets in your dataset,
-     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-     # If you need to make complex sub-parts in the datasets with configurable options
-     # You can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
-     # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-     # You will be able to load one or the other configurations in the following list with
-     # data = datasets.load_dataset('my_dataset', 'first_domain')
-     # data = datasets.load_dataset('my_dataset', 'second_domain')
-     BUILDER_CONFIGS = SUPERSCIREPEVAL_CONFIGS
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=self.config.description,
-             # This defines the different columns of the dataset and their types
-             features=datasets.Features(self.config.features),
-             # Here we define them above because they are different between the two configurations
-             # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-             # supervised_keys=("sentence", "label"),
-             # Homepage of the dataset for documentation
-             homepage="",
-             # License for the dataset if available
-             license=self.config.license,
-             # Citation for the dataset
-             citation=self.config.citation,
-         )
-
-     def _split_generators(self, dl_manager):
-         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-         base_url = "https://ai2-s2-research-public.s3.us-west-2.amazonaws.com/scirepeval"
-         data_urls = dict()
-         data_dir = self.config.url if self.config.url else self.config.name
-         if self.config.is_training:
-             data_urls = {"train": f"{base_url}/train/{data_dir}/train.jsonl",
-                          "val": f"{base_url}/train/{data_dir}/val.jsonl"}
-
-         if "cite_prediction" not in self.config.name:
-             data_urls.update({"test": f"{base_url}/test/{data_dir}/meta.jsonl"})
-         print(data_urls)
-         downloaded_files = dl_manager.download_and_extract(data_urls)
-         splits = []
-         if "test" in downloaded_files:
-             splits = [datasets.SplitGenerator(
-                 name=datasets.Split("evaluation"),
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": downloaded_files["test"],
-                     "split": "evaluation"
-                 },
-             ),
-             ]
-
-         if "train" in downloaded_files:
-             splits += [
-                 datasets.SplitGenerator(
-                     name=datasets.Split.TRAIN,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={
-                         "filepath": downloaded_files["train"],
-                         "split": "train",
-                     },
-                 ),
-                 datasets.SplitGenerator(
-                     name=datasets.Split.VALIDATION,
-                     # These kwargs will be passed to _generate_examples
-                     gen_kwargs={
-                         "filepath": downloaded_files["val"],
-                         "split": "validation",
-                     })
-             ]
-         return splits
-
-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def _generate_examples(self, filepath, split):
-         def read_data(data_path):
-             task_data = []
-             try:
-                 # file may hold a single JSON document...
-                 with open(data_path, "r", encoding="utf-8") as f:
-                     task_data = json.load(f)
-             except json.JSONDecodeError:
-                 # ...or JSON Lines, one record per line
-                 with open(data_path) as f:
-                     task_data = [json.loads(line) for line in f]
-             if isinstance(task_data, dict):
-                 task_data = list(task_data.values())
-             return task_data
-
-         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-         # data = read_data(filepath)
-         seen_keys = set()
-         IGNORE = set(["n_key_citations", "session_id", "user_id", "user"])
-         with open(filepath, encoding="utf-8") as f:
-             for line in f:
-                 d = json.loads(line)
-                 d = {k: v for k, v in d.items() if k not in IGNORE}
-                 key = "doc_id" if self.config.name != "cite_prediction_new" else "corpus_id"
-                 if self.config.task_type == "proximity":
-                     if "cite_prediction" in self.config.name:
-                         if "arxiv_id" in d["query"]:
-                             for item in ["query", "pos", "neg"]:
-                                 del d[item]["arxiv_id"]
-                                 del d[item]["doi"]
-                         if "fos" in d["query"]:
-                             del d["query"]["fos"]
-                         if "score" in d["pos"]:
-                             del d["pos"]["score"]
-                         yield str(d["query"][key]) + str(d["pos"][key]) + str(d["neg"][key]), d
-                     else:
-                         if d["query"][key] not in seen_keys:
-                             seen_keys.add(d["query"][key])
-                             yield str(d["query"][key]), d
-                 else:
-                     if d[key] not in seen_keys:
-                         seen_keys.add(d[key])
-                         if self.config.task_type != "search":
-                             if "corpus_id" not in d:
-                                 d["corpus_id"] = None
-                         if "scidocs" in self.config.name:
-                             if "cited_by" not in d:
-                                 d["cited_by"] = []
-                             if isinstance(d["corpus_id"], str):
-                                 d["corpus_id"] = None
-                         yield d[key], d
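
For context, a loading script like the one removed above is executed by `datasets.load_dataset` when the dataset repo is loaded. A minimal usage sketch; the repo id "allenai/scirepeval" and config name "fos" are assumptions for illustration, not confirmed by this commit:

    from datasets import load_dataset

    # Repo id and config name are illustrative guesses.
    ds = load_dataset("allenai/scirepeval", "fos", trust_remote_code=True)
    # Training configs expose "train"/"validation"; others a single "evaluation" split.
    print(ds)

The script also relies on the companion module it imports (`from .super_scirep_config import SUPERSCIREPEVAL_CONFIGS`) and reads `url`, `is_training`, `task_type`, `features`, `license`, and `citation` off `self.config`. A hedged sketch of what that config class may have looked like, with attribute names taken from the usage above and all defaults assumed:

    import datasets

    class SuperSciRepConfig(datasets.BuilderConfig):  # hypothetical reconstruction
        def __init__(self, features=None, citation="", license="", url=None,
                     is_training=False, task_type="classification", **kwargs):
            # `name` and `description` come from the datasets.BuilderConfig base class
            super().__init__(version=datasets.Version("1.1.0"), **kwargs)
            self.features = features
            self.citation = citation
            self.license = license
            self.url = url
            self.is_training = is_training
            self.task_type = task_type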