howey committed · Commit fd57c3d · 1 Parent(s): e5a76b5

add json files

.gitattributes CHANGED
@@ -52,3 +52,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ feeds_m/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ fos/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ paper_reviewer_matching/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ search/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ tweet_mentions/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ cite_count/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ feeds_1/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ feeds_title/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ high_influence_cite/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ peer_review_score_hIndex/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ pub_year/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
+ scidocs_view_cite_read/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
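
These rules route every dataset_dict.json through Git LFS, so a plain git clone checks out the small pointer files shown in the hunks below rather than the JSON content itself. A minimal sketch of fetching one resolved file with huggingface_hub — the repo id "howey/super_scirep" is a guess from the committer and script name, not something this diff confirms:

from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="howey/super_scirep",      # hypothetical repo id, not confirmed by the diff
    filename="fos/dataset_dict.json",  # one of the LFS-tracked files added here
    repo_type="dataset",
)
print(open(path).read())  # prints the resolved JSON, not the LFS pointer
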
cite_count/dataset_dict.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f2316e2471443d076328a113984f158c34e838f1e8edd25bdc391312978609a
+ size 49
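
Each ADDED file above and below is a Git LFS pointer: a spec-version line, the SHA-256 of the real content (oid), and its byte size. Files sharing an oid are byte-identical; the sizes here (14, 26, 49 bytes) are consistent with the tiny {"splits": [...]} manifests that datasets.DatasetDict.save_to_disk writes, though the exact payloads are an assumption. A sketch of checking a resolved file against its pointer:

import hashlib

def lfs_oid(path):
    # Recompute the oid/size an LFS pointer would record for this file.
    with open(path, "rb") as f:
        data = f.read()
    return hashlib.sha256(data).hexdigest(), len(data)

# Assumption: a 49-byte manifest could be {"splits": ["train", "validation", "evaluation"]}
oid, size = lfs_oid("cite_count/dataset_dict.json")
print(oid, size)  # compare with the oid/size lines in the pointer above
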
feeds_1/dataset_dict.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0cf8be7ed7184c85e1be68d1fc1c541a94e65b7626e6be0eee02bab6f18001b2
+ size 26
feeds_m/dataset_dict.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0cf8be7ed7184c85e1be68d1fc1c541a94e65b7626e6be0eee02bab6f18001b2
+ size 26
feeds_title/dataset_dict.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0cf8be7ed7184c85e1be68d1fc1c541a94e65b7626e6be0eee02bab6f18001b2
+ size 26
fos/dataset_dict.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f2316e2471443d076328a113984f158c34e838f1e8edd25bdc391312978609a
+ size 49
high_influence_cite/dataset_dict.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2983501dfb489654980cae5d62abfbc77536b122f53e15dd89b63e0d6e0a0cbe
+ size 14
paper_reviewer_matching/dataset_dict.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0cf8be7ed7184c85e1be68d1fc1c541a94e65b7626e6be0eee02bab6f18001b2
+ size 26
peer_review_score_hIndex/dataset_dict.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0cf8be7ed7184c85e1be68d1fc1c541a94e65b7626e6be0eee02bab6f18001b2
+ size 26
pub_year/dataset_dict.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f2316e2471443d076328a113984f158c34e838f1e8edd25bdc391312978609a
+ size 49
scidocs_view_cite_read/dataset_dict.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0cf8be7ed7184c85e1be68d1fc1c541a94e65b7626e6be0eee02bab6f18001b2
+ size 26
search/dataset_dict.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f2316e2471443d076328a113984f158c34e838f1e8edd25bdc391312978609a
+ size 49
super_scirep.py ADDED
@@ -0,0 +1,193 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # TODO: Address all TODOs and remove all explanatory comments
+ """TODO: Add a description here."""
+
+ import csv
+ import json
+ import os
+ import glob
+
+ import datasets
+ from datasets.data_files import DataFilesDict
+ from .scirepeval_configs import SCIREPEVAL_CONFIGS
+
+ # from datasets.packaged_modules.json import json
+
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ author={huggingface, Inc.
+ },
+ year={2021}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = ""
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in the `_split_generators` method)
+ _URLS = {
+     "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
+     "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
+ }
+
+
+ # TODO: The name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class SuperSciRep(datasets.GeneratorBasedBuilder):
+     """TODO: Short description of my dataset."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the datasets with configurable options,
+     # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig.
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other configuration in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
+     BUILDER_CONFIGS = SCIREPEVAL_CONFIGS
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=self.config.description,
+             # This defines the different columns of the dataset and their types.
+             features=datasets.Features(self.config.features),
+             # These come from the config because they differ between configurations.
+             # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
+             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage="",
+             # License for the dataset if available
+             license=self.config.license,
+             # Citation for the dataset
+             citation=self.config.citation,
+         )
+
+     def _split_generators(self, dl_manager):
+         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration.
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name.
+         base_url = "https://ai2-s2-research-public.s3.us-west-2.amazonaws.com/scirepeval"
+         data_urls = dict()
+         data_dir = self.config.url if self.config.url else self.config.name
+         if self.config.is_training:
+             data_urls = {"train": f"{base_url}/train/{data_dir}/train.jsonl",
+                          "val": f"{base_url}/train/{data_dir}/val.jsonl"}
+
+         if "cite_prediction" not in self.config.name:
+             data_urls.update({"test": f"{base_url}/test/{data_dir}/meta.jsonl"})
+         print(data_urls)
+         downloaded_files = dl_manager.download_and_extract(data_urls)
+         splits = []
+         if "test" in downloaded_files:
+             splits = [
+                 datasets.SplitGenerator(
+                     name=datasets.Split("evaluation"),
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={
+                         "filepath": downloaded_files["test"],
+                         "split": "evaluation",
+                     },
+                 ),
+             ]
+
+         if "train" in downloaded_files:
+             splits += [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={
+                         "filepath": downloaded_files["train"],
+                         "split": "train",
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     # These kwargs will be passed to _generate_examples
+                     gen_kwargs={
+                         "filepath": downloaded_files["val"],
+                         "split": "validation",
+                     },
+                 ),
+             ]
+         return splits
+
+     # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
+     def _generate_examples(self, filepath, split):
+         def read_data(data_path):
+             # Load either a single JSON document or JSON-lines; currently unused
+             # (see the commented-out call below).
+             try:
+                 with open(data_path, "r", encoding="utf-8") as f:
+                     task_data = json.load(f)
+             except json.JSONDecodeError:
+                 with open(data_path, encoding="utf-8") as f:
+                     task_data = [json.loads(line) for line in f]
+             if isinstance(task_data, dict):
+                 task_data = list(task_data.values())
+             return task_data
+
+         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+         # data = read_data(filepath)
+         seen_keys = set()
+         IGNORE = {"n_key_citations", "session_id", "user_id", "user"}
+         with open(filepath, encoding="utf-8") as f:
+             for line in f:
+                 d = json.loads(line)
+                 d = {k: v for k, v in d.items() if k not in IGNORE}
+                 key = "doc_id" if self.config.name != "cite_prediction_new" else "corpus_id"
+                 if self.config.task_type == "proximity":
+                     if "cite_prediction" in self.config.name:
+                         # Triplet data: drop fields that are not part of the features.
+                         if "arxiv_id" in d["query"]:
+                             for item in ["query", "pos", "neg"]:
+                                 del d[item]["arxiv_id"]
+                                 del d[item]["doi"]
+                         if "fos" in d["query"]:
+                             del d["query"]["fos"]
+                         if "score" in d["pos"]:
+                             del d["pos"]["score"]
+                         yield str(d["query"][key]) + str(d["pos"][key]) + str(d["neg"][key]), d
+                     else:
+                         if d["query"][key] not in seen_keys:
+                             seen_keys.add(d["query"][key])
+                             yield str(d["query"][key]), d
+                 else:
+                     if d[key] not in seen_keys:
+                         seen_keys.add(d[key])
+                         if self.config.task_type != "search":
+                             if "corpus_id" not in d:
+                                 d["corpus_id"] = None
+                         if "scidocs" in self.config.name:
+                             if "cited_by" not in d:
+                                 d["cited_by"] = []
+                             if isinstance(d["corpus_id"], str):
+                                 d["corpus_id"] = None
+                         yield d[key], d
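
With the script in place, each config in SCIREPEVAL_CONFIGS loads by name. A usage sketch, again assuming the hypothetical "howey/super_scirep" repo id and taking "fos" as a config name inferred from the directories added in this commit:

import datasets

# Hypothetical repo id; newer datasets versions may also require trust_remote_code=True.
ds = datasets.load_dataset("howey/super_scirep", "fos")
print(ds)
# Per _split_generators: "train"/"validation" appear when the config has
# is_training set, and an "evaluation" split when a test/meta.jsonl exists.
print(ds["evaluation"][0])
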
tweet_mentions/dataset_dict.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0cf8be7ed7184c85e1be68d1fc1c541a94e65b7626e6be0eee02bab6f18001b2
+ size 26