ianporada committed on
Commit
9f75519
·
1 Parent(s): 7b13f40

Convert dataset to Parquet (#1)

Browse files

- Convert dataset to Parquet (5f4b481fd569cf592ddc5d9201b6385b10e9d56a)
- Delete loading script (34cc2de89419dc9fb71c144a6fcf5783d6f77641)

Files changed (3) hide show
  1. README.md +36 -0
  2. davis_pdp.py +0 -129
  3. davis_pdp/test-00000-of-00001.parquet +3 -0
README.md ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ dataset_info:
3
+ config_name: davis_pdp
4
+ features:
5
+ - name: text
6
+ dtype: string
7
+ - name: pronoun
8
+ dtype: string
9
+ - name: pronoun_loc
10
+ dtype: int32
11
+ - name: quote
12
+ dtype: string
13
+ - name: quote_loc
14
+ dtype: int32
15
+ - name: options
16
+ sequence: string
17
+ - name: label
18
+ dtype:
19
+ class_label:
20
+ names:
21
+ '0': '0'
22
+ '1': '1'
23
+ - name: source
24
+ dtype: string
25
+ splits:
26
+ - name: test
27
+ num_bytes: 17818
28
+ num_examples: 60
29
+ download_size: 14118
30
+ dataset_size: 17818
31
+ configs:
32
+ - config_name: davis_pdp
33
+ data_files:
34
+ - split: test
35
+ path: davis_pdp/test-*
36
+ ---
davis_pdp.py DELETED
@@ -1,129 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """The Winograd Schema Challenge Dataset"""
16
-
17
- import xml.etree.ElementTree as ET
18
-
19
- import datasets
20
-
21
-
22
- _DESCRIPTION = """\
23
- A Winograd schema is a pair of sentences that differ in only one or two words and that contain an ambiguity that is
24
- resolved in opposite ways in the two sentences and requires the use of world knowledge and reasoning for its
25
- resolution. The schema takes its name from a well-known example by Terry Winograd:
26
-
27
- > The city councilmen refused the demonstrators a permit because they [feared/advocated] violence.
28
-
29
- If the word is ``feared'', then ``they'' presumably refers to the city council; if it is ``advocated'' then ``they''
30
- presumably refers to the demonstrators.
31
- """
32
-
33
- _CITATION = """\
34
- @inproceedings{levesque2012winograd,
35
- title={The winograd schema challenge},
36
- author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},
37
- booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},
38
- year={2012},
39
- organization={Citeseer}
40
- }
41
- """
42
-
43
- _HOMPAGE = "https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html"
44
- _DOWNLOAD_URL = "https://cs.nyu.edu/~davise/papers/WinogradSchemas/PDPChallenge2016.xml"
45
-
46
-
47
- class WinogradWSCConfig(datasets.BuilderConfig):
48
- """BuilderConfig for WinogradWSC."""
49
-
50
- def __init__(self, *args, language=None, inds=None, **kwargs):
51
- super().__init__(*args, **kwargs)
52
- self.inds = set(inds) if inds is not None else None
53
-
54
- def is_in_range(self, id):
55
- """Takes an index and tells you if it belongs to the configuration's subset"""
56
- return id in self.inds if self.inds is not None else True
57
-
58
-
59
- class WinogradWSC(datasets.GeneratorBasedBuilder):
60
- """The Winograd Schema Challenge Dataset"""
61
-
62
- BUILDER_CONFIG_CLASS = WinogradWSCConfig
63
- BUILDER_CONFIGS = [
64
- WinogradWSCConfig(
65
- name="davis_pdp",
66
- description="Full set of winograd examples",
67
- ),
68
- ]
69
-
70
- def _info(self):
71
- return datasets.DatasetInfo(
72
- description=_DESCRIPTION,
73
- features=datasets.Features(
74
- {
75
- "text": datasets.Value("string"),
76
- "pronoun": datasets.Value("string"),
77
- "pronoun_loc": datasets.Value("int32"),
78
- "quote": datasets.Value("string"),
79
- "quote_loc": datasets.Value("int32"),
80
- "options": datasets.Sequence(datasets.Value("string")),
81
- "label": datasets.ClassLabel(num_classes=2),
82
- "source": datasets.Value("string"),
83
- }
84
- ),
85
- homepage=_HOMPAGE,
86
- citation=_CITATION,
87
- )
88
-
89
- def _split_generators(self, dl_manager):
90
- path = dl_manager.download_and_extract(_DOWNLOAD_URL)
91
- return [
92
- datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": path}),
93
- ]
94
-
95
- def _cleanup_whitespace(self, text):
96
- return " ".join(text.split())
97
-
98
- def _generate_examples(self, filepath):
99
- tree = ET.parse(filepath)
100
- for id, schema in enumerate(tree.getroot()):
101
- if not self.config.is_in_range(id):
102
- continue
103
-
104
- text_root = schema.find("text")
105
- quote_root = schema.find("quote")
106
-
107
- text_left = self._cleanup_whitespace(text_root.findtext("txt1", ""))
108
- text_right = self._cleanup_whitespace(text_root.findtext("txt2", ""))
109
- quote_left = self._cleanup_whitespace(quote_root.findtext("quote1", ""))
110
- quote_right = self._cleanup_whitespace(quote_root.findtext("quote2", ""))
111
- pronoun = self._cleanup_whitespace(text_root.findtext("pron"))
112
-
113
- features = {}
114
- features["text"] = " ".join([text_left, pronoun, text_right]).strip()
115
- features["quote"] = " ".join([quote_left, pronoun, quote_right]).strip()
116
-
117
- features["pronoun"] = pronoun
118
- features["options"] = [
119
- self._cleanup_whitespace(option.text) for option in schema.find("answers").findall("answer")
120
- ]
121
-
122
- answer_txt = self._cleanup_whitespace(schema.findtext("correctAnswer"))
123
- features["label"] = int("B" in answer_txt) # convert " A. " or " B " strings to a 0/1 index
124
-
125
- features["pronoun_loc"] = len(text_left) + 1 if len(text_left) > 0 else 0
126
- features["quote_loc"] = features["pronoun_loc"] - (len(quote_left) + 1 if len(quote_left) > 0 else 0)
127
- features["source"] = self._cleanup_whitespace(schema.findtext("source"))
128
-
129
- yield id, features
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
davis_pdp/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:de048f890a37544eaedaf3e25ac2be019ad862ada0e1f9a4fe3d1379b590725f
3
+ size 14118