Commit
·
fc8b407
1
Parent(s):
a8158d1
Update parquet files
Browse files- .gitattributes +0 -39
- README.md +0 -127
- gen_train234_test2to10/v1-test.parquet +3 -0
- gen_train234_test2to10/v1-train.parquet +3 -0
- gen_train234_test2to10/v1-validation.parquet +3 -0
- gen_train23_test2to10/v1-test.parquet +3 -0
- gen_train23_test2to10/v1-train.parquet +3 -0
- gen_train23_test2to10/v1-validation.parquet +3 -0
- rob_train_clean_23_test_all_23/v1-test.parquet +3 -0
- rob_train_clean_23_test_all_23/v1-train.parquet +3 -0
- rob_train_clean_23_test_all_23/v1-validation.parquet +3 -0
- rob_train_disc_23_test_all_23/v1-test.parquet +3 -0
- rob_train_disc_23_test_all_23/v1-train.parquet +3 -0
- rob_train_disc_23_test_all_23/v1-validation.parquet +3 -0
- rob_train_irr_23_test_all_23/v1-test.parquet +3 -0
- rob_train_irr_23_test_all_23/v1-train.parquet +3 -0
- rob_train_irr_23_test_all_23/v1-validation.parquet +3 -0
- rob_train_sup_23_test_all_23/v1-test.parquet +3 -0
- rob_train_sup_23_test_all_23/v1-train.parquet +3 -0
- rob_train_sup_23_test_all_23/v1-validation.parquet +3 -0
- v1.py +0 -154
.gitattributes
DELETED
@@ -1,39 +0,0 @@
|
|
1 |
-
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
-
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
-
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
-
*.bin.* filter=lfs diff=lfs merge=lfs -text
|
5 |
-
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
6 |
-
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
-
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
-
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
-
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
-
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
-
*.model filter=lfs diff=lfs merge=lfs -text
|
12 |
-
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
13 |
-
*.onnx filter=lfs diff=lfs merge=lfs -text
|
14 |
-
*.ot filter=lfs diff=lfs merge=lfs -text
|
15 |
-
*.parquet filter=lfs diff=lfs merge=lfs -text
|
16 |
-
*.pb filter=lfs diff=lfs merge=lfs -text
|
17 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
18 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
19 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
20 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
21 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
22 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
23 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
24 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
25 |
-
*.zip filter=lfs diff=lfs merge=lfs -text
|
26 |
-
*.zstandard filter=lfs diff=lfs merge=lfs -text
|
27 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
28 |
-
# Audio files - uncompressed
|
29 |
-
*.pcm filter=lfs diff=lfs merge=lfs -text
|
30 |
-
*.sam filter=lfs diff=lfs merge=lfs -text
|
31 |
-
*.raw filter=lfs diff=lfs merge=lfs -text
|
32 |
-
# Audio files - compressed
|
33 |
-
*.aac filter=lfs diff=lfs merge=lfs -text
|
34 |
-
*.flac filter=lfs diff=lfs merge=lfs -text
|
35 |
-
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
36 |
-
*.ogg filter=lfs diff=lfs merge=lfs -text
|
37 |
-
*.wav filter=lfs diff=lfs merge=lfs -text
|
38 |
-
task_1.2.csv filter=lfs diff=lfs merge=lfs -text
|
39 |
-
task_1.3.csv filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
README.md
DELETED
@@ -1,127 +0,0 @@
|
|
1 |
-
---
|
2 |
-
language:
|
3 |
-
- en
|
4 |
-
license:
|
5 |
-
- unknown
|
6 |
-
multilinguality:
|
7 |
-
- monolingual
|
8 |
-
size_categories:
|
9 |
-
- 10K<n<100K
|
10 |
-
---
|
11 |
-
|
12 |
-
# Dataset Card for CLUTRR
|
13 |
-
|
14 |
-
## Table of Contents
|
15 |
-
|
16 |
-
## Dataset Description
|
17 |
-
### Dataset Summary
|
18 |
-
**CLUTRR** (**C**ompositional **L**anguage **U**nderstanding and **T**ext-based **R**elational **R**easoning), a diagnostic benchmark suite, is first introduced in (https://arxiv.org/abs/1908.06177) to test the systematic generalization and inductive reasoning capabilities of NLU systems.
|
19 |
-
|
20 |
-
The CLUTRR benchmark allows us to test a model’s ability for **systematic generalization** by testing on stories that contain unseen combinations of logical rules, and test for the various forms of **model robustness** by adding different kinds of superfluous noise facts to the stories.
|
21 |
-
|
22 |
-
### Dataset Task
|
23 |
-
CLUTRR contains a large set of semi-synthetic stories involving hypothetical families. The task is to infer the relationship between two family members, whose relationship is not explicitly mentioned in the given story.
|
24 |
-
|
25 |
-
Join the CLUTRR community at https://www.cs.mcgill.ca/~ksinha4/clutrr/
|
26 |
-
## Dataset Structure
|
27 |
-
We show detailed information for all 14 configurations of the dataset.
|
28 |
-
|
29 |
-
### configurations:
|
30 |
-
**id**: a unique series of characters and numbers that identify each instance <br>
|
31 |
-
**story**: one semi-synthetic story involving hypothetical families<br>
|
32 |
-
**query**: the target query/relation which contains two names, where the goal is to classify the relation that holds between these two entities<br>
|
33 |
-
**target**: indicator for the correct relation for the query <br>
|
34 |
-
**target_text**: text for the correct relation for the query <br>
|
35 |
-
The indicator follows the mapping below: <br> "aunt": 0, "son-in-law": 1, "grandfather": 2, "brother": 3,
|
36 |
-
"sister": 4,
|
37 |
-
"father": 5,
|
38 |
-
"mother": 6,
|
39 |
-
"grandmother": 7,
|
40 |
-
"uncle": 8,
|
41 |
-
"daughter-in-law": 9,
|
42 |
-
"grandson": 10,
|
43 |
-
"granddaughter": 11,
|
44 |
-
"father-in-law": 12,
|
45 |
-
"mother-in-law": 13,
|
46 |
-
"nephew": 14,
|
47 |
-
"son": 15,
|
48 |
-
"daughter": 16,
|
49 |
-
"niece": 17,
|
50 |
-
"husband": 18,
|
51 |
-
"wife": 19,
|
52 |
-
"sister-in-law": 20 <br>
|
53 |
-
**clean\_story**: the story without noise factors<br>
|
54 |
-
**proof\_state**: the logical rule of the kinship generation <br>
|
55 |
-
**f\_comb**: the kinships of the query followed by the logical rule<br>
|
56 |
-
**task\_name**: the task of the sub-dataset in a form of "task_[num1].[num2]"<br>
|
57 |
-
The first number [num1] indicates the status of noise facts added in the story: 1- no noise facts; 2- Irrelevant facts*; 3- Supporting facts*; 4- Disconnected facts*.<br>
|
58 |
-
The second number [num2] directly indicates the length of clauses for the task target.<br>
|
59 |
-
*for example:*<br>
|
60 |
-
*task_1.2 -- task requiring clauses of length 2 without adding noise facts*<br>
|
61 |
-
*task_2.3 -- task requiring clauses of length 3 with Irrelevant noise facts added in the story*<br>
|
62 |
-
**story\_edges**: all the edges in the kinship graph<br>
|
63 |
-
**edge\_types**: similar to the f\_comb, another form of the query's kinships followed by the logical rule <br>
|
64 |
-
**query\_edge**: the corresponding edge of the target query in the kinship graph<br>
|
65 |
-
**genders**: genders of names appeared in the story<br>
|
66 |
-
**task\_split**: train,test <br>
|
67 |
-
|
68 |
-
*Further explanation of Irrelevant facts, Supporting facts and Disconnected facts can be found in the 3.5 Robust Reasoning section in https://arxiv.org/abs/1908.06177
|
69 |
-
|
70 |
-
### Data Instances
|
71 |
-
|
72 |
-
An example of 'train' in Task 1.2 looks as follows.
|
73 |
-
```
|
74 |
-
{
|
75 |
-
"id": b2b9752f-d7fa-46a9-83ae-d474184c35b6,
|
76 |
-
"story": "[Lillian] and her daughter [April] went to visit [Lillian]'s mother [Ashley] last Sunday.",
|
77 |
-
"query": ('April', 'Ashley'),
|
78 |
-
"target": 7,
|
79 |
-
"target_text": "grandmother",
|
80 |
-
"clean_story": [Lillian] and her daughter [April] went to visit [Lillian]'s mother [Ashley] last Sunday.,
|
81 |
-
"proof_state": [{('April', 'grandmother', 'Ashley'): [('April', 'mother', 'Lillian'), ('Lillian', 'mother', 'Ashley')]}],
|
82 |
-
"f_comb": "mother-mother",
|
83 |
-
"task_name": "task_1.2",
|
84 |
-
"story_edges": [(0, 1), (1, 2)],
|
85 |
-
"edge_types": ['mother', 'mother'],
|
86 |
-
"query_edge": (0, 2),
|
87 |
-
"genders": "April:female,Lillian:female,Ashley:female",
|
88 |
-
"task_split": train
|
89 |
-
}
|
90 |
-
```
|
91 |
-
### Data Splits
|
92 |
-
|
93 |
-
#### Data Split Name
|
94 |
-
(corresponding with the name used in the paper)
|
95 |
-
|
96 |
-
| task_split | split name in paper | train & validation task | test task |
|
97 |
-
| :---: | :---: | :-: | :-: |
|
98 |
-
| gen_train23_test2to10 | data_089907f8 | 1.2, 1.3 | 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 1.10 |
|
99 |
-
| gen_train234_test2to10 | data_db9b8f04 | 1.2, 1.3, 1.4| 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 1.10 |
|
100 |
-
| rob_train_clean_23_test_all_23 | data_7c5b0e70 | 1.2,1.3 | 1.2, 1.3, 2.3, 3.3, 4.3 |
|
101 |
-
| rob_train_sup_23_test_all_23 | data_06b8f2a1 | 2.2, 2.3 | 2.2, 2.3, 1.3, 3.3, 4.3 |
|
102 |
-
| rob_train_irr_23_test_all_23 | data_523348e6 | 3.2, 3.3 | 3.2, 3.3, 1.3, 2.3, 4.3 |
|
103 |
-
| rob_train_disc_23_test_all_23 | data_d83ecc3e | 4.2, 4.3 | 4.2, 4.3, 1.3, 2.3, 3.3 |
|
104 |
-
|
105 |
-
#### Data Split Summary
|
106 |
-
Number of Instances in each split
|
107 |
-
|
108 |
-
| task_split | train | validation | test |
|
109 |
-
| :-: | :---: | :---: | :---: |
|
110 |
-
| gen_train23_test2to10 | 9074 | 2020 | 1146 |
|
111 |
-
| gen_train234_test2to10 | 12064 | 3019 | 1048 |
|
112 |
-
| rob_train_clean_23_test_all_23 | 8098 | 2026 | 447 |
|
113 |
-
| rob_train_disc_23_test_all_23 | 8080 | 2020 | 445 |
|
114 |
-
| rob_train_irr_23_test_all_23 | 8079 | 2020 | 444 |
|
115 |
-
| rob_train_sup_23_test_all_23 | 8123 | 2031 | 447 |
|
116 |
-
|
117 |
-
|
118 |
-
## Citation Information
|
119 |
-
```
|
120 |
-
@article{sinha2019clutrr,
|
121 |
-
Author = {Koustuv Sinha and Shagun Sodhani and Jin Dong and Joelle Pineau and William L. Hamilton},
|
122 |
-
Title = {CLUTRR: A Diagnostic Benchmark for Inductive Reasoning from Text},
|
123 |
-
Year = {2019},
|
124 |
-
journal = {Empirical Methods of Natural Language Processing (EMNLP)},
|
125 |
-
arxiv = {1908.06177}
|
126 |
-
}
|
127 |
-
```
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
gen_train234_test2to10/v1-test.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:618938733acc4f385e108d88964e3872b707065e5593c61fcb7624a2550ba4da
|
3 |
+
size 598203
|
gen_train234_test2to10/v1-train.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:285d8a01ca44a17267d1b02e5d3d2d0123c588dd0b973c0beb95631fb310b022
|
3 |
+
size 3694198
|
gen_train234_test2to10/v1-validation.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:9c9dd98f889ed9a2f98d1d80c8ab979a119755ebf7210d5527fcf976c0e2f86c
|
3 |
+
size 938229
|
gen_train23_test2to10/v1-test.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d58dc24808f707f7467e48c93e0468fa8f1900d9c7ab2cdc72311f76ca002b6e
|
3 |
+
size 627028
|
gen_train23_test2to10/v1-train.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:3a79fa137bdc5534c076e828964f3e917dd1dffec1152f99bbdfcde220c328e7
|
3 |
+
size 2426087
|
gen_train23_test2to10/v1-validation.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:257a3944542b1841f6db2cdbdc5bb931ffc97f303c25ca94f8664eee8b5c6ef8
|
3 |
+
size 537400
|
rob_train_clean_23_test_all_23/v1-test.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:5ce74ed3fbea93abc044c157302d866c8cad855cfd275bf57df962fe765d6d4b
|
3 |
+
size 123610
|
rob_train_clean_23_test_all_23/v1-train.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:e07c710a8ac7526fde0a613772f10927cc49fb736e041d3784c7717ab010722b
|
3 |
+
size 2111023
|
rob_train_clean_23_test_all_23/v1-validation.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:252db1299b8ae81a5d00a118e47e3ac1e417d774515fe6e1381d16d3bc08cab6
|
3 |
+
size 543562
|
rob_train_disc_23_test_all_23/v1-test.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:024e3d3a360515b2f2f4c764334cb7eff64042d93063730d71fe26fb0a1dd7b7
|
3 |
+
size 123005
|
rob_train_disc_23_test_all_23/v1-train.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:68ec5eca51add9f7d9bf3c0ce97ac5f32be8b5cebacbeadbc7748c81f47e2e03
|
3 |
+
size 2409442
|
rob_train_disc_23_test_all_23/v1-validation.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:3bf83590c255081c67ef61de975ca2e924e6607c4c363adf4fdcac9225fc9626
|
3 |
+
size 613911
|
rob_train_irr_23_test_all_23/v1-test.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:dd11b8a6f28facd2d9a048abbcbe37f870afb5ffe647876ff8fc58e78daa02c5
|
3 |
+
size 121666
|
rob_train_irr_23_test_all_23/v1-train.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:2fcfb1b7bca149bc791b12a71323fd07e040bfbc193800102025d69433d4371f
|
3 |
+
size 2375542
|
rob_train_irr_23_test_all_23/v1-validation.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:a8ba3cb0e44159f6f33522b6d20146a272064a338bc057d15f8f16a03e482eb1
|
3 |
+
size 605316
|
rob_train_sup_23_test_all_23/v1-test.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ef71cc61bc10ddeb695b68ec401c3ad1f117baec48eeaa76b7f55206553bf828
|
3 |
+
size 122235
|
rob_train_sup_23_test_all_23/v1-train.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:2883bcf963e11edb85993a004d17b3f384e59c99a7a37780abf150e45fcbe20d
|
3 |
+
size 2613660
|
rob_train_sup_23_test_all_23/v1-validation.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ead96a942ce416fe537c5c7bc800f83830d5c8d7d05f452828b2c87dac61be65
|
3 |
+
size 659218
|
v1.py
DELETED
@@ -1,154 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
"""CLUTRR_Dataset Loading Script.ipynb
|
3 |
-
Automatically generated by Colaboratory.
|
4 |
-
Original file is located at
|
5 |
-
https://colab.research.google.com/drive/1q9DdeHA5JbgTHkH6kfZe_KWHQOwHZA97
|
6 |
-
"""
|
7 |
-
# coding=utf-8
|
8 |
-
# Copyright 2019 The CLUTRR Datasets Authors and the HuggingFace Datasets Authors.
|
9 |
-
#
|
10 |
-
# CLUTRR is CC-BY-NC 4.0 (Attr Non-Commercial Inter.) licensed, as found in the LICENSE file.
|
11 |
-
#
|
12 |
-
# Unless required by applicable law or agreed to in writing, software
|
13 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
14 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
15 |
-
# See the License for the specific language governing permissions and
|
16 |
-
# limitations under the License.
|
17 |
-
|
18 |
-
# Lint as: python3
|
19 |
-
"""The CLUTRR (Compositional Language Understanding and Text-based Relational Reasoning) benchmark."""
|
20 |
-
|
21 |
-
|
22 |
-
import csv
|
23 |
-
import os
|
24 |
-
import textwrap
|
25 |
-
|
26 |
-
import numpy as np
|
27 |
-
|
28 |
-
import datasets
|
29 |
-
import json
|
30 |
-
|
31 |
-
_CLUTRR_CITATION = """\
|
32 |
-
@article{sinha2019clutrr,
|
33 |
-
Author = {Koustuv Sinha and Shagun Sodhani and Jin Dong and Joelle Pineau and William L. Hamilton},
|
34 |
-
Title = {CLUTRR: A Diagnostic Benchmark for Inductive Reasoning from Text},
|
35 |
-
Year = {2019},
|
36 |
-
journal = {Empirical Methods of Natural Language Processing (EMNLP)},
|
37 |
-
arxiv = {1908.06177}
|
38 |
-
}
|
39 |
-
"""
|
40 |
-
|
41 |
-
_CLUTRR_DESCRIPTION = """\
|
42 |
-
CLUTRR (Compositional Language Understanding and Text-based Relational Reasoning),
|
43 |
-
a diagnostic benchmark suite, is first introduced in (https://arxiv.org/abs/1908.06177)
|
44 |
-
to test the systematic generalization and inductive reasoning capabilities of NLU systems.
|
45 |
-
"""
|
46 |
-
_URL = "https://raw.githubusercontent.com/kliang5/CLUTRR_huggingface_dataset/main/"
|
47 |
-
_TASK = ["gen_train23_test2to10", "gen_train234_test2to10", "rob_train_clean_23_test_all_23", "rob_train_disc_23_test_all_23", "rob_train_irr_23_test_all_23","rob_train_sup_23_test_all_23"]
|
48 |
-
|
49 |
-
class v1(datasets.GeneratorBasedBuilder):
|
50 |
-
"""BuilderConfig for CLUTRR."""
|
51 |
-
|
52 |
-
BUILDER_CONFIGS = [
|
53 |
-
datasets.BuilderConfig(
|
54 |
-
name=task,
|
55 |
-
version=datasets.Version("1.0.0"),
|
56 |
-
description="",
|
57 |
-
)
|
58 |
-
for task in _TASK
|
59 |
-
]
|
60 |
-
|
61 |
-
def _info(self):
|
62 |
-
return datasets.DatasetInfo(
|
63 |
-
description=_CLUTRR_DESCRIPTION,
|
64 |
-
features=datasets.Features(
|
65 |
-
{
|
66 |
-
"id": datasets.Value("string"),
|
67 |
-
"story": datasets.Value("string"),
|
68 |
-
"query": datasets.Value("string"),
|
69 |
-
"target": datasets.Value("int32"),
|
70 |
-
"target_text": datasets.Value("string"),
|
71 |
-
"clean_story": datasets.Value("string"),
|
72 |
-
"proof_state": datasets.Value("string"),
|
73 |
-
"f_comb": datasets.Value("string"),
|
74 |
-
"task_name": datasets.Value("string"),
|
75 |
-
"story_edges": datasets.Value("string"),
|
76 |
-
"edge_types": datasets.Value("string"),
|
77 |
-
"query_edge": datasets.Value("string"),
|
78 |
-
"genders": datasets.Value("string"),
|
79 |
-
"task_split": datasets.Value("string"),
|
80 |
-
}
|
81 |
-
),
|
82 |
-
# No default supervised_keys (as we have to pass both premise
|
83 |
-
# and hypothesis as input).
|
84 |
-
supervised_keys=None,
|
85 |
-
homepage="https://www.cs.mcgill.ca/~ksinha4/clutrr/",
|
86 |
-
citation=_CLUTRR_CITATION,
|
87 |
-
)
|
88 |
-
|
89 |
-
def _split_generators(self, dl_manager):
|
90 |
-
"""Returns SplitGenerators."""
|
91 |
-
# dl_manager is a datasets.download.DownloadManager that can be used to
|
92 |
-
# download and extract URLs
|
93 |
-
|
94 |
-
task = str(self.config.name)
|
95 |
-
urls_to_download = {
|
96 |
-
"test": _URL + task + "/test.csv",
|
97 |
-
"train": _URL + task + "/train.csv",
|
98 |
-
"validation": _URL + task + "/validation.csv",
|
99 |
-
}
|
100 |
-
downloaded_files = dl_manager.download_and_extract(urls_to_download)
|
101 |
-
|
102 |
-
|
103 |
-
return [
|
104 |
-
datasets.SplitGenerator(
|
105 |
-
name=datasets.Split.TRAIN,
|
106 |
-
# These kwargs will be passed to _generate_examples
|
107 |
-
gen_kwargs={
|
108 |
-
"filepath": downloaded_files["train"],
|
109 |
-
"task": task,
|
110 |
-
},
|
111 |
-
),
|
112 |
-
datasets.SplitGenerator(
|
113 |
-
name=datasets.Split.VALIDATION,
|
114 |
-
# These kwargs will be passed to _generate_examples
|
115 |
-
gen_kwargs={
|
116 |
-
"filepath": downloaded_files["validation"],
|
117 |
-
"task": task,
|
118 |
-
},
|
119 |
-
),
|
120 |
-
datasets.SplitGenerator(
|
121 |
-
name=datasets.Split.TEST,
|
122 |
-
# These kwargs will be passed to _generate_examples
|
123 |
-
gen_kwargs={
|
124 |
-
"filepath": downloaded_files["test"],
|
125 |
-
"task": task,
|
126 |
-
},
|
127 |
-
),
|
128 |
-
]
|
129 |
-
|
130 |
-
def _generate_examples(self, filepath, task):
|
131 |
-
"""Yields examples."""
|
132 |
-
with open(filepath, encoding="utf-8") as f:
|
133 |
-
reader = csv.reader(f)
|
134 |
-
for id_, data in enumerate(reader):
|
135 |
-
if id_ == 0:
|
136 |
-
continue
|
137 |
-
# yield id_, data
|
138 |
-
# id_ += 1
|
139 |
-
yield id_, {
|
140 |
-
"id": data[1],
|
141 |
-
"story": data[2],
|
142 |
-
"query": data[3],
|
143 |
-
"target": data[4],
|
144 |
-
"target_text": data[5],
|
145 |
-
"clean_story": data[6],
|
146 |
-
"proof_state": data[7],
|
147 |
-
"f_comb": data[8],
|
148 |
-
"task_name": data[9],
|
149 |
-
"story_edges": data[10],
|
150 |
-
"edge_types": data[11],
|
151 |
-
"query_edge": data[12],
|
152 |
-
"genders": data[13],
|
153 |
-
"task_split": data[14],
|
154 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|