matteogabburo committed
Commit c86aa38 · verified · 1 Parent(s): 6694f26

Update README.md

Files changed (1): README.md (+35 -35)

README.md CHANGED
@@ -74,29 +74,29 @@ configs:
     path: "por-test_clean.jsonl"
   - split: test_clean_sp
     path: "spa-test_clean.jsonl"
-  - split: validation_++_en
+  - split: validation_noneg_en
     path: "eng-dev_no_allneg.jsonl"
-  - split: validation_++_de
+  - split: validation_noneg_de
     path: "deu-dev_no_allneg.jsonl"
-  - split: validation_++_fr
+  - split: validation_noneg_fr
     path: "fra-dev_no_allneg.jsonl"
-  - split: validation_++_it
+  - split: validation_noneg_it
     path: "ita-dev_no_allneg.jsonl"
-  - split: validation_++_po
+  - split: validation_noneg_po
     path: "por-dev_no_allneg.jsonl"
-  - split: validation_++_sp
+  - split: validation_noneg_sp
     path: "spa-dev_no_allneg.jsonl"
-  - split: test_++_en
+  - split: test_noneg_en
     path: "eng-test_no_allneg.jsonl"
-  - split: test_++_de
+  - split: test_noneg_de
     path: "deu-test_no_allneg.jsonl"
-  - split: test_++_fr
+  - split: test_noneg_fr
     path: "fra-test_no_allneg.jsonl"
-  - split: test_++_it
+  - split: test_noneg_it
     path: "ita-test_no_allneg.jsonl"
-  - split: test_++_po
+  - split: test_noneg_po
     path: "por-test_no_allneg.jsonl"
-  - split: test_++_sp
+  - split: test_noneg_sp
     path: "spa-test_no_allneg.jsonl"
 - config_name: clean
   data_files:
@@ -136,7 +136,7 @@ configs:
     path: "por-test_clean.jsonl"
   - split: test_clean_sp
     path: "spa-test_clean.jsonl"
-- config_name: ++
+- config_name: noneg
   data_files:
   - split: train_en
     path: "eng-train.jsonl"
@@ -150,29 +150,29 @@ configs:
     path: "por-train.jsonl"
   - split: train_sp
     path: "spa-train.jsonl"
-  - split: validation_++_en
+  - split: validation_noneg_en
     path: "eng-dev_no_allneg.jsonl"
-  - split: validation_++_de
+  - split: validation_noneg_de
     path: "deu-dev_no_allneg.jsonl"
-  - split: validation_++_fr
+  - split: validation_noneg_fr
     path: "fra-dev_no_allneg.jsonl"
-  - split: validation_++_it
+  - split: validation_noneg_it
     path: "ita-dev_no_allneg.jsonl"
-  - split: validation_++_po
+  - split: validation_noneg_po
     path: "por-dev_no_allneg.jsonl"
-  - split: validation_++_sp
+  - split: validation_noneg_sp
     path: "spa-dev_no_allneg.jsonl"
-  - split: test_++_en
+  - split: test_noneg_en
     path: "eng-test_no_allneg.jsonl"
-  - split: test_++_de
+  - split: test_noneg_de
     path: "deu-test_no_allneg.jsonl"
-  - split: test_++_fr
+  - split: test_noneg_fr
     path: "fra-test_no_allneg.jsonl"
-  - split: test_++_it
+  - split: test_noneg_it
     path: "ita-test_no_allneg.jsonl"
-  - split: test_++_po
+  - split: test_noneg_po
     path: "por-test_no_allneg.jsonl"
-  - split: test_++_sp
+  - split: test_noneg_sp
     path: "spa-test_no_allneg.jsonl"
 - config_name: en
   data_files:
@@ -222,7 +222,7 @@ configs:
     path: "spa-dev.jsonl"
   - split: test
     path: "spa-test.jsonl"
-- config_name: en_++
+- config_name: en_noneg
   data_files:
   - split: train
     path: "eng-train.jsonl"
@@ -230,7 +230,7 @@ configs:
     path: "eng-dev_no_allneg.jsonl"
   - split: test
     path: "eng-test_no_allneg.jsonl"
-- config_name: de_++
+- config_name: de_noneg
   data_files:
   - split: train
     path: "deu-train.jsonl"
@@ -238,7 +238,7 @@ configs:
     path: "deu-dev_no_allneg.jsonl"
   - split: test
     path: "deu-test_no_allneg.jsonl"
-- config_name: fr_++
+- config_name: fr_noneg
   data_files:
   - split: train
     path: "fra-train.jsonl"
@@ -246,7 +246,7 @@ configs:
     path: "fra-dev_no_allneg.jsonl"
   - split: test
     path: "fra-test_no_allneg.jsonl"
-- config_name: it_++
+- config_name: it_noneg
   data_files:
   - split: train
     path: "ita-train.jsonl"
@@ -254,7 +254,7 @@ configs:
     path: "ita-dev_no_allneg.jsonl"
   - split: test
     path: "ita-test_no_allneg.jsonl"
-- config_name: po_++
+- config_name: po_noneg
   data_files:
   - split: train
     path: "por-train.jsonl"
@@ -262,7 +262,7 @@ configs:
     path: "por-dev_no_allneg.jsonl"
   - split: test
     path: "por-test_no_allneg.jsonl"
-- config_name: sp_++
+- config_name: sp_noneg
   data_files:
   - split: train
     path: "spa-train.jsonl"
@@ -336,11 +336,11 @@ For each language (English, French, German, Italian, Portuguese, and Spanish), w
 
 In addition, the validation and the test splits are available also in the following preprocessed versions:
 
-- **++**: without questions with only negative answer candidates
+- **noneg**: without questions with only negative answer candidates
 - **clean**: without questions with only negative and only positive answer candidates
 
 ### How to load them:
-To use these splits, you can use the following snippet of code replacing ``[LANG]`` with a language identifier (en, fr, de, it, po, sp), and ``[VERSION]`` with the version identifier (++, clean)
+To use these splits, you can use the following snippet of code replacing ``[LANG]`` with a language identifier (en, fr, de, it, po, sp), and ``[VERSION]`` with the version identifier (noneg, clean)
 
 ```
 from datasets import load_dataset
@@ -352,7 +352,7 @@ corpora = load_dataset("matteogabburo/mTRECQA")
 corpora = load_dataset("matteogabburo/mTRECQA", "clean")
 
 # if you want the "no all negatives" validation and test sets
-corpora = load_dataset("matteogabburo/mTRECQA", "++")
+corpora = load_dataset("matteogabburo/mTRECQA", "noneg")
 
 """
 if you want the default splits of a specific language, replace [LANG] with an identifier in: en, fr, de, it, po, sp
@@ -363,7 +363,7 @@ italian_dataset = load_dataset("matteogabburo/mTRECQA", "it")
 
 
 """
-if you want the processed splits ("clean" and "no all negatives" sets), replace [LANG] with a language identifier and [VERSION] with "++" or "clean"
+if you want the processed splits ("clean" and "no all negatives" sets), replace [LANG] with a language identifier and [VERSION] with "noneg" or "clean"
 dataset = load_dataset("matteogabburo/mTRECQA", "[LANG]_[VERSION]")
 """
 # example:
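
For reference, a minimal sketch of how the dataset would be loaded after this rename, assuming the repo ID `matteogabburo/mTRECQA` and the config and split names shown in the diff:

```python
from datasets import load_dataset

# Cross-lingual "no all negatives" config: formerly "++", renamed to "noneg".
# Its validation/test splits follow the pattern validation_noneg_<lang> / test_noneg_<lang>.
noneg_corpora = load_dataset("matteogabburo/mTRECQA", "noneg")
print(noneg_corpora["validation_noneg_it"])

# Per-language variant "<lang>_noneg" keeps the plain train/validation/test split names.
italian_noneg = load_dataset("matteogabburo/mTRECQA", "it_noneg")
print(italian_noneg["test"])
```

The `clean` configs are untouched by this commit, so only references to the old `++` identifier need updating in downstream loading code.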