datasetId
stringlengths 5
121
| author
stringlengths 2
42
| last_modified
unknowndate 2021-04-29 15:34:29
2025-04-12 01:29:38
| downloads
int64 0
5.67M
| likes
int64 0
7.68k
| tags
sequencelengths 1
7.92k
| task_categories
sequencelengths 0
48
⌀ | createdAt
unknowndate 2022-03-02 23:29:22
2025-04-12 01:29:32
| card
stringlengths 20
1M
|
---|---|---|---|---|---|---|---|---|
hlillemark/c4_t5_pretrain | hlillemark | "2023-05-22T16:33:38Z" | 19,586 | 0 | [
"size_categories:100M<n<1B",
"format:parquet",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2023-05-19T09:17:45Z" | ---
dataset_info:
features:
- name: input_ids
sequence: int32
- name: labels
sequence: int64
splits:
- name: validation
num_bytes: 53400000
num_examples: 10000
- name: train
num_bytes: 961505597520
num_examples: 180057228
download_size: 2939856140
dataset_size: 961558997520
---
# Dataset Card for "c4_t5_pretrain"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
CohereForAI/Global-MMLU | CohereForAI | "2025-03-20T19:47:13Z" | 19,513 | 116 | [
"language:en",
"language:ar",
"language:bn",
"language:es",
"language:fr",
"language:hi",
"language:ru",
"language:de",
"language:id",
"language:it",
"language:ja",
"language:ko",
"language:pt",
"language:zh",
"language:yo",
"language:nl",
"language:ro",
"language:uk",
"language:vi",
"language:tr",
"language:pl",
"language:fa",
"language:cs",
"language:he",
"language:el",
"language:ms",
"language:fil",
"language:te",
"language:si",
"language:ne",
"language:ky",
"language:sv",
"language:lt",
"language:sr",
"language:mg",
"language:so",
"language:ha",
"language:am",
"language:sn",
"language:ig",
"language:ny",
"language:sw",
"license:apache-2.0",
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"library:argilla",
"arxiv:2412.03304",
"region:us",
"argilla"
] | null | "2024-12-01T22:45:59Z" | ---
dataset_info:
- config_name: am
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 209505
num_examples: 285
- name: test
num_bytes: 12085768
num_examples: 14042
download_size: 10260448
dataset_size: 12295273
- config_name: ar
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 202343
num_examples: 285
- name: test
num_bytes: 11621977
num_examples: 14042
download_size: 9817049
dataset_size: 11824320
- config_name: bn
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 301875
num_examples: 285
- name: test
num_bytes: 18061158
num_examples: 14042
download_size: 12524784
dataset_size: 18363033
- config_name: cs
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 149807
num_examples: 285
- name: test
num_bytes: 8607308
num_examples: 14042
download_size: 8640151
dataset_size: 8757115
- config_name: de
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 162406
num_examples: 285
- name: test
num_bytes: 9575360
num_examples: 14042
download_size: 9187953
dataset_size: 9737766
- config_name: el
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 254308
num_examples: 285
- name: test
num_bytes: 14502137
num_examples: 14042
download_size: 12288940
dataset_size: 14756445
- config_name: en
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 146364
num_examples: 285
- name: test
num_bytes: 8440632
num_examples: 14042
download_size: 7912429
dataset_size: 8586996
- config_name: es
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 160633
num_examples: 285
- name: test
num_bytes: 9399724
num_examples: 14042
download_size: 8752720
dataset_size: 9560357
- config_name: fa
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 202609
num_examples: 285
- name: test
num_bytes: 11611890
num_examples: 14042
download_size: 9564082
dataset_size: 11814499
- config_name: fil
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 165182
num_examples: 285
- name: test
num_bytes: 9510179
num_examples: 14042
download_size: 8564879
dataset_size: 9675361
- config_name: fr
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 166173
num_examples: 285
- name: test
num_bytes: 9858873
num_examples: 14042
download_size: 9202595
dataset_size: 10025046
- config_name: ha
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 147406
num_examples: 285
- name: test
num_bytes: 8445707
num_examples: 14042
download_size: 7665529
dataset_size: 8593113
- config_name: he
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 178912
num_examples: 285
- name: test
num_bytes: 10248592
num_examples: 14042
download_size: 8818618
dataset_size: 10427504
- config_name: hi
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 308254
num_examples: 285
- name: test
num_bytes: 17970478
num_examples: 14042
download_size: 12407854
dataset_size: 18278732
- config_name: id
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 154692
num_examples: 285
- name: test
num_bytes: 8886643
num_examples: 14042
download_size: 7793365
dataset_size: 9041335
- config_name: ig
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 157376
num_examples: 285
- name: test
num_bytes: 9221405
num_examples: 14042
download_size: 7644102
dataset_size: 9378781
- config_name: it
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 157547
num_examples: 285
- name: test
num_bytes: 9374481
num_examples: 14042
download_size: 8873034
dataset_size: 9532028
- config_name: ja
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 167646
num_examples: 285
- name: test
num_bytes: 9830716
num_examples: 14042
download_size: 8826164
dataset_size: 9998362
- config_name: ko
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 160572
num_examples: 285
- name: test
num_bytes: 9454859
num_examples: 14042
download_size: 8640457
dataset_size: 9615431
- config_name: ky
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 235001
num_examples: 285
- name: test
num_bytes: 13483934
num_examples: 14042
download_size: 11148813
dataset_size: 13718935
- config_name: lt
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 148917
num_examples: 285
- name: test
num_bytes: 8504949
num_examples: 14042
download_size: 8416467
dataset_size: 8653866
- config_name: mg
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 161992
num_examples: 285
- name: test
num_bytes: 9337415
num_examples: 14042
download_size: 8011427
dataset_size: 9499407
- config_name: ms
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 152549
num_examples: 285
- name: test
num_bytes: 8823844
num_examples: 14042
download_size: 7783581
dataset_size: 8976393
- config_name: ne
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 294790
num_examples: 285
- name: test
num_bytes: 16972110
num_examples: 14042
download_size: 11895818
dataset_size: 17266900
- config_name: nl
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 158122
num_examples: 285
- name: test
num_bytes: 9099176
num_examples: 14042
download_size: 8565959
dataset_size: 9257298
- config_name: ny
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 151315
num_examples: 285
- name: test
num_bytes: 8686819
num_examples: 14042
download_size: 7822699
dataset_size: 8838134
- config_name: pl
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 157290
num_examples: 285
- name: test
num_bytes: 8980730
num_examples: 14042
download_size: 8981270
dataset_size: 9138020
- config_name: pt
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 154592
num_examples: 285
- name: test
num_bytes: 8983299
num_examples: 14042
download_size: 8517588
dataset_size: 9137891
- config_name: ro
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 158311
num_examples: 285
- name: test
num_bytes: 9163189
num_examples: 14042
download_size: 8773232
dataset_size: 9321500
- config_name: ru
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 246059
num_examples: 285
- name: test
num_bytes: 14059847
num_examples: 14042
download_size: 11904365
dataset_size: 14305906
- config_name: si
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 297843
num_examples: 285
- name: test
num_bytes: 17374939
num_examples: 14042
download_size: 12790098
dataset_size: 17672782
- config_name: sn
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 147355
num_examples: 285
- name: test
num_bytes: 8507368
num_examples: 14042
download_size: 7962672
dataset_size: 8654723
- config_name: so
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 156282
num_examples: 285
- name: test
num_bytes: 9033243
num_examples: 14042
download_size: 8706693
dataset_size: 9189525
- config_name: sr
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 221580
num_examples: 285
- name: test
num_bytes: 12695546
num_examples: 14042
download_size: 10748391
dataset_size: 12917126
- config_name: sv
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 147893
num_examples: 285
- name: test
num_bytes: 8549708
num_examples: 14042
download_size: 8181997
dataset_size: 8697601
- config_name: sw
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 147069
num_examples: 285
- name: test
num_bytes: 8653210
num_examples: 14042
download_size: 7932986
dataset_size: 8800279
- config_name: te
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 315724
num_examples: 285
- name: test
num_bytes: 18170058
num_examples: 14042
download_size: 12631358
dataset_size: 18485782
- config_name: tr
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 153426
num_examples: 285
- name: test
num_bytes: 8833244
num_examples: 14042
download_size: 8351339
dataset_size: 8986670
- config_name: uk
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 229888
num_examples: 285
- name: test
num_bytes: 13233771
num_examples: 14042
download_size: 11347842
dataset_size: 13463659
- config_name: vi
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 185712
num_examples: 285
- name: test
num_bytes: 10604332
num_examples: 14042
download_size: 8971266
dataset_size: 10790044
- config_name: yo
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 153810
num_examples: 285
- name: test
num_bytes: 10694916
num_examples: 14042
download_size: 9303668
dataset_size: 10848726
- config_name: zh
features:
- name: sample_id
dtype: string
- name: subject
dtype: string
- name: subject_category
dtype: string
- name: question
dtype: string
- name: option_a
dtype: string
- name: option_b
dtype: string
- name: option_c
dtype: string
- name: option_d
dtype: string
- name: answer
dtype: string
- name: required_knowledge
dtype: string
- name: time_sensitive
dtype: string
- name: reference
dtype: string
- name: culture
dtype: string
- name: region
dtype: string
- name: country
dtype: string
- name: cultural_sensitivity_label
dtype: string
- name: is_annotated
dtype: bool
splits:
- name: dev
num_bytes: 127577
num_examples: 285
- name: test
num_bytes: 7393764
num_examples: 14042
download_size: 7322261
dataset_size: 7521341
configs:
- config_name: am
data_files:
- split: test
path: am/test-*
- split: dev
path: am/dev-*
- config_name: ar
data_files:
- split: test
path: ar/test-*
- split: dev
path: ar/dev-*
- config_name: bn
data_files:
- split: test
path: bn/test-*
- split: dev
path: bn/dev-*
- config_name: cs
data_files:
- split: test
path: cs/test-*
- split: dev
path: cs/dev-*
- config_name: de
data_files:
- split: test
path: de/test-*
- split: dev
path: de/dev-*
- config_name: el
data_files:
- split: test
path: el/test-*
- split: dev
path: el/dev-*
- config_name: en
data_files:
- split: test
path: en/test-*
- split: dev
path: en/dev-*
- config_name: es
data_files:
- split: test
path: es/test-*
- split: dev
path: es/dev-*
- config_name: fa
data_files:
- split: test
path: fa/test-*
- split: dev
path: fa/dev-*
- config_name: fil
data_files:
- split: test
path: fil/test-*
- split: dev
path: fil/dev-*
- config_name: fr
data_files:
- split: test
path: fr/test-*
- split: dev
path: fr/dev-*
- config_name: ha
data_files:
- split: test
path: ha/test-*
- split: dev
path: ha/dev-*
- config_name: he
data_files:
- split: test
path: he/test-*
- split: dev
path: he/dev-*
- config_name: hi
data_files:
- split: test
path: hi/test-*
- split: dev
path: hi/dev-*
- config_name: id
data_files:
- split: test
path: id/test-*
- split: dev
path: id/dev-*
- config_name: ig
data_files:
- split: test
path: ig/test-*
- split: dev
path: ig/dev-*
- config_name: it
data_files:
- split: test
path: it/test-*
- split: dev
path: it/dev-*
- config_name: ja
data_files:
- split: test
path: ja/test-*
- split: dev
path: ja/dev-*
- config_name: ko
data_files:
- split: test
path: ko/test-*
- split: dev
path: ko/dev-*
- config_name: ky
data_files:
- split: test
path: ky/test-*
- split: dev
path: ky/dev-*
- config_name: lt
data_files:
- split: test
path: lt/test-*
- split: dev
path: lt/dev-*
- config_name: mg
data_files:
- split: test
path: mg/test-*
- split: dev
path: mg/dev-*
- config_name: ms
data_files:
- split: test
path: ms/test-*
- split: dev
path: ms/dev-*
- config_name: ne
data_files:
- split: test
path: ne/test-*
- split: dev
path: ne/dev-*
- config_name: nl
data_files:
- split: test
path: nl/test-*
- split: dev
path: nl/dev-*
- config_name: ny
data_files:
- split: test
path: ny/test-*
- split: dev
path: ny/dev-*
- config_name: pl
data_files:
- split: test
path: pl/test-*
- split: dev
path: pl/dev-*
- config_name: pt
data_files:
- split: test
path: pt/test-*
- split: dev
path: pt/dev-*
- config_name: ro
data_files:
- split: test
path: ro/test-*
- split: dev
path: ro/dev-*
- config_name: ru
data_files:
- split: test
path: ru/test-*
- split: dev
path: ru/dev-*
- config_name: si
data_files:
- split: test
path: si/test-*
- split: dev
path: si/dev-*
- config_name: sn
data_files:
- split: test
path: sn/test-*
- split: dev
path: sn/dev-*
- config_name: so
data_files:
- split: test
path: so/test-*
- split: dev
path: so/dev-*
- config_name: sr
data_files:
- split: test
path: sr/test-*
- split: dev
path: sr/dev-*
- config_name: sv
data_files:
- split: test
path: sv/test-*
- split: dev
path: sv/dev-*
- config_name: sw
data_files:
- split: test
path: sw/test-*
- split: dev
path: sw/dev-*
- config_name: te
data_files:
- split: test
path: te/test-*
- split: dev
path: te/dev-*
- config_name: tr
data_files:
- split: test
path: tr/test-*
- split: dev
path: tr/dev-*
- config_name: uk
data_files:
- split: test
path: uk/test-*
- split: dev
path: uk/dev-*
- config_name: vi
data_files:
- split: test
path: vi/test-*
- split: dev
path: vi/dev-*
- config_name: yo
data_files:
- split: test
path: yo/test-*
- split: dev
path: yo/dev-*
- config_name: zh
data_files:
- split: test
path: zh/test-*
- split: dev
path: zh/dev-*
tags:
- argilla
license: apache-2.0
language:
- en
- ar
- bn
- es
- fr
- hi
- ru
- de
- id
- it
- ja
- ko
- pt
- zh
- yo
- nl
- ro
- uk
- vi
- tr
- pl
- fa
- cs
- he
- el
- ms
- fil
- te
- si
- ne
- ky
- sv
- lt
- sr
- mg
- so
- ha
- am
- sn
- ig
- ny
- sw
---

# Dataset Summary
[Global-MMLU](https://arxiv.org/abs/2412.03304) 🌍 is a multilingual evaluation set spanning 42 languages, including English. This dataset combines machine translations for [MMLU](https://huggingface.co/datasets/cais/mmlu) questions along with professional translations and crowd-sourced post-edits.
It also includes cultural sensitivity annotations for a subset of the questions (2850 questions per language) and classifies them as *Culturally Sensitive* (CS) 🗽 or *Culturally Agnostic* (CA) ⚖️. These annotations were collected as part of an open science initiative led by Cohere For AI in collaboration with many external collaborators from both industry and academia.
- **Curated by:** Professional annotators and contributors of [Cohere For AI Community](https://cohere.com/research).
- **Language(s):** 42 languages.
- **License:** [Apache 2.0](https://opensource.org/license/apache-2-0)
**Note:** We also provide a "lite" version of Global MMLU called ["Global-MMLU-Lite"](https://huggingface.co/datasets/CohereForAI/Global-MMLU-Lite). This dataset is more balanced, containing 200 samples each for the CA and CS subsets for each language, and provides coverage for 15 languages with human translations.
### **Global-MMLU Dataset Family:**
| Name | Explanation |
|------|--------------|
| [Global-MMLU](https://huggingface.co/datasets/CohereForAI/Global-MMLU) | Full Global-MMLU set with translations for all 14K samples including CS and CA subsets|
| [Global-MMLU-Lite](https://huggingface.co/datasets/CohereForAI/Global-MMLU-Lite) | Lite version of Global-MMLU with human translated samples in 15 languages and containing 200 samples each for CS and CA subsets per language.|
## Load with Datasets
To load this dataset with `datasets`, you'll first need to install it using `pip install datasets` and then use the following code:
```python
from datasets import load_dataset
# load HF dataset
global_mmlu = load_dataset("CohereForAI/Global-MMLU", 'en')
# can also be used as pandas dataframe
global_mmlu.set_format("pandas")
global_mmlu_test = global_mmlu['test'][:]
global_mmlu_dev = global_mmlu['dev'][:]
```
<details>
<summary> additional details </summary>
The columns corresponding to annotations collected from our cultural bias study (i.e. 'required_knowledge', 'time_sensitive', 'reference', 'culture', 'region', 'country') contain a list of values representing annotations from different annotators.
However, to avoid conversion issues to HF dataset, these columns are provided as string in the final dataset.
You can convert these columns back to list of values for easier manipulation as follows:
```python
import ast
# convert string values to list
global_mmlu_df['required_knowledge'] = global_mmlu_df['required_knowledge'].apply(lambda x: ast.literal_eval(x))
```
</details>
<br>
## Data Fields
The data fields are the same among all splits. Brief description of each field is provided below.
<details>
<summary> data field description </summary>
- `sample_id`: A unique identifier for the question.
- `subject`: The main topic the question falls under.
- `subject_category`: The high-level category the subject falls under i.e. STEM/Humanities/Social Sciences/Medical/Business/Other.
- `question`: translated question from MMLU
- `option_a`: one of the possible option choices
- `option_b`: one of the possible option choices
- `option_c`: one of the possible option choices
- `option_d`: one of the possible option choices
- `answer`: the correct answer (A/B/C/D)
- `required_knowledge`: annotator votes for knowledge needed to answer the question correctly. Possible values include: "cultural", "regional", "dialect" or "none"
- `time_sensitive`: annotator votes indicating if the question's answer is time-dependent. Possible values include: Yes/No
- `reference`: annotations for which part of the question contains cultural/regional/dialect references. The different items in the list are annotations from different annotators.
- `culture`: annotations for which culture does the question belong to. The different items in the list correspond to annotations from different annotators.
- `region`: Geographic region the question is relevant to. Each item in the list correspond to annotations from different annotators.
- `country`: Specific country the question pertains to. Each item in the list correspond to annotations from different annotators.
- `cultural_sensitivity_label`: Label to indicate if question is culturally sensitive (CS) or culturally agnostic (CA) based on annotator votes.
- `is_annotated`: True/False flag to indicate if sample contains any annotations from our cultural bias study.
</details>
<br>
## Data Splits
The following are the splits of the data:
| Split | No. of instances | Language Coverage |
|-------|------------------|-------------------|
| test | 589,764 | 42 |
| dev | 11,970 | 42 |
## Data Instances
An example from `test` set looks as follows:
```json
{'sample_id': 'world_religions/test/170'
'subject': 'world_religions',
'subject_category': 'Humanities',
'question': ' The numen of Augustus referred to which of the following characteristics?',
'option_a': 'Divine power',
'option_b': 'Sexual virility',
'option_c': 'Military acumen',
'option_d': 'Philosophical intellect',
'answer': 'A',
'required_knowledge': "['none', 'cultural', 'cultural', 'cultural']",
'time_sensitive': "['No', 'No', 'No', 'No']",
'reference': "['-', '-', {'end': 22, 'label': 'Cultural', 'score': None, 'start': 5}, {'end': 22, 'label': 'Cultural', 'score': None, 'start': 5}]",
'culture': "['Western Culture', 'Western Culture', 'Western Culture']",
'region': "['North America', 'Europe']",
'country': "['Italy']",
'cultural_sensitivity_label': 'CS',
'is_annotated': True,
}
```
## Statistics
### Annotation Types
The following is the breakdown of CS🗽, CA⚖️ and MA📝 samples in the final dataset.
| Type of Annotation | Instances per language | No. of languages | Total instances
|--------------------|------------------------|------------------|----------------|
| Culturally Sensitive 🗽 | 792 | 42 | 33,264 |
| Culturally Agnostic ⚖️ | 2058 |42 | 86,436 |
| MMLU Annotated 📝| 2850 |42 | 119,700 |
### Languages
The dataset covers 42 languages: 20 high-resource, 9 mid-resource, and 13 low-resource languages. The following are details about the languages included in the dataset.
<details>
<summary> Languages Info </summary>
| ISO Code | Language | Resources |
|----------|----------|-----------|
| `am` | Amharic | Low |
| `ar` | Arabic (Standard)| High |
| `bn` | Bengali | Mid |
| `de` | German | High |
| `el` | Greek | Mid |
| `en` | English | High |
| `fil` | Filipino | Mid |
| `fr` | French | High |
| `ha` | Hausa | Low |
| `he` | Hebrew | Mid |
| `hi` | Hindi | High |
| `ig` | Igbo | Low |
| `id` | Indonesian | Mid |
| `it` | Italian | High |
| `ja` | Japanese | High |
| `ky` | Kyrgyz | Low |
| `ko` | Korean | Mid |
| `lt` | Lithuanian | Mid |
| `mg` | Malagasy | Low |
| `ms` | Malay | Mid |
| `ne` | Nepali | Low |
| `nl` | Dutch | High |
| `ny` | Chichewa | Low |
| `fa` | Persian | High |
| `pl` | Polish | High |
| `pt` | Portuguese | High |
| `ru` | Russian | High |
| `si` | Sinhala | Low |
| `sn` | Shona | Low |
| `so` | Somali | Low |
| `es` | Spanish | High |
| `sr` | Serbian | High |
| `sw` | Swahili | Low |
| `sv` | Swedish | High |
| `te` | Telugu | Low |
| `tr` | Turkish | High |
| `uk` | Ukrainian | Mid |
| `vi` | Vietnamese | High |
| `yo` | Yorùbá | Low |
| `zh` | Chinese (Simplified) | High |
</details>
<br>
# Known Limitations
A brief overview of limitations of this dataset is provided below.
<details>
<summary> show limitations </summary>
- **Language and dialect coverage:** Global-MMLU focusses on 42 languages. However, this is still only a tiny fraction of the world’s linguistic diversity. Future work is needed to continue to improve evaluations beyond these 42 languages and take into account how technology serves different dialects.
- **Uneven distribution of contributions:** The dataset contains translation post-edits from community volunteers, with a 'long tail' of volunteers making only one or two contributions. Similarly, there is a huge gap between languages with the highest number of contributions and ones with the lowest number of contributions.
- **Toxic or offensive speech:** Our annotation process did not focus on flagging for toxic, harmful, or offensive speech, so it is possible that Global-MMLU contains some data that could be considered harmful. We believe this is of relatively low risk because of the nature of the original MMLU and the focus on examination material.
- **Region Category Assignment:** For the annotation of geographically sensitive questions, we classified regions into six geographic regions (Africa, Asia, Europe, North America, Oceania, and South America). However, based upon discussions, going forward we would recommend switching to the taxonomy proposed by the World Bank, which is more granular and includes separate designations for Central America and Sub-Saharan Africa.
- **Identifying cultural sensitivity does not guarantee cultural inclusion:** While Global-MMLU highlights important limitations in current datasets by identifying gaps in non-Western cultural representation, future work must prioritize the integration of diverse culturally grounded knowledge to achieve true inclusivity and fairness in multilingual AI evaluation.
</details>
<br>
# Additional Information
## Provenance
- **Methods Used:** Professional annotations as well as crowd-sourced through volunteer annotations.
- **Methodology Details:** We collected cultural bias annotations as well as post-edits of translations for different MMLU questions.
- [Cultural Sensitivity Annotation Platform](https://huggingface.co/spaces/CohereForAI/MMLU-evaluation)
- [Translation Quality Annotation Platform](https://huggingface.co/spaces/CohereForAI/review-mmlu-translations)
- Dates of Collection: May 2024 - Aug 2024
## Dataset Version and Maintenance
- **Maintenance Status:** Actively Maintained
- **Version Details:**
- *Current version:* 1.0
- *Last Update:* 12/2024
- *First Release:* 12/2024
## Authorship
- **Publishing Organization:** [Cohere For AI](https://cohere.com/research)
- **Industry Type:** Not-for-profit - Tech
## Licensing Information
This dataset can be used for any purpose, under the terms of the [Apache 2.0](https://opensource.org/license/apache-2-0) License.
## Additional Details
For any additional details, please check our paper, [Global MMLU: Understanding and Addressing Cultural and Linguistic Biases in Multilingual Evaluation](https://arxiv.org/abs/2412.03304).
## Citation Information
```bibtex
@misc{singh2024globalmmluunderstandingaddressing,
title={Global MMLU: Understanding and Addressing Cultural and Linguistic Biases in Multilingual Evaluation},
author={Shivalika Singh and Angelika Romanou and Clémentine Fourrier and David I. Adelani and Jian Gang Ngui and Daniel Vila-Suero and Peerat Limkonchotiwat and Kelly Marchisio and Wei Qi Leong and Yosephine Susanto and Raymond Ng and Shayne Longpre and Wei-Yin Ko and Madeline Smith and Antoine Bosselut and Alice Oh and Andre F. T. Martins and Leshem Choshen and Daphne Ippolito and Enzo Ferrante and Marzieh Fadaee and Beyza Ermis and Sara Hooker},
year={2024},
eprint={2412.03304},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2412.03304},
}
``` |
gvecchio/MatSynth | gvecchio | "2024-04-16T13:44:10Z" | 19,433 | 47 | [
"task_categories:image-to-image",
"task_categories:unconditional-image-generation",
"task_categories:image-classification",
"task_categories:text-to-image",
"language:en",
"size_categories:1K<n<10K",
"arxiv:2401.06056",
"region:us",
"materials",
"pbr",
"4d",
"graphics",
"rendering",
"svbrdf",
"synthetic"
] | [
"image-to-image",
"unconditional-image-generation",
"image-classification",
"text-to-image"
] | "2023-11-29T11:24:57Z" | ---
language:
- en
size_categories:
- 1K<n<10K
task_categories:
- image-to-image
- unconditional-image-generation
- image-classification
- text-to-image
pretty_name: MatSynth
dataset_info:
features:
- name: name
dtype: string
- name: category
dtype:
class_label:
names:
'0': ceramic
'1': concrete
'2': fabric
'3': ground
'4': leather
'5': marble
'6': metal
'7': misc
'8': plaster
'9': plastic
'10': stone
'11': terracotta
'12': wood
- name: metadata
struct:
- name: authors
sequence: string
- name: category
dtype: string
- name: description
dtype: string
- name: height_factor
dtype: float32
- name: height_mean
dtype: float32
- name: license
dtype: string
- name: link
dtype: string
- name: maps
sequence: string
- name: method
dtype: string
- name: name
dtype: string
- name: physical_size
dtype: float32
- name: source
dtype: string
- name: stationary
dtype: bool
- name: tags
sequence: string
- name: version_date
dtype: string
- name: basecolor
dtype: image
- name: diffuse
dtype: image
- name: displacement
dtype: image
- name: height
dtype: image
- name: metallic
dtype: image
- name: normal
dtype: image
- name: opacity
dtype: image
- name: roughness
dtype: image
- name: specular
dtype: image
- name: blend_mask
dtype: image
splits:
- name: test
num_bytes: 7443356066.0
num_examples: 89
- name: train
num_bytes: 430581667965.1
num_examples: 5700
download_size: 440284274332
dataset_size: 438025024031.1
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
- split: train
path: data/train-*
tags:
- materials
- pbr
- 4d
- graphics
- rendering
- svbrdf
- synthetic
viewer: false
---
# MatSynth
MatSynth is a Physically Based Rendering (PBR) materials dataset designed for modern AI applications.
This dataset consists of over 4,000 ultra-high-resolution materials, offering unparalleled scale, diversity, and detail.
Meticulously collected and curated, MatSynth is poised to drive innovation in material acquisition and generation applications, providing a rich resource for researchers, developers, and enthusiasts in computer graphics and related fields.
For further information, refer to our paper: ["MatSynth: A Modern PBR Materials Dataset"](https://arxiv.org/abs/2401.06056) available on arXiv.
<center>
<img src="https://gvecchio.com/matsynth/static/images/teaser.png" style="border-radius:10px">
</center>
## 🔍 Dataset Details
### Dataset Description
MatSynth is a new large-scale dataset comprising over 4,000 ultra-high resolution Physically Based Rendering (PBR) materials,
all released under permissive licensing.
All materials in the dataset are represented by a common set of maps (*Basecolor*, *Diffuse*, *Normal*, *Height*, *Roughness*, *Metallic*, *Specular* and, when useful, *Opacity*),
modelling both the reflectance and mesostructure of the material.
Each material in the dataset comes with rich metadata, including information on its origin, licensing details, category, tags, creation method,
and, when available, descriptions and physical size.
This comprehensive metadata facilitates precise material selection and usage, catering to the specific needs of users.
<center>
<img src="https://gvecchio.com/matsynth/static/images/data.png" style="border-radius:10px">
</center>
## 📂 Dataset Structure
The MatSynth dataset is divided into two splits: the test split, containing 89 materials, and the train split, consisting of 3,980 materials.
## 🔨 Dataset Creation
The MatSynth dataset is designed to support modern, learning-based techniques for a variety of material-related tasks including,
but not limited to, material acquisition, material generation and synthetic data generation e.g. for retrieval or segmentation.
### 🗃️ Source Data
The MatSynth dataset is the result of an extensive collection of data from multiple online sources operating under the CC0 and CC-BY licensing framework.
This collection strategy allows to capture a broad spectrum of materials,
from commonly used ones to more niche or specialized variants while guaranteeing that the data can be used for a variety of usecases.
Materials under CC0 license were collected from [AmbientCG](https://ambientcg.com/), [CGBookCase](https://www.cgbookcase.com/), [PolyHeaven](https://polyhaven.com/),
[ShareTextures](https://www.sharetextures.com/), and [TextureCan](https://www.texturecan.com/).
The dataset also includes limited set of materials from the artist [Julio Sillet](https://juliosillet.gumroad.com/), distributed under CC-BY license.
We collected over 6000 materials which we meticulously filter to keep only tileable, 4K materials.
This high resolution allows us to extract many different crops from each sample at different scales for augmentation.
Additionally, we discard blurry or low-quality materials (by visual inspection).
The resulting dataset consists of 3736 unique materials which we augment by blending semantically compatible materials (e.g.: snow over ground).
In total, our dataset contains 4069 unique 4K materials.
### ✒️ Annotations
The dataset is composed of material maps (Basecolor, Diffuse, Normal, Height, Roughness, Metallic, Specular and, when useful, opacity)
and associated renderings under varying environmental illuminations, and multi-scale crops.
We adopt the OpenGL standard for the Normal map (Y-axis pointing upward).
The Height map is given in a 16-bit single channel format for higher precision.
In addition to these maps, the dataset includes other annotations providing context to each material:
the capture method (photogrammetry, procedural generation, or approximation);
list of descriptive tags; source name (website); source link;
licensing and a timestamps for eventual future versioning.
For a subset of materials, when the information is available, we also provide the author name (387), text description (572) and a physical size,
presented as the length of the edge in centimeters (358).
## 🧑💻 Usage
MatSynth is accessible through the datasets python library.
Following a usage example:
```python
import torchvision.transforms.functional as TF
from datasets import load_dataset
from torch.utils.data import DataLoader
# image processing function
def process_img(x):
x = TF.resize(x, (1024, 1024))
x = TF.to_tensor(x)
return x
# item processing function
def process_batch(examples):
examples["basecolor"] = [process_img(x) for x in examples["basecolor"]]
return examples
# load the dataset in streaming mode
ds = load_dataset(
"gvecchio/MatSynth",
streaming = True,
)
# remove unwanted columns
ds = ds.remove_columns(["diffuse", "specular", "displacement", "opacity", "blend_mask"])
# or keep only specified columns
ds = ds.select_columns(["metadata", "basecolor"])
# shuffle data
ds = ds.shuffle(buffer_size=100)
# filter data matching a specific criteria, e.g.: only CC0 materials
ds = ds.filter(lambda x: x["metadata"]["license"] == "CC0")
# filter out data from Deschaintre et al. 2018
ds = ds.filter(lambda x: x["metadata"]["source"] != "deschaintre_2020")
# Set up processing
ds = ds.map(process_batch, batched=True, batch_size=8)
# set format for usage in torch
ds = ds.with_format("torch")
# iterate over the dataset
for x in ds:
print(x)
```
⚠️ **Note**: Streaming can be slow. We strongly suggest to cache data locally.
## 📜 Citation
```
@inproceedings{vecchio2023matsynth,
title={MatSynth: A Modern PBR Materials Dataset},
author={Vecchio, Giuseppe and Deschaintre, Valentin},
booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
year={2024}
}
```
If you use the data from Deschaintre et al. contained in this dataset, please also cite:
```
@article{deschaintre2018single,
title={Single-image svbrdf capture with a rendering-aware deep network},
author={Deschaintre, Valentin and Aittala, Miika and Durand, Fredo and Drettakis, George and Bousseau, Adrien},
journal={ACM Transactions on Graphics (ToG)},
volume={37},
number={4},
pages={1--15},
year={2018},
publisher={ACM New York, NY, USA}
}
``` |
jiachenlei/imagenet | jiachenlei | "2024-11-28T02:31:55Z" | 19,367 | 1 | [
"region:us"
] | null | "2024-11-27T02:39:50Z" | ---
configs:
- config_name: imagenet
data_files:
- split: train
path:
- "imagenet/train"
- split: val
path: "imagenet/val"
- config_name: imagenet256
data_files:
- split: train
path:
- "imagenet256/train"
- split: val
path: "imagenet256/val"
- config_name: imagenet_features
data_files:
- split: train
path:
- "imagenet_features/train"
- split: val
path: "imagenet_features/val"
--- |
alexandrainst/ftspeech | alexandrainst | "2024-09-04T15:01:21Z" | 19,357 | 3 | [
"task_categories:automatic-speech-recognition",
"language:da",
"license:other",
"size_categories:1M<n<10M",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"arxiv:2005.12368",
"region:us"
] | [
"automatic-speech-recognition"
] | "2023-05-11T13:08:57Z" | ---
dataset_info:
features:
- name: utterance_id
dtype: string
- name: speaker_gender
dtype: string
- name: sentence
dtype: string
- name: speaker_id
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
splits:
- name: train
num_bytes: 209434570129.268
num_examples: 995677
- name: dev_balanced
num_bytes: 579692770.829
num_examples: 2601
- name: dev_other
num_bytes: 1725502342.095
num_examples: 7595
- name: test_balanced
num_bytes: 1158740779.222
num_examples: 5534
- name: test_other
num_bytes: 1254987645.527
num_examples: 5837
download_size: 101776974871
dataset_size: 214153493666.941
task_categories:
- automatic-speech-recognition
language:
- da
pretty_name: FT Speech
size_categories:
- 100K<n<1M
license: other
---
# Dataset Card for FT Speech
## Dataset Description
- **Repository:** <https://ftspeech.github.io/>
- **Point of Contact:** [Dan Saattrup Nielsen](mailto:[email protected])
- **Size of downloaded dataset files:** 101.78 GB
- **Size of the generated dataset:** 214.15 GB
- **Total amount of disk used:** 315.93 GB
### Dataset Summary
This dataset is an upload of the [FT Speech dataset](https://ftspeech.github.io/).
The training, validation and test splits are the original ones.
### Supported Tasks and Leaderboards
Training automatic speech recognition is the intended task for this dataset. No leaderboard is active at this point.
### Languages
The dataset is available in Danish (`da`).
## Dataset Structure
### Data Instances
- **Size of downloaded dataset files:** 101.78 GB
- **Size of the generated dataset:** 214.15 GB
- **Total amount of disk used:** 315.93 GB
An example from the dataset looks as follows.
```
{
'utterance_id': 'S001_20151_M012_P00034-2',
'speaker_gender': 'F',
'sentence': 'alle de fem tekniske justeringer der er en del af lovforslaget',
'speaker_id': 'S001',
'audio': {
'path': 'S001_20151_M012_P00034-2.wav',
'array': array([-3.75366211e-03, -5.27954102e-03, -3.87573242e-03, ...,
9.15527344e-05, -1.52587891e-04, 5.79833984e-04]),
'sampling_rate': 16000
}
}
```
### Data Fields
The data fields are the same among all splits.
- `utterance_id`: a `string` feature.
- `speaker_gender`: a `string` feature.
- `sentence`: a `string` feature.
- `speaker_id`: a `string` feature.
- `audio`: an `Audio` feature.
### Dataset Statistics
There are 995,677 samples in the training split, 2,601 in the dev_balanced split, 7,595 in the dev_other split, 5,534 in the test_balanced split, and 5,837 in the test_other split.
#### Speakers
There are 374 unique speakers in the training dataset, 20 unique speakers in the validation dataset and 40 unique speakers in the test dataset. None of the dataset splits share any speakers.
#### Gender Distribution

#### Transcription Length Distribution

## Dataset Creation
### Curation Rationale
There are not many large-scale ASR datasets in Danish.
### Source Data
The data constitutes public recordings of sessions from the Danish Parliament, along with manual transcriptions.
## Additional Information
### Dataset Curators
Andreas Kirkedal, Marija Stepanović and Barbara Plank curated the dataset as part of their FT Speech paper (see citation below).
[Dan Saattrup Nielsen](https://saattrupdan.github.io/) from the [The Alexandra
Institute](https://alexandra.dk/) reorganised the dataset and uploaded it to the Hugging Face Hub.
### Licensing Information
The dataset is licensed under [this custom license](https://www.ft.dk/da/aktuelt/tv-fra-folketinget/deling-og-rettigheder).
### Citation
```
@inproceedings{ftspeech,
author = {Kirkedal, Andreas and Stepanović, Marija and Plank, Barbara},
title = {{FT Speech: Danish Parliament Speech Corpus}},
booktitle = {Proc. Interspeech 2020},
year = {2020},
url = {arxiv.org/abs/2005.12368}
}
``` |
EleutherAI/race | EleutherAI | "2023-07-03T21:27:18Z" | 19,340 | 6 | [
"task_categories:multiple-choice",
"task_ids:multiple-choice-qa",
"annotations_creators:expert-generated",
"language_creators:found",
"multilinguality:monolingual",
"source_datasets:original",
"language:en",
"license:other",
"size_categories:1K<n<10K",
"modality:text",
"library:datasets",
"library:mlcroissant",
"arxiv:1704.04683",
"region:us"
] | [
"multiple-choice"
] | "2023-07-03T13:20:38Z" | ---
annotations_creators:
- expert-generated
language:
- en
language_creators:
- found
license:
- other
multilinguality:
- monolingual
pretty_name: RACE
size_categories:
- 10K<n<100K
source_datasets:
- original
task_categories:
- multiple-choice
task_ids:
- multiple-choice-qa
paperswithcode_id: race
dataset_info:
---
# "race" Grouped by Article
This is a modified version of https://huggingface.co/datasets/race that returns documents grouped by article context instead of by question. **Note:** This dataset currently only contains the test set of the ```high``` subset of the data.
The original readme is contained below.
# Dataset Card for "race"
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [http://www.cs.cmu.edu/~glai1/data/race/](http://www.cs.cmu.edu/~glai1/data/race/)
- **Repository:** https://github.com/qizhex/RACE_AR_baselines
- **Paper:** [RACE: Large-scale ReAding Comprehension Dataset From Examinations](https://arxiv.org/abs/1704.04683)
- **Point of Contact:** [Guokun Lai](mailto:[email protected]), [Qizhe Xie](mailto:[email protected])
- **Size of downloaded dataset files:** 76.33 MB
- **Size of the generated dataset:** 349.46 MB
- **Total amount of disk used:** 425.80 MB
### Dataset Summary
RACE is a large-scale reading comprehension dataset with more than 28,000 passages and nearly 100,000 questions. The
dataset is collected from English examinations in China, which are designed for middle school and high school students.
The dataset can be served as the training and test sets for machine comprehension.
### Supported Tasks and Leaderboards
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Languages
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Dataset Structure
### Data Instances
#### all
- **Size of downloaded dataset files:** 25.44 MB
- **Size of the generated dataset:** 174.73 MB
- **Total amount of disk used:** 200.17 MB
An example of 'train' looks as follows.
```
This example was too long and was cropped:
{
"answer": "A",
"article": "\"Schoolgirls have been wearing such short skirts at Paget High School in Branston that they've been ordered to wear trousers ins...",
"example_id": "high132.txt",
"options": ["short skirts give people the impression of sexualisation", "short skirts are too expensive for parents to afford", "the headmaster doesn't like girls wearing short skirts", "the girls wearing short skirts will be at the risk of being laughed at"],
"question": "The girls at Paget High School are not allowed to wear skirts in that _ ."
}
```
#### high
- **Size of downloaded dataset files:** 25.44 MB
- **Size of the generated dataset:** 140.12 MB
- **Total amount of disk used:** 165.56 MB
An example of 'train' looks as follows.
```
This example was too long and was cropped:
{
"answer": "A",
"article": "\"Schoolgirls have been wearing such short skirts at Paget High School in Branston that they've been ordered to wear trousers ins...",
"example_id": "high132.txt",
"options": ["short skirts give people the impression of sexualisation", "short skirts are too expensive for parents to afford", "the headmaster doesn't like girls wearing short skirts", "the girls wearing short skirts will be at the risk of being laughed at"],
"question": "The girls at Paget High School are not allowed to wear skirts in that _ ."
}
```
#### middle
- **Size of downloaded dataset files:** 25.44 MB
- **Size of the generated dataset:** 34.61 MB
- **Total amount of disk used:** 60.05 MB
An example of 'train' looks as follows.
```
This example was too long and was cropped:
{
"answer": "B",
"article": "\"There is not enough oil in the world now. As time goes by, it becomes less and less, so what are we going to do when it runs ou...",
"example_id": "middle3.txt",
"options": ["There is more petroleum than we can use now.", "Trees are needed for some other things besides making gas.", "We got electricity from ocean tides in the old days.", "Gas wasn't used to run cars in the Second World War."],
"question": "According to the passage, which of the following statements is TRUE?"
}
```
### Data Fields
The data fields are the same among all splits.
#### all
- `example_id`: a `string` feature.
- `article`: a `string` feature.
- `answer`: a `string` feature.
- `question`: a `string` feature.
- `options`: a `list` of `string` features.
#### high
- `example_id`: a `string` feature.
- `article`: a `string` feature.
- `answer`: a `string` feature.
- `question`: a `string` feature.
- `options`: a `list` of `string` features.
#### middle
- `example_id`: a `string` feature.
- `article`: a `string` feature.
- `answer`: a `string` feature.
- `question`: a `string` feature.
- `options`: a `list` of `string` features.
### Data Splits
| name |train|validation|test|
|------|----:|---------:|---:|
|all |87866| 4887|4934|
|high |62445| 3451|3498|
|middle|25421| 1436|1436|
## Dataset Creation
### Curation Rationale
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the source language producers?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Annotations
#### Annotation process
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the annotators?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Personal and Sensitive Information
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Discussion of Biases
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Other Known Limitations
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Additional Information
### Dataset Curators
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Licensing Information
http://www.cs.cmu.edu/~glai1/data/race/
1. RACE dataset is available for non-commercial research purpose only.
2. All passages are obtained from the Internet which is not property of Carnegie Mellon University. We are not responsible for the content nor the meaning of these passages.
3. You agree not to reproduce, duplicate, copy, sell, trade, resell or exploit for any commercial purpose, any portion of the contexts and any portion of derived data.
4. We reserve the right to terminate your access to the RACE dataset at any time.
### Citation Information
```
@inproceedings{lai-etal-2017-race,
title = "{RACE}: Large-scale {R}e{A}ding Comprehension Dataset From Examinations",
author = "Lai, Guokun and
Xie, Qizhe and
Liu, Hanxiao and
Yang, Yiming and
Hovy, Eduard",
booktitle = "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/D17-1082",
doi = "10.18653/v1/D17-1082",
pages = "785--794",
}
```
### Contributions
Thanks to [@abarbosa94](https://github.com/abarbosa94), [@patrickvonplaten](https://github.com/patrickvonplaten), [@lewtun](https://github.com/lewtun), [@thomwolf](https://github.com/thomwolf), [@mariamabarham](https://github.com/mariamabarham) for adding this dataset. |
agents-course/certificates | agents-course | "2025-04-12T00:52:35Z" | 19,322 | 41 | [
"license:apache-2.0",
"region:us"
] | null | "2025-02-06T08:17:59Z" | ---
license: apache-2.0
---
|
Graphcore/wikipedia-bert-128 | Graphcore | "2022-09-07T14:42:32Z" | 19,254 | 1 | [
"language:en",
"license:cc-by-sa-3.0",
"size_categories:10M<n<100M",
"format:parquet",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2022-03-02T23:29:22Z" | ---
language:
- en
license:
- cc-by-sa-3.0
--- |
lmms-lab/Video-MME | lmms-lab | "2024-07-04T08:14:20Z" | 19,248 | 39 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"modality:video",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-06-07T12:06:37Z" | ---
dataset_info:
config_name: videomme
features:
- name: video_id
dtype: string
- name: duration
dtype: string
- name: domain
dtype: string
- name: sub_category
dtype: string
- name: url
dtype: string
- name: videoID
dtype: string
- name: question_id
dtype: string
- name: task_type
dtype: string
- name: question
dtype: string
- name: options
sequence: string
- name: answer
dtype: string
splits:
- name: test
num_bytes: 1003241.0
num_examples: 2700
download_size: 405167
dataset_size: 1003241.0
configs:
- config_name: videomme
data_files:
- split: test
path: videomme/test-*
---
|
neashton/ahmedml | neashton | "2025-04-08T13:29:43Z" | 19,223 | 2 | [
"license:cc-by-sa-4.0",
"arxiv:2407.20801",
"doi:10.57967/hf/5002",
"region:us"
] | null | "2025-02-10T11:19:24Z" | ---
license: cc-by-sa-4.0
---
AhmedML: High-Fidelity Computational Fluid Dynamics dataset for incompressible, low-speed bluff body aerodynamics
-------
Contact:
----------
Neil Ashton (NVIDIA) - [email protected]
website:
----------
https://caemldatasets.org
Summary:
-------
This dataset contains 500 different geometric variations of the Ahmed Car Body - a simplified car-like shape that exhibits many of the flow topologies that are present on bluff bodies such as road vehicles. The dataset contains a wide range of geometries that exhibit fundamental flow physics such as geometry and pressure-induced flow separation of flows as well as 3D vortical structures. Each variation of the Ahmed car body was run using a time-accurate hybrid Reynolds-Averaged Navier-Stokes (RANS) - Large-Eddy Simulation (LES) turbulence modelling approach using the open-source CFD code OpenFOAM. The dataset contains both surface boundary, 3D volume, geometry STL and forces/moments in open-source formats (.vtu,.vtp).
CFD Solver:
----------
All cases were run using the open-source finite-volume code OpenFOAM v2212. Each case was run transiently for approximately 80 convective time units (CTU) on meshes of approximately 20M cells. Please see the paper for full details on the code and validation:
How to cite this dataset:
----------------
In order to cite the use of this dataset please cite the paper below which contains full details on the dataset. It can be found here: https://arxiv.org/abs/2407.20801
@article{ashton2024ahmed,
title = {{AhmedML: High-Fidelity Computational Fluid Dynamics dataset for incompressible, low-speed bluff body aerodynamics}},
year = {2024},
journal = {arxiv.org},
author = {Ashton, Neil and Maddix, Danielle and Gundry, Samuel and Shabestari, Parisa}
}
Files:
-------
Each folder (e.g run_1,run_2...run_"i" etc) corresponds to a different geometry that contains the following files where "i" is the run number:
* ahmed_i.stl : geometry stl (~5mb):
* geo_parameters_1.csv (missing run 500): parameters that define the geometry
* boundary_i.vtp : Boundary VTP (~500mb)
* volume_i.vtu : Volume field VTU (~5GB)
* force_mom_i.csv : forces (Cd,Cl) time-averaged with constant reference area
* force_mom_varref_i.csv : forces (Cd,Cl) time-averaged with varying reference area
* slices : folder containing .vtp slices in x,y,z that contain flow-field variables
* images : (folder) that contains images of the following variables (CpT, UxMean) for slices of the domain in the X,Y & Z locations.
In addition we provide:
* force_mom_all.csv : run, cd,cl for all runs in a single file
* force_mom_varref_all.csv : run, cd,cl for all runs in a single file with varying reference area
* geo_parameters_all.csv : all the geometry parameters for each run inside a single file
* ahmedml.slvs : SolveSpace input file to create the parametric geometries
* stl : folder containing stl files that were used as inputs to the OpenFOAM process
* openfoam-casesetup.tgz : complete OpenFOAM setup that can be used to extend or reproduce the dataset
* validation : folder containing full outputs from all four mesh levels that were used to validate the methodology
Downloads
--------------
The dataset is now available on HuggingFace. Below are some examples of how to download all or selected parts of the dataset. Please refer to the HuggingFace documentation for other ways of accessing the dataset and building workflows.
Example 1: Download all files (~2TB)
--------
Please note you'll need to have git lfs installed first, then you can run the following command:
```
git clone [email protected]:datasets/neashton/ahmedml
```
Example 2: only download select files (STL,images & force and moments):
---------
Create the following bash script that could be adapted to loop through only select runs or to change to download different files e.g boundary/volume.
```
#!/bin/bash
# Set the paths
HF_OWNER="neashton"
HF_PREFIX="ahmedml"
# Set the local directory to download the files
LOCAL_DIR="./ahmed_data"
# Create the local directory if it doesn't exist
mkdir -p "$LOCAL_DIR"
# Loop through the run folders from 1 to 500
for i in $(seq 1 500); do
RUN_DIR="run_$i"
RUN_LOCAL_DIR="$LOCAL_DIR/$RUN_DIR"
# Create the run directory if it doesn't exist
mkdir -p "$RUN_LOCAL_DIR"
# Download the ahmed_i.stl file
wget "https://huggingface.co/datasets/${HF_OWNER}/${HF_PREFIX}/resolve/main/$RUN_DIR/ahmed_$i.stl" -O "$RUN_LOCAL_DIR/ahmed_$i.stl"
# Download the force_mom_i.csv file
wget "https://huggingface.co/datasets/${HF_OWNER}/${HF_PREFIX}/resolve/main/$RUN_DIR/force_mom_$i.csv" -O "$RUN_LOCAL_DIR/force_mom_$i.csv"
done
```
Acknowledgements
-----------
* OpenFOAM solver and workflow development by Neil Ashton (Amazon Web Services, now NVIDIA)
* Geometry parameterization by Samuel Gundry (Amazon Web Services) and Parisa Shabestari (Amazon Web Services)
* Guidance on dataset preparation for ML by Danielle Maddix (Amazon Web Services)
* Simulation runs, HPC setup and dataset preparation by Neil Ashton (Amazon Web Services, now NVIDIA)
License
----
This dataset is provided under the CC BY SA 4.0 license, please see LICENSE.txt for full license text.
version history:
---------------
* 15/02/2025 - files uploaded to HuggingFace
* 12/11/2024 - added validation folder that contains the full output from all four mesh levels that were used to validate the methodology used.
* 04/08/2024 - updates to the file description and arxiv paper
* 05/06/2024 - global forces/geo added for all runs
* 01/05/2024 - force/moments corrected (prior version had incorrect Cs data)
* 18/04/2024 - draft version produced
|
AstroMLCore/AstroM3Dataset | AstroMLCore | "2025-03-27T21:43:24Z" | 18,918 | 0 | [
"license:mit",
"size_categories:10K<n<100K",
"arxiv:2411.08842",
"region:us",
"astronomy",
"multimodal",
"classification"
] | null | "2024-11-06T20:51:40Z" | ---
license: mit
pretty_name: AstroM3Dataset
size_categories:
- 10K<n<100K
tags:
- astronomy
- multimodal
- classification
arxiv:
- arXiv:2411.08842
---
# AstroM3Dataset
## Description
AstroM3Dataset is a time-series astronomy dataset containing photometry, spectra, and metadata features for variable stars.
The dataset was constructed by cross-matching publicly available astronomical datasets,
primarily from the ASAS-SN (Shappee et al. 2014) variable star catalog (Jayasinghe et al. 2019)
and LAMOST spectroscopic survey (Cui et al. 2012), along with data from
WISE (Wright et al. 2010), GALEX (Morrissey et al. 2007), 2MASS (Skrutskie et al. 2006) and Gaia EDR3 (Gaia Collaboration et al. 2021).
The dataset includes multiple subsets (`full`, `sub10`, `sub25`, `sub50`) and supports different random seeds (`42`, `66`, `0`, `12`, `123`).
Each sample consists of:
- **Photometry**: Light curve data of shape `(N, 3)` (time, flux, flux\_error).
- **Spectra**: Spectra observations of shape `(M, 3)` (wavelength, flux, flux\_error).
- **Metadata**:
- `meta_cols`: Dictionary of metadata feature names and values.
- `photo_cols`: Dictionary of photometric feature names and values.
- **Label**: The class name as a string.
## Corresponding paper and code
- Paper: [AstroM<sup>3</sup>: A self-supervised multimodal model for astronomy](https://arxiv.org/abs/2411.08842)
- Code Repository: [GitHub: AstroM<sup>3</sup>](https://github.com/MeriDK/AstroM3/)
- Processed Data: [AstroMLCore/AstroM3Processed](https://huggingface.co/datasets/AstroMLCore/AstroM3Processed/)
**Note:** The processed dataset `AstroM3Processed` is created from the original dataset `AstroM3Dataset`
by using [preprocess.py](https://huggingface.co/datasets/AstroMLCore/AstroM3Dataset/blob/main/preprocess.py)
---
## Subsets and Seeds
AstroM3Dataset is available in different subset sizes:
- `full`: Entire dataset
- `sub50`: 50% subset
- `sub25`: 25% subset
- `sub10`: 10% subset
Each subset is sampled from the respective train, validation, and test splits of the full dataset.
For reproducibility, each subset is provided with different random seeds:
- `42`, `66`, `0`, `12`, `123`
## Data Organization
The dataset is organized as follows:
```
AstroM3Dataset/
├── photometry.zip # Contains all photometry light curves
├── utils/
│ ├── parallelzipfile.py # Zip file reader to open photometry.zip
├── spectra/ # Spectra files organized by class
│ ├── EA/
│ │ ├── file1.dat
│ │ ├── file2.dat
│ │ ├── ...
│ ├── EW/
│ ├── SR/
│ ├── ...
├── splits/ # Train/val/test splits for each subset and seed
│ ├── full/
│ │ ├── 42/
│ │ │ ├── train.csv
│ │ │ ├── val.csv
│ │ │ ├── test.csv
│ │ │ ├── info.json # Contains feature descriptions and preprocessing info
│ │ ├── 66/
│ │ ├── 0/
│ │ ├── 12/
│ │ ├── 123/
│ ├── sub10/
│ ├── sub25/
│ ├── sub50/
│── AstroM3Dataset.py # Hugging Face dataset script
```
## Usage
To load the dataset using the Hugging Face `datasets` library:
```python
from datasets import load_dataset
# Load the default full dataset with seed 42
dataset = load_dataset("AstroMLCore/AstroM3Dataset", trust_remote_code=True)
```
The default configuration is **full_42** (entire dataset with seed 42).
To load a specific subset and seed, use {subset}_{seed} as the name:
```python
from datasets import load_dataset
# Load the 25% subset sampled using seed 123
dataset = load_dataset("AstroMLCore/AstroM3Dataset", name="sub25_123", trust_remote_code=True)
```
---
## Citation
🤗 If you find this dataset useful, please cite our paper 🤗
```bibtex
@article{rizhko2024astrom,
title={AstroM $\^{} 3$: A self-supervised multimodal model for astronomy},
author={Rizhko, Mariia and Bloom, Joshua S},
journal={arXiv preprint arXiv:2411.08842},
year={2024}
}
```
## References
1. Shappee, B. J., Prieto, J. L., Grupe, D., et al. 2014, ApJ, 788, 48, doi: 10.1088/0004-637X/788/1/48
2. Jayasinghe, T., Stanek, K. Z., Kochanek, C. S., et al. 2019, MNRAS, 486, 1907, doi: 10.1093/mnras/stz844
3. Cui, X.-Q., Zhao, Y.-H., Chu, Y.-Q., et al. 2012, Research in Astronomy and Astrophysics, 12, 1197, doi: 10.1088/1674-4527/12/9/003
4. Wright, E. L., Eisenhardt, P. R. M., Mainzer, A. K., et al. 2010, AJ, 140, 1868, doi: 10.1088/0004-6256/140/6/1868
5. Morrissey, P., Conrow, T., Barlow, T. A., et al. 2007, ApJS, 173, 682, doi: 10.1086/520512
6. Skrutskie, M. F., Cutri, R. M., Stiening, R., et al. 2006, AJ, 131, 1163, doi: 10.1086/498708
7. Gaia Collaboration, Brown, A. G. A., et al. 2021, AAP, 649, A1, doi: 10.1051/0004-6361/202039657 |
TIGER-Lab/OmniEdit-Filtered-1.2M | TIGER-Lab | "2024-12-06T02:57:59Z" | 18,883 | 85 | [
"language:en",
"license:mit",
"size_categories:1M<n<10M",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"arxiv:2411.07199",
"region:us",
"image"
] | null | "2024-11-11T07:40:47Z" | ---
language:
- en
license: mit
size_categories:
- 1M<n<10M
pretty_name: OmniEdit
dataset_info:
features:
- name: omni_edit_id
dtype: string
- name: task
dtype: string
- name: src_img
dtype: image
- name: edited_img
dtype: image
- name: edited_prompt_list
sequence: string
- name: width
dtype: int64
- name: height
dtype: int64
- name: sc_score_1
dtype: int64
- name: sc_score_2
dtype: int64
- name: sc_reasoning
dtype: string
- name: pq_score
dtype: int64
- name: pq_reasoning
dtype: string
- name: o_score
dtype: float64
splits:
- name: dev
num_bytes: 1547839078.0
num_examples: 700
- name: train
num_bytes: 2852916299223.88
num_examples: 1202797
download_size: 2978259415518
dataset_size: 2854464138301.88
configs:
- config_name: default
data_files:
- split: dev
path: data/dev-*
- split: train
path: data/train-*
tags:
- image
---
## OmniEdit
In this paper, we present OMNI-EDIT, which is an omnipotent editor to handle seven different image editing tasks with any aspect ratio seamlessly. Our contribution is in four folds: (1) OMNI-EDIT is trained by utilizing the supervision
from seven different specialist models to ensure task coverage. (2) we utilize importance sampling based on the scores provided by large multimodal models (like GPT-4o) instead of CLIP-score to improve the data quality.
[📃Paper](https://tiger-ai-lab.github.io/OmniEdit/) | [🌐Website](https://tiger-ai-lab.github.io/OmniEdit/) | [💻Github](https://github.com/TIGER-AI-Lab/OmniEdit) | [📚Dataset](https://huggingface.co/datasets/TIGER-Lab/OmniEdit-Filtered-1.2M)
## Dataset Columns
The dataset contains the following columns:
- src_img, edited_img: they are the source and edited images.
- edited_prompt_list: they are the short and long editing instructions.
- task: this indicates the editing task, which has seven categories like addition, removal, background, environment, style, etc.
- sc_score_1 and sc_score_2: semantic consistency scores assigned by our quality rater.
- pq_score: the perceptual quality score assigned by our quality rater.
- o_score: the overall score, which is the weighted average of sc and pq score.
- *_reasoning: the rationale for assigning these scores.
## Data Pipeline
We synthesize the large scale dataset through specialist distillation. Our synthesis pipeline is depicted in
<p align="center">
<img src="synthesis.png" width="800">
</p>
Our released version contains 1.2M pairs covering seven different skills like addition, swapping, removal, attribute modification, background change, environment change and style transfer. The dataset has been filtered with VIEScore.
## Comparison with Others
Our dataset has the most diverse, highest-quality image editing pairs of any resolution.
<p align="center">
<img src="comparison.png" width="800">
</p>
## Citation
If you find our paper useful, please cite us with
```
@article{wei2024omniedit,
title={OmniEdit: Building Image Editing Generalist Models Through Specialist Supervision},
author={Wei, Cong and Xiong, Zheyang and Ren, Weiming and Du, Xinrun and Zhang, Ge and Chen, Wenhu},
journal={arXiv preprint arXiv:2411.07199},
year={2024}
}
```
|
dair-ai/emotion | dair-ai | "2024-08-08T06:10:47Z" | 18,876 | 351 | [
"task_categories:text-classification",
"task_ids:multi-class-classification",
"annotations_creators:machine-generated",
"language_creators:machine-generated",
"multilinguality:monolingual",
"source_datasets:original",
"language:en",
"license:other",
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"emotion-classification"
] | [
"text-classification"
] | "2022-03-02T23:29:22Z" | ---
annotations_creators:
- machine-generated
language_creators:
- machine-generated
language:
- en
license:
- other
multilinguality:
- monolingual
size_categories:
- 10K<n<100K
source_datasets:
- original
task_categories:
- text-classification
task_ids:
- multi-class-classification
paperswithcode_id: emotion
pretty_name: Emotion
tags:
- emotion-classification
dataset_info:
- config_name: split
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': sadness
'1': joy
'2': love
'3': anger
'4': fear
'5': surprise
splits:
- name: train
num_bytes: 1741533
num_examples: 16000
- name: validation
num_bytes: 214695
num_examples: 2000
- name: test
num_bytes: 217173
num_examples: 2000
download_size: 1287193
dataset_size: 2173401
- config_name: unsplit
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': sadness
'1': joy
'2': love
'3': anger
'4': fear
'5': surprise
splits:
- name: train
num_bytes: 45444017
num_examples: 416809
download_size: 26888538
dataset_size: 45444017
configs:
- config_name: split
data_files:
- split: train
path: split/train-*
- split: validation
path: split/validation-*
- split: test
path: split/test-*
default: true
- config_name: unsplit
data_files:
- split: train
path: unsplit/train-*
train-eval-index:
- config: default
task: text-classification
task_id: multi_class_classification
splits:
train_split: train
eval_split: test
col_mapping:
text: text
label: target
metrics:
- type: accuracy
name: Accuracy
- type: f1
name: F1 macro
args:
average: macro
- type: f1
name: F1 micro
args:
average: micro
- type: f1
name: F1 weighted
args:
average: weighted
- type: precision
name: Precision macro
args:
average: macro
- type: precision
name: Precision micro
args:
average: micro
- type: precision
name: Precision weighted
args:
average: weighted
- type: recall
name: Recall macro
args:
average: macro
- type: recall
name: Recall micro
args:
average: micro
- type: recall
name: Recall weighted
args:
average: weighted
---
# Dataset Card for "emotion"
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [https://github.com/dair-ai/emotion_dataset](https://github.com/dair-ai/emotion_dataset)
- **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Size of downloaded dataset files:** 16.13 MB
- **Size of the generated dataset:** 47.62 MB
- **Total amount of disk used:** 63.75 MB
### Dataset Summary
Emotion is a dataset of English Twitter messages with six basic emotions: anger, fear, joy, love, sadness, and surprise. For more detailed information please refer to the paper.
### Supported Tasks and Leaderboards
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Languages
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Dataset Structure
### Data Instances
An example looks as follows.
```
{
"text": "im feeling quite sad and sorry for myself but ill snap out of it soon",
"label": 0
}
```
### Data Fields
The data fields are:
- `text`: a `string` feature.
- `label`: a classification label, with possible values including `sadness` (0), `joy` (1), `love` (2), `anger` (3), `fear` (4), `surprise` (5).
### Data Splits
The dataset has 2 configurations:
- split: with a total of 20_000 examples split into train, validation and test
- unsplit: with a total of 416_809 examples in a single train split
| name | train | validation | test |
|---------|-------:|-----------:|-----:|
| split | 16000 | 2000 | 2000 |
| unsplit | 416809 | n/a | n/a |
## Dataset Creation
### Curation Rationale
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the source language producers?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Annotations
#### Annotation process
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the annotators?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Personal and Sensitive Information
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Discussion of Biases
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Other Known Limitations
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Additional Information
### Dataset Curators
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Licensing Information
The dataset should be used for educational and research purposes only.
### Citation Information
If you use this dataset, please cite:
```
@inproceedings{saravia-etal-2018-carer,
title = "{CARER}: Contextualized Affect Representations for Emotion Recognition",
author = "Saravia, Elvis and
Liu, Hsien-Chi Toby and
Huang, Yen-Hao and
Wu, Junlin and
Chen, Yi-Shin",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
month = oct # "-" # nov,
year = "2018",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/D18-1404",
doi = "10.18653/v1/D18-1404",
pages = "3687--3697",
abstract = "Emotions are expressed in nuanced ways, which varies by collective or individual experiences, knowledge, and beliefs. Therefore, to understand emotion, as conveyed through text, a robust mechanism capable of capturing and modeling different linguistic nuances and phenomena is needed. We propose a semi-supervised, graph-based algorithm to produce rich structural descriptors which serve as the building blocks for constructing contextualized affect representations from text. The pattern-based representations are further enriched with word embeddings and evaluated through several emotion recognition tasks. Our experimental results demonstrate that the proposed method outperforms state-of-the-art techniques on emotion recognition tasks.",
}
```
### Contributions
Thanks to [@lhoestq](https://github.com/lhoestq), [@thomwolf](https://github.com/thomwolf), [@lewtun](https://github.com/lewtun) for adding this dataset.
|
HuggingFaceH4/ultrachat_200k | HuggingFaceH4 | "2024-10-16T11:52:27Z" | 18,813 | 526 | [
"task_categories:text-generation",
"language:en",
"license:mit",
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"arxiv:2305.14233",
"region:us"
] | [
"text-generation"
] | "2023-10-24T08:24:57Z" | ---
language:
- en
license: mit
size_categories:
- 100K<n<1M
task_categories:
- text-generation
pretty_name: UltraChat 200k
configs:
- config_name: default
data_files:
- split: train_sft
path: data/train_sft-*
- split: test_sft
path: data/test_sft-*
- split: train_gen
path: data/train_gen-*
- split: test_gen
path: data/test_gen-*
dataset_info:
features:
- name: prompt
dtype: string
- name: prompt_id
dtype: string
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train_sft
num_bytes: 1397058554
num_examples: 207865
- name: test_sft
num_bytes: 154695659
num_examples: 23110
- name: train_gen
num_bytes: 1347396812
num_examples: 256032
- name: test_gen
num_bytes: 148276089
num_examples: 28304
download_size: 1624049723
dataset_size: 3047427114
---
# Dataset Card for UltraChat 200k
## Dataset Description
This is a heavily filtered version of the [UltraChat](https://github.com/thunlp/UltraChat) dataset and was used to train [Zephyr-7B-β](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta), a state of the art 7b chat model.
The original dataset consists of 1.4M dialogues generated by ChatGPT and spanning a wide range of topics. To create `UltraChat 200k`, we applied the following logic:
- Selection of a subset of data for faster supervised fine-tuning.
- Truecasing of the dataset, as we observed around 5% of the data contained grammatical errors like "Hello. how are you?" instead of "Hello. How are you?"
- Removal of dialogues where the assistant replies with phrases like "I do not have emotions" or "I don't have opinions", even for fact-based prompts that don't involve either.
## Dataset Structure
The dataset has four splits, suitable for:
* Supervised fine-tuning (`sft`).
* Generation ranking (`gen`) via techniques like rejection sampling or PPO.
The number of examples per split is shown as follows:
| train_sft | test_sft | train_gen | test_gen |
|:-------:|:-----------:|:-----:| :-----:|
| 207865 | 23110 | 256032 | 28304 |
The dataset is stored in parquet format with each entry using the following schema:
```
{
"prompt": "Create a fully-developed protagonist who is challenged to survive within a dystopian society under the rule of a tyrant. ...",
"messages":[
{
"content": "Create a fully-developed protagonist who is challenged to survive within a dystopian society under the rule of a tyrant. ...",
"role": "user"
},
{
"content": "Name: Ava\n\n Ava was just 16 years old when the world as she knew it came crashing down. The government had collapsed, leaving behind a chaotic and lawless society. ...",
"role": "assistant"
},
{
"content": "Wow, Ava's story is so intense and inspiring! Can you provide me with more details. ...",
"role": "user"
},
{
"content": "Certainly! ....",
"role": "assistant"
},
{
"content": "That's really interesting! I would love to hear more...",
"role": "user"
}
{
"content": "Certainly! ....",
"role": "assistant"
},
],
"prompt_id": "d938b65dfe31f05f80eb8572964c6673eddbd68eff3db6bd234d7f1e3b86c2af"
}
```
## Citation
If you find this dataset useful in your work, please cite the original UltraChat dataset:
```
@misc{ding2023enhancing,
title={Enhancing Chat Language Models by Scaling High-quality Instructional Conversations},
author={Ning Ding and Yulin Chen and Bokai Xu and Yujia Qin and Zhi Zheng and Shengding Hu and Zhiyuan Liu and Maosong Sun and Bowen Zhou},
year={2023},
eprint={2305.14233},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
``` |
TrevorDohm/Stack_Tokenized | TrevorDohm | "2024-04-16T00:19:53Z" | 18,811 | 0 | [
"task_categories:text-generation",
"language_creators:crowdsourced",
"language_creators:expert-generated",
"multilinguality:multilingual",
"language:code",
"license:other",
"size_categories:100M<n<1B",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"text-generation"
] | "2024-03-10T05:49:00Z" | ---
annotations_creators: []
language_creators:
- crowdsourced
- expert-generated
language:
- code
license:
- other
multilinguality:
- multilingual
pretty_name: The-Stack-Tokenized
size_categories:
- unknown
source_datasets: []
task_categories:
- text-generation
task_ids: []
---
|
ceval/ceval-exam | ceval | "2025-03-25T13:18:03Z" | 18,647 | 259 | [
"task_categories:text-classification",
"task_categories:multiple-choice",
"task_categories:question-answering",
"language:zh",
"license:cc-by-nc-sa-4.0",
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2305.08322",
"region:us"
] | [
"text-classification",
"multiple-choice",
"question-answering"
] | "2023-05-16T01:47:44Z" | ---
license: cc-by-nc-sa-4.0
task_categories:
- text-classification
- multiple-choice
- question-answering
language:
- zh
pretty_name: C-Eval
size_categories:
- 10K<n<100K
configs:
- config_name: accountant
data_files:
- split: test
path: accountant/test-*
- split: val
path: accountant/val-*
- split: dev
path: accountant/dev-*
- config_name: advanced_mathematics
data_files:
- split: test
path: advanced_mathematics/test-*
- split: val
path: advanced_mathematics/val-*
- split: dev
path: advanced_mathematics/dev-*
- config_name: art_studies
data_files:
- split: test
path: art_studies/test-*
- split: val
path: art_studies/val-*
- split: dev
path: art_studies/dev-*
- config_name: basic_medicine
data_files:
- split: test
path: basic_medicine/test-*
- split: val
path: basic_medicine/val-*
- split: dev
path: basic_medicine/dev-*
- config_name: business_administration
data_files:
- split: test
path: business_administration/test-*
- split: val
path: business_administration/val-*
- split: dev
path: business_administration/dev-*
- config_name: chinese_language_and_literature
data_files:
- split: test
path: chinese_language_and_literature/test-*
- split: val
path: chinese_language_and_literature/val-*
- split: dev
path: chinese_language_and_literature/dev-*
- config_name: civil_servant
data_files:
- split: test
path: civil_servant/test-*
- split: val
path: civil_servant/val-*
- split: dev
path: civil_servant/dev-*
- config_name: clinical_medicine
data_files:
- split: test
path: clinical_medicine/test-*
- split: val
path: clinical_medicine/val-*
- split: dev
path: clinical_medicine/dev-*
- config_name: college_chemistry
data_files:
- split: test
path: college_chemistry/test-*
- split: val
path: college_chemistry/val-*
- split: dev
path: college_chemistry/dev-*
- config_name: college_economics
data_files:
- split: test
path: college_economics/test-*
- split: val
path: college_economics/val-*
- split: dev
path: college_economics/dev-*
- config_name: college_physics
data_files:
- split: test
path: college_physics/test-*
- split: val
path: college_physics/val-*
- split: dev
path: college_physics/dev-*
- config_name: college_programming
data_files:
- split: test
path: college_programming/test-*
- split: val
path: college_programming/val-*
- split: dev
path: college_programming/dev-*
- config_name: computer_architecture
data_files:
- split: test
path: computer_architecture/test-*
- split: val
path: computer_architecture/val-*
- split: dev
path: computer_architecture/dev-*
- config_name: computer_network
data_files:
- split: test
path: computer_network/test-*
- split: val
path: computer_network/val-*
- split: dev
path: computer_network/dev-*
- config_name: discrete_mathematics
data_files:
- split: test
path: discrete_mathematics/test-*
- split: val
path: discrete_mathematics/val-*
- split: dev
path: discrete_mathematics/dev-*
- config_name: education_science
data_files:
- split: test
path: education_science/test-*
- split: val
path: education_science/val-*
- split: dev
path: education_science/dev-*
- config_name: electrical_engineer
data_files:
- split: test
path: electrical_engineer/test-*
- split: val
path: electrical_engineer/val-*
- split: dev
path: electrical_engineer/dev-*
- config_name: environmental_impact_assessment_engineer
data_files:
- split: test
path: environmental_impact_assessment_engineer/test-*
- split: val
path: environmental_impact_assessment_engineer/val-*
- split: dev
path: environmental_impact_assessment_engineer/dev-*
- config_name: fire_engineer
data_files:
- split: test
path: fire_engineer/test-*
- split: val
path: fire_engineer/val-*
- split: dev
path: fire_engineer/dev-*
- config_name: high_school_biology
data_files:
- split: test
path: high_school_biology/test-*
- split: val
path: high_school_biology/val-*
- split: dev
path: high_school_biology/dev-*
- config_name: high_school_chemistry
data_files:
- split: test
path: high_school_chemistry/test-*
- split: val
path: high_school_chemistry/val-*
- split: dev
path: high_school_chemistry/dev-*
- config_name: high_school_chinese
data_files:
- split: test
path: high_school_chinese/test-*
- split: val
path: high_school_chinese/val-*
- split: dev
path: high_school_chinese/dev-*
- config_name: high_school_geography
data_files:
- split: test
path: high_school_geography/test-*
- split: val
path: high_school_geography/val-*
- split: dev
path: high_school_geography/dev-*
- config_name: high_school_history
data_files:
- split: test
path: high_school_history/test-*
- split: val
path: high_school_history/val-*
- split: dev
path: high_school_history/dev-*
- config_name: high_school_mathematics
data_files:
- split: test
path: high_school_mathematics/test-*
- split: val
path: high_school_mathematics/val-*
- split: dev
path: high_school_mathematics/dev-*
- config_name: high_school_physics
data_files:
- split: test
path: high_school_physics/test-*
- split: val
path: high_school_physics/val-*
- split: dev
path: high_school_physics/dev-*
- config_name: high_school_politics
data_files:
- split: test
path: high_school_politics/test-*
- split: val
path: high_school_politics/val-*
- split: dev
path: high_school_politics/dev-*
- config_name: ideological_and_moral_cultivation
data_files:
- split: test
path: ideological_and_moral_cultivation/test-*
- split: val
path: ideological_and_moral_cultivation/val-*
- split: dev
path: ideological_and_moral_cultivation/dev-*
- config_name: law
data_files:
- split: test
path: law/test-*
- split: val
path: law/val-*
- split: dev
path: law/dev-*
- config_name: legal_professional
data_files:
- split: test
path: legal_professional/test-*
- split: val
path: legal_professional/val-*
- split: dev
path: legal_professional/dev-*
- config_name: logic
data_files:
- split: test
path: logic/test-*
- split: val
path: logic/val-*
- split: dev
path: logic/dev-*
- config_name: mao_zedong_thought
data_files:
- split: test
path: mao_zedong_thought/test-*
- split: val
path: mao_zedong_thought/val-*
- split: dev
path: mao_zedong_thought/dev-*
- config_name: marxism
data_files:
- split: test
path: marxism/test-*
- split: val
path: marxism/val-*
- split: dev
path: marxism/dev-*
- config_name: metrology_engineer
data_files:
- split: test
path: metrology_engineer/test-*
- split: val
path: metrology_engineer/val-*
- split: dev
path: metrology_engineer/dev-*
- config_name: middle_school_biology
data_files:
- split: test
path: middle_school_biology/test-*
- split: val
path: middle_school_biology/val-*
- split: dev
path: middle_school_biology/dev-*
- config_name: middle_school_chemistry
data_files:
- split: test
path: middle_school_chemistry/test-*
- split: val
path: middle_school_chemistry/val-*
- split: dev
path: middle_school_chemistry/dev-*
- config_name: middle_school_geography
data_files:
- split: test
path: middle_school_geography/test-*
- split: val
path: middle_school_geography/val-*
- split: dev
path: middle_school_geography/dev-*
- config_name: middle_school_history
data_files:
- split: test
path: middle_school_history/test-*
- split: val
path: middle_school_history/val-*
- split: dev
path: middle_school_history/dev-*
- config_name: middle_school_mathematics
data_files:
- split: test
path: middle_school_mathematics/test-*
- split: val
path: middle_school_mathematics/val-*
- split: dev
path: middle_school_mathematics/dev-*
- config_name: middle_school_physics
data_files:
- split: test
path: middle_school_physics/test-*
- split: val
path: middle_school_physics/val-*
- split: dev
path: middle_school_physics/dev-*
- config_name: middle_school_politics
data_files:
- split: test
path: middle_school_politics/test-*
- split: val
path: middle_school_politics/val-*
- split: dev
path: middle_school_politics/dev-*
- config_name: modern_chinese_history
data_files:
- split: test
path: modern_chinese_history/test-*
- split: val
path: modern_chinese_history/val-*
- split: dev
path: modern_chinese_history/dev-*
- config_name: operating_system
data_files:
- split: test
path: operating_system/test-*
- split: val
path: operating_system/val-*
- split: dev
path: operating_system/dev-*
- config_name: physician
data_files:
- split: test
path: physician/test-*
- split: val
path: physician/val-*
- split: dev
path: physician/dev-*
- config_name: plant_protection
data_files:
- split: test
path: plant_protection/test-*
- split: val
path: plant_protection/val-*
- split: dev
path: plant_protection/dev-*
- config_name: probability_and_statistics
data_files:
- split: test
path: probability_and_statistics/test-*
- split: val
path: probability_and_statistics/val-*
- split: dev
path: probability_and_statistics/dev-*
- config_name: professional_tour_guide
data_files:
- split: test
path: professional_tour_guide/test-*
- split: val
path: professional_tour_guide/val-*
- split: dev
path: professional_tour_guide/dev-*
- config_name: sports_science
data_files:
- split: test
path: sports_science/test-*
- split: val
path: sports_science/val-*
- split: dev
path: sports_science/dev-*
- config_name: tax_accountant
data_files:
- split: test
path: tax_accountant/test-*
- split: val
path: tax_accountant/val-*
- split: dev
path: tax_accountant/dev-*
- config_name: teacher_qualification
data_files:
- split: test
path: teacher_qualification/test-*
- split: val
path: teacher_qualification/val-*
- split: dev
path: teacher_qualification/dev-*
- config_name: urban_and_rural_planner
data_files:
- split: test
path: urban_and_rural_planner/test-*
- split: val
path: urban_and_rural_planner/val-*
- split: dev
path: urban_and_rural_planner/dev-*
- config_name: veterinary_medicine
data_files:
- split: test
path: veterinary_medicine/test-*
- split: val
path: veterinary_medicine/val-*
- split: dev
path: veterinary_medicine/dev-*
dataset_info:
- config_name: accountant
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 176917
num_examples: 443
- name: val
num_bytes: 19549
num_examples: 49
- name: dev
num_bytes: 3414
num_examples: 5
download_size: 151233
dataset_size: 199880
- config_name: advanced_mathematics
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 50031
num_examples: 173
- name: val
num_bytes: 5331
num_examples: 19
- name: dev
num_bytes: 7012
num_examples: 5
download_size: 50962
dataset_size: 62374
- config_name: art_studies
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 41227
num_examples: 298
- name: val
num_bytes: 4581
num_examples: 33
- name: dev
num_bytes: 1439
num_examples: 5
download_size: 46524
dataset_size: 47247
- config_name: basic_medicine
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 28820
num_examples: 175
- name: val
num_bytes: 2627
num_examples: 19
- name: dev
num_bytes: 1825
num_examples: 5
download_size: 37360
dataset_size: 33272
- config_name: business_administration
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 78387
num_examples: 301
- name: val
num_bytes: 9225
num_examples: 33
- name: dev
num_bytes: 3155
num_examples: 5
download_size: 75885
dataset_size: 90767
- config_name: chinese_language_and_literature
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 32328
num_examples: 209
- name: val
num_bytes: 3446
num_examples: 23
- name: dev
num_bytes: 1892
num_examples: 5
download_size: 42310
dataset_size: 37666
- config_name: civil_servant
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 181504
num_examples: 429
- name: val
num_bytes: 21273
num_examples: 47
- name: dev
num_bytes: 4576
num_examples: 5
download_size: 179936
dataset_size: 207353
- config_name: clinical_medicine
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 42161
num_examples: 200
- name: val
num_bytes: 4167
num_examples: 22
- name: dev
num_bytes: 1951
num_examples: 5
download_size: 48689
dataset_size: 48279
- config_name: college_chemistry
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 45798
num_examples: 224
- name: val
num_bytes: 4443
num_examples: 24
- name: dev
num_bytes: 3611
num_examples: 5
download_size: 53519
dataset_size: 53852
- config_name: college_economics
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 119734
num_examples: 497
- name: val
num_bytes: 14461
num_examples: 55
- name: dev
num_bytes: 3673
num_examples: 5
download_size: 106080
dataset_size: 137868
- config_name: college_physics
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 55731
num_examples: 176
- name: val
num_bytes: 6145
num_examples: 19
- name: dev
num_bytes: 3824
num_examples: 5
download_size: 62877
dataset_size: 65700
- config_name: college_programming
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 83541
num_examples: 342
- name: val
num_bytes: 9543
num_examples: 37
- name: dev
num_bytes: 2882
num_examples: 5
download_size: 82850
dataset_size: 95966
- config_name: computer_architecture
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 40613
num_examples: 193
- name: val
num_bytes: 4149
num_examples: 21
- name: dev
num_bytes: 2793
num_examples: 5
download_size: 48021
dataset_size: 47555
- config_name: computer_network
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 35408
num_examples: 171
- name: val
num_bytes: 3799
num_examples: 19
- name: dev
num_bytes: 2361
num_examples: 5
download_size: 43940
dataset_size: 41568
- config_name: discrete_mathematics
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 36045
num_examples: 153
- name: val
num_bytes: 3424
num_examples: 16
- name: dev
num_bytes: 2002
num_examples: 5
download_size: 42941
dataset_size: 41471
- config_name: education_science
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 55753
num_examples: 270
- name: val
num_bytes: 5519
num_examples: 29
- name: dev
num_bytes: 3093
num_examples: 5
download_size: 60878
dataset_size: 64365
- config_name: electrical_engineer
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 73727
num_examples: 339
- name: val
num_bytes: 8315
num_examples: 37
- name: dev
num_bytes: 2180
num_examples: 5
download_size: 75493
dataset_size: 84222
- config_name: environmental_impact_assessment_engineer
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 84680
num_examples: 281
- name: val
num_bytes: 9186
num_examples: 31
- name: dev
num_bytes: 2495
num_examples: 5
download_size: 73938
dataset_size: 96361
- config_name: fire_engineer
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 83611
num_examples: 282
- name: val
num_bytes: 9998
num_examples: 31
- name: dev
num_bytes: 2209
num_examples: 5
download_size: 80027
dataset_size: 95818
- config_name: high_school_biology
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 55242
num_examples: 175
- name: val
num_bytes: 6105
num_examples: 19
- name: dev
num_bytes: 2164
num_examples: 5
download_size: 60521
dataset_size: 63511
- config_name: high_school_chemistry
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 46918
num_examples: 172
- name: val
num_bytes: 5625
num_examples: 19
- name: dev
num_bytes: 2576
num_examples: 5
download_size: 55668
dataset_size: 55119
- config_name: high_school_chinese
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 110347
num_examples: 178
- name: val
num_bytes: 10475
num_examples: 19
- name: dev
num_bytes: 5290
num_examples: 5
download_size: 121511
dataset_size: 126112
- config_name: high_school_geography
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 41244
num_examples: 178
- name: val
num_bytes: 3985
num_examples: 19
- name: dev
num_bytes: 2087
num_examples: 5
download_size: 49899
dataset_size: 47316
- config_name: high_school_history
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 56196
num_examples: 182
- name: val
num_bytes: 6618
num_examples: 20
- name: dev
num_bytes: 2421
num_examples: 5
download_size: 68541
dataset_size: 65235
- config_name: high_school_mathematics
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 41080
num_examples: 166
- name: val
num_bytes: 5144
num_examples: 18
- name: dev
num_bytes: 3552
num_examples: 5
download_size: 53050
dataset_size: 49776
- config_name: high_school_physics
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 61682
num_examples: 175
- name: val
num_bytes: 7266
num_examples: 19
- name: dev
num_bytes: 2266
num_examples: 5
download_size: 66380
dataset_size: 71214
- config_name: high_school_politics
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 83356
num_examples: 176
- name: val
num_bytes: 8909
num_examples: 19
- name: dev
num_bytes: 4730
num_examples: 5
download_size: 90214
dataset_size: 96995
- config_name: ideological_and_moral_cultivation
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 35315
num_examples: 172
- name: val
num_bytes: 3241
num_examples: 19
- name: dev
num_bytes: 1296
num_examples: 5
download_size: 41532
dataset_size: 39852
- config_name: law
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 79782
num_examples: 221
- name: val
num_bytes: 8119
num_examples: 24
- name: dev
num_bytes: 4142
num_examples: 5
download_size: 83562
dataset_size: 92043
- config_name: legal_professional
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 121985
num_examples: 215
- name: val
num_bytes: 12215
num_examples: 23
- name: dev
num_bytes: 6974
num_examples: 5
download_size: 125081
dataset_size: 141174
- config_name: logic
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 144246
num_examples: 204
- name: val
num_bytes: 15561
num_examples: 22
- name: dev
num_bytes: 5641
num_examples: 5
download_size: 141258
dataset_size: 165448
- config_name: mao_zedong_thought
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 56699
num_examples: 219
- name: val
num_bytes: 5487
num_examples: 24
- name: dev
num_bytes: 3349
num_examples: 5
download_size: 57281
dataset_size: 65535
- config_name: marxism
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 38662
num_examples: 179
- name: val
num_bytes: 4251
num_examples: 19
- name: dev
num_bytes: 2142
num_examples: 5
download_size: 45030
dataset_size: 45055
- config_name: metrology_engineer
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 47484
num_examples: 219
- name: val
num_bytes: 6116
num_examples: 24
- name: dev
num_bytes: 2485
num_examples: 5
download_size: 55033
dataset_size: 56085
- config_name: middle_school_biology
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 47264
num_examples: 192
- name: val
num_bytes: 5263
num_examples: 21
- name: dev
num_bytes: 4327
num_examples: 5
download_size: 58872
dataset_size: 56854
- config_name: middle_school_chemistry
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 47575
num_examples: 185
- name: val
num_bytes: 5654
num_examples: 20
- name: dev
num_bytes: 3866
num_examples: 5
download_size: 59005
dataset_size: 57095
- config_name: middle_school_geography
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 23329
num_examples: 108
- name: val
num_bytes: 2641
num_examples: 12
- name: dev
num_bytes: 2148
num_examples: 5
download_size: 37528
dataset_size: 28118
- config_name: middle_school_history
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 47076
num_examples: 207
- name: val
num_bytes: 5990
num_examples: 22
- name: dev
num_bytes: 2014
num_examples: 5
download_size: 55763
dataset_size: 55080
- config_name: middle_school_mathematics
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 33142
num_examples: 177
- name: val
num_bytes: 4897
num_examples: 19
- name: dev
num_bytes: 3187
num_examples: 5
download_size: 45790
dataset_size: 41226
- config_name: middle_school_physics
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 48793
num_examples: 178
- name: val
num_bytes: 5279
num_examples: 19
- name: dev
num_bytes: 3531
num_examples: 5
download_size: 60336
dataset_size: 57603
- config_name: middle_school_politics
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 72478
num_examples: 193
- name: val
num_bytes: 7320
num_examples: 21
- name: dev
num_bytes: 3687
num_examples: 5
download_size: 75631
dataset_size: 83485
- config_name: modern_chinese_history
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 51247
num_examples: 212
- name: val
num_bytes: 5188
num_examples: 23
- name: dev
num_bytes: 2983
num_examples: 5
download_size: 58881
dataset_size: 59418
- config_name: operating_system
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 31146
num_examples: 179
- name: val
num_bytes: 3299
num_examples: 19
- name: dev
num_bytes: 2557
num_examples: 5
download_size: 39873
dataset_size: 37002
- config_name: physician
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 89801
num_examples: 443
- name: val
num_bytes: 8710
num_examples: 49
- name: dev
num_bytes: 2033
num_examples: 5
download_size: 91373
dataset_size: 100544
- config_name: plant_protection
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 31877
num_examples: 199
- name: val
num_bytes: 3634
num_examples: 22
- name: dev
num_bytes: 3726
num_examples: 5
download_size: 42730
dataset_size: 39237
- config_name: probability_and_statistics
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 56749
num_examples: 166
- name: val
num_bytes: 5781
num_examples: 18
- name: dev
num_bytes: 6769
num_examples: 5
download_size: 62933
dataset_size: 69299
- config_name: professional_tour_guide
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 41231
num_examples: 266
- name: val
num_bytes: 4509
num_examples: 29
- name: dev
num_bytes: 1764
num_examples: 5
download_size: 51538
dataset_size: 47504
- config_name: sports_science
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 32527
num_examples: 180
- name: val
num_bytes: 3493
num_examples: 19
- name: dev
num_bytes: 4182
num_examples: 5
download_size: 44846
dataset_size: 40202
- config_name: tax_accountant
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 174482
num_examples: 443
- name: val
num_bytes: 18932
num_examples: 49
- name: dev
num_bytes: 4274
num_examples: 5
download_size: 147810
dataset_size: 197688
- config_name: teacher_qualification
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 107369
num_examples: 399
- name: val
num_bytes: 12220
num_examples: 44
- name: dev
num_bytes: 3215
num_examples: 5
download_size: 105490
dataset_size: 122804
- config_name: urban_and_rural_planner
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 110377
num_examples: 418
- name: val
num_bytes: 12793
num_examples: 46
- name: dev
num_bytes: 3166
num_examples: 5
download_size: 100220
dataset_size: 126336
- config_name: veterinary_medicine
features:
- name: id
dtype: int32
- name: question
dtype: string
- name: A
dtype: string
- name: B
dtype: string
- name: C
dtype: string
- name: D
dtype: string
- name: answer
dtype: string
- name: explanation
dtype: string
splits:
- name: test
num_bytes: 39465
num_examples: 210
- name: val
num_bytes: 4559
num_examples: 23
- name: dev
num_bytes: 2362
num_examples: 5
download_size: 48398
dataset_size: 46386
---
C-Eval is a comprehensive Chinese evaluation suite for foundation models. It consists of 13948 multiple-choice questions spanning 52 diverse disciplines and four difficulty levels. Please visit our [website](https://cevalbenchmark.com/) and [GitHub](https://github.com/SJTU-LIT/ceval/tree/main) or check our [paper](https://arxiv.org/abs/2305.08322) for more details.
Each subject consists of three splits: dev, val, and test. The dev set for each subject consists of five exemplars with explanations, intended for few-shot evaluation. The val set is intended to be used for hyperparameter tuning, and the test set is for model evaluation. Labels on the test split are not released; users are required to submit their results to automatically obtain test accuracy. [How to submit?](https://github.com/SJTU-LIT/ceval/tree/main#how-to-submit)
### Load the data
```python
from datasets import load_dataset
dataset=load_dataset(r"ceval/ceval-exam",name="computer_network")
print(dataset['val'][0])
# {'id': 0, 'question': '使用位填充方法,以01111110为位首flag,数据为011011111111111111110010,求问传送时要添加几个0____', 'A': '1', 'B': '2', 'C': '3', 'D': '4', 'answer': 'C', 'explanation': ''}
```
More details on loading and using the data are at our [github page](https://github.com/SJTU-LIT/ceval#data).
Please cite our paper if you use our dataset.
```
@article{huang2023ceval,
title={C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models},
author={Huang, Yuzhen and Bai, Yuzhuo and Zhu, Zhihao and Zhang, Junlei and Zhang, Jinghan and Su, Tangjun and Liu, Junteng and Lv, Chuancheng and Zhang, Yikai and Lei, Jiayi and Fu, Yao and Sun, Maosong and He, Junxian},
journal={arXiv preprint arXiv:2305.08322},
year={2023}
}
```
|
applied-ai-018/pretraining_v1-omega_books | applied-ai-018 | "2024-08-05T19:01:31Z" | 18,559 | 1 | [
"size_categories:100M<n<1B",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-07-31T08:53:54Z" | ---
dataset_info:
config_name: CC-MAIN-2013-20
features:
- name: text
dtype: string
- name: id
dtype: string
- name: dump
dtype: string
- name: url
dtype: string
- name: file_path
dtype: string
- name: language
dtype: string
- name: language_score
dtype: float64
- name: token_count
dtype: int64
- name: score
dtype: float64
- name: int_score
dtype: int64
splits:
- name: train
num_bytes: 235476901236
num_examples: 51901183
download_size: 138494178972
dataset_size: 235476901236
configs:
- config_name: CC-MAIN-2013-20
data_files:
- split: train
path: CC-MAIN-2013-20/train-*
---
|
mteb/biblenlp-corpus-mmteb | mteb | "2024-05-07T00:47:48Z" | 18,551 | 1 | [
"annotations_creators:no-annotation",
"language_creators:expert-generated",
"multilinguality:translation",
"multilinguality:multilingual",
"language:aai",
"language:aak",
"language:aau",
"language:aaz",
"language:abt",
"language:abx",
"language:aby",
"language:acf",
"language:acr",
"language:acu",
"language:adz",
"language:aer",
"language:aey",
"language:agd",
"language:agg",
"language:agm",
"language:agn",
"language:agr",
"language:agt",
"language:agu",
"language:aia",
"language:aii",
"language:aka",
"language:ake",
"language:alp",
"language:alq",
"language:als",
"language:aly",
"language:ame",
"language:amf",
"language:amk",
"language:amm",
"language:amn",
"language:amo",
"language:amp",
"language:amr",
"language:amu",
"language:amx",
"language:anh",
"language:anv",
"language:aoi",
"language:aoj",
"language:aom",
"language:aon",
"language:apb",
"language:ape",
"language:apn",
"language:apr",
"language:apu",
"language:apw",
"language:apz",
"language:arb",
"language:are",
"language:arl",
"language:arn",
"language:arp",
"language:asm",
"language:aso",
"language:ata",
"language:atb",
"language:atd",
"language:atg",
"language:att",
"language:auc",
"language:aui",
"language:auy",
"language:avt",
"language:awb",
"language:awk",
"language:awx",
"language:azb",
"language:azg",
"language:azz",
"language:bao",
"language:bba",
"language:bbb",
"language:bbr",
"language:bch",
"language:bco",
"language:bdd",
"language:bea",
"language:bef",
"language:bel",
"language:ben",
"language:beo",
"language:beu",
"language:bgs",
"language:bgt",
"language:bhg",
"language:bhl",
"language:big",
"language:bjk",
"language:bjp",
"language:bjr",
"language:bjv",
"language:bjz",
"language:bkd",
"language:bki",
"language:bkq",
"language:bkx",
"language:bla",
"language:blw",
"language:blz",
"language:bmh",
"language:bmk",
"language:bmr",
"language:bmu",
"language:bnp",
"language:boa",
"language:boj",
"language:bon",
"language:box",
"language:bpr",
"language:bps",
"language:bqc",
"language:bqp",
"language:bre",
"language:bsj",
"language:bsn",
"language:bsp",
"language:bss",
"language:buk",
"language:bus",
"language:bvd",
"language:bvr",
"language:bxh",
"language:byr",
"language:byx",
"language:bzd",
"language:bzh",
"language:bzj",
"language:caa",
"language:cab",
"language:cac",
"language:caf",
"language:cak",
"language:cao",
"language:cap",
"language:car",
"language:cav",
"language:cax",
"language:cbc",
"language:cbi",
"language:cbk",
"language:cbr",
"language:cbs",
"language:cbt",
"language:cbu",
"language:cbv",
"language:cco",
"language:ceb",
"language:cek",
"language:ces",
"language:cgc",
"language:cha",
"language:chd",
"language:chf",
"language:chk",
"language:chq",
"language:chz",
"language:cjo",
"language:cjv",
"language:ckb",
"language:cle",
"language:clu",
"language:cme",
"language:cmn",
"language:cni",
"language:cnl",
"language:cnt",
"language:cof",
"language:con",
"language:cop",
"language:cot",
"language:cpa",
"language:cpb",
"language:cpc",
"language:cpu",
"language:cpy",
"language:crn",
"language:crx",
"language:cso",
"language:csy",
"language:cta",
"language:cth",
"language:ctp",
"language:ctu",
"language:cub",
"language:cuc",
"language:cui",
"language:cuk",
"language:cut",
"language:cux",
"language:cwe",
"language:cya",
"language:daa",
"language:dad",
"language:dah",
"language:dan",
"language:ded",
"language:deu",
"language:dgc",
"language:dgr",
"language:dgz",
"language:dhg",
"language:dif",
"language:dik",
"language:dji",
"language:djk",
"language:djr",
"language:dob",
"language:dop",
"language:dov",
"language:dwr",
"language:dww",
"language:dwy",
"language:ebk",
"language:eko",
"language:emi",
"language:emp",
"language:eng",
"language:enq",
"language:epo",
"language:eri",
"language:ese",
"language:esk",
"language:etr",
"language:ewe",
"language:faa",
"language:fai",
"language:far",
"language:ffm",
"language:for",
"language:fra",
"language:fue",
"language:fuf",
"language:fuh",
"language:gah",
"language:gai",
"language:gam",
"language:gaw",
"language:gdn",
"language:gdr",
"language:geb",
"language:gfk",
"language:ghs",
"language:glk",
"language:gmv",
"language:gng",
"language:gnn",
"language:gnw",
"language:gof",
"language:grc",
"language:gub",
"language:guh",
"language:gui",
"language:guj",
"language:gul",
"language:gum",
"language:gun",
"language:guo",
"language:gup",
"language:gux",
"language:gvc",
"language:gvf",
"language:gvn",
"language:gvs",
"language:gwi",
"language:gym",
"language:gyr",
"language:hat",
"language:hau",
"language:haw",
"language:hbo",
"language:hch",
"language:heb",
"language:heg",
"language:hin",
"language:hix",
"language:hla",
"language:hlt",
"language:hmo",
"language:hns",
"language:hop",
"language:hot",
"language:hrv",
"language:hto",
"language:hub",
"language:hui",
"language:hun",
"language:hus",
"language:huu",
"language:huv",
"language:hvn",
"language:ian",
"language:ign",
"language:ikk",
"language:ikw",
"language:ilo",
"language:imo",
"language:inb",
"language:ind",
"language:ino",
"language:iou",
"language:ipi",
"language:isn",
"language:ita",
"language:iws",
"language:ixl",
"language:jac",
"language:jae",
"language:jao",
"language:jic",
"language:jid",
"language:jiv",
"language:jni",
"language:jpn",
"language:jvn",
"language:kan",
"language:kaq",
"language:kbc",
"language:kbh",
"language:kbm",
"language:kbq",
"language:kdc",
"language:kde",
"language:kdl",
"language:kek",
"language:ken",
"language:kew",
"language:kgf",
"language:kgk",
"language:kgp",
"language:khs",
"language:khz",
"language:kik",
"language:kiw",
"language:kiz",
"language:kje",
"language:kjn",
"language:kjs",
"language:kkc",
"language:kkl",
"language:klt",
"language:klv",
"language:kmg",
"language:kmh",
"language:kmk",
"language:kmo",
"language:kms",
"language:kmu",
"language:kne",
"language:knf",
"language:knj",
"language:knv",
"language:kos",
"language:kpf",
"language:kpg",
"language:kpj",
"language:kpr",
"language:kpw",
"language:kpx",
"language:kqa",
"language:kqc",
"language:kqf",
"language:kql",
"language:kqw",
"language:ksd",
"language:ksj",
"language:ksr",
"language:ktm",
"language:kto",
"language:kud",
"language:kue",
"language:kup",
"language:kvg",
"language:kvn",
"language:kwd",
"language:kwf",
"language:kwi",
"language:kwj",
"language:kyc",
"language:kyf",
"language:kyg",
"language:kyq",
"language:kyz",
"language:kze",
"language:lac",
"language:lat",
"language:lbb",
"language:lbk",
"language:lcm",
"language:leu",
"language:lex",
"language:lgl",
"language:lid",
"language:lif",
"language:lin",
"language:lit",
"language:llg",
"language:lug",
"language:luo",
"language:lww",
"language:maa",
"language:maj",
"language:mal",
"language:mam",
"language:maq",
"language:mar",
"language:mau",
"language:mav",
"language:maz",
"language:mbb",
"language:mbc",
"language:mbh",
"language:mbj",
"language:mbl",
"language:mbs",
"language:mbt",
"language:mca",
"language:mcb",
"language:mcd",
"language:mcf",
"language:mco",
"language:mcp",
"language:mcq",
"language:mcr",
"language:mdy",
"language:med",
"language:mee",
"language:mek",
"language:meq",
"language:met",
"language:meu",
"language:mgc",
"language:mgh",
"language:mgw",
"language:mhl",
"language:mib",
"language:mic",
"language:mie",
"language:mig",
"language:mih",
"language:mil",
"language:mio",
"language:mir",
"language:mit",
"language:miz",
"language:mjc",
"language:mkj",
"language:mkl",
"language:mkn",
"language:mks",
"language:mle",
"language:mlh",
"language:mlp",
"language:mmo",
"language:mmx",
"language:mna",
"language:mop",
"language:mox",
"language:mph",
"language:mpj",
"language:mpm",
"language:mpp",
"language:mps",
"language:mpt",
"language:mpx",
"language:mqb",
"language:mqj",
"language:msb",
"language:msc",
"language:msk",
"language:msm",
"language:msy",
"language:mti",
"language:mto",
"language:mux",
"language:muy",
"language:mva",
"language:mvn",
"language:mwc",
"language:mwe",
"language:mwf",
"language:mwp",
"language:mxb",
"language:mxp",
"language:mxq",
"language:mxt",
"language:mya",
"language:myk",
"language:myu",
"language:myw",
"language:myy",
"language:mzz",
"language:nab",
"language:naf",
"language:nak",
"language:nas",
"language:nay",
"language:nbq",
"language:nca",
"language:nch",
"language:ncj",
"language:ncl",
"language:ncu",
"language:ndg",
"language:ndj",
"language:nfa",
"language:ngp",
"language:ngu",
"language:nhe",
"language:nhg",
"language:nhi",
"language:nho",
"language:nhr",
"language:nhu",
"language:nhw",
"language:nhy",
"language:nif",
"language:nii",
"language:nin",
"language:nko",
"language:nld",
"language:nlg",
"language:nmw",
"language:nna",
"language:nnq",
"language:noa",
"language:nop",
"language:not",
"language:nou",
"language:npi",
"language:npl",
"language:nsn",
"language:nss",
"language:ntj",
"language:ntp",
"language:ntu",
"language:nuy",
"language:nvm",
"language:nwi",
"language:nya",
"language:nys",
"language:nyu",
"language:obo",
"language:okv",
"language:omw",
"language:ong",
"language:ons",
"language:ood",
"language:opm",
"language:ory",
"language:ote",
"language:otm",
"language:otn",
"language:otq",
"language:ots",
"language:pab",
"language:pad",
"language:pah",
"language:pan",
"language:pao",
"language:pes",
"language:pib",
"language:pio",
"language:pir",
"language:piu",
"language:pjt",
"language:pls",
"language:plu",
"language:pma",
"language:poe",
"language:poh",
"language:poi",
"language:pol",
"language:pon",
"language:por",
"language:poy",
"language:ppo",
"language:prf",
"language:pri",
"language:ptp",
"language:ptu",
"language:pwg",
"language:qub",
"language:quc",
"language:quf",
"language:quh",
"language:qul",
"language:qup",
"language:qvc",
"language:qve",
"language:qvh",
"language:qvm",
"language:qvn",
"language:qvs",
"language:qvw",
"language:qvz",
"language:qwh",
"language:qxh",
"language:qxn",
"language:qxo",
"language:rai",
"language:reg",
"language:rgu",
"language:rkb",
"language:rmc",
"language:rmy",
"language:ron",
"language:roo",
"language:rop",
"language:row",
"language:rro",
"language:ruf",
"language:rug",
"language:rus",
"language:rwo",
"language:sab",
"language:san",
"language:sbe",
"language:sbk",
"language:sbs",
"language:seh",
"language:sey",
"language:sgb",
"language:sgz",
"language:shj",
"language:shp",
"language:sim",
"language:sja",
"language:sll",
"language:smk",
"language:snc",
"language:snn",
"language:snp",
"language:snx",
"language:sny",
"language:som",
"language:soq",
"language:soy",
"language:spa",
"language:spl",
"language:spm",
"language:spp",
"language:sps",
"language:spy",
"language:sri",
"language:srm",
"language:srn",
"language:srp",
"language:srq",
"language:ssd",
"language:ssg",
"language:ssx",
"language:stp",
"language:sua",
"language:sue",
"language:sus",
"language:suz",
"language:swe",
"language:swh",
"language:swp",
"language:sxb",
"language:tac",
"language:taj",
"language:tam",
"language:tav",
"language:taw",
"language:tbc",
"language:tbf",
"language:tbg",
"language:tbl",
"language:tbo",
"language:tbz",
"language:tca",
"language:tcs",
"language:tcz",
"language:tdt",
"language:tee",
"language:tel",
"language:ter",
"language:tet",
"language:tew",
"language:tfr",
"language:tgk",
"language:tgl",
"language:tgo",
"language:tgp",
"language:tha",
"language:thd",
"language:tif",
"language:tim",
"language:tiw",
"language:tiy",
"language:tke",
"language:tku",
"language:tlf",
"language:tmd",
"language:tna",
"language:tnc",
"language:tnk",
"language:tnn",
"language:tnp",
"language:toc",
"language:tod",
"language:tof",
"language:toj",
"language:ton",
"language:too",
"language:top",
"language:tos",
"language:tpa",
"language:tpi",
"language:tpt",
"language:tpz",
"language:trc",
"language:tsw",
"language:ttc",
"language:tte",
"language:tuc",
"language:tue",
"language:tuf",
"language:tuo",
"language:tur",
"language:tvk",
"language:twi",
"language:txq",
"language:txu",
"language:tzj",
"language:tzo",
"language:ubr",
"language:ubu",
"language:udu",
"language:uig",
"language:ukr",
"language:uli",
"language:ulk",
"language:upv",
"language:ura",
"language:urb",
"language:urd",
"language:uri",
"language:urt",
"language:urw",
"language:usa",
"language:usp",
"language:uvh",
"language:uvl",
"language:vid",
"language:vie",
"language:viv",
"language:vmy",
"language:waj",
"language:wal",
"language:wap",
"language:wat",
"language:wbi",
"language:wbp",
"language:wed",
"language:wer",
"language:wim",
"language:wiu",
"language:wiv",
"language:wmt",
"language:wmw",
"language:wnc",
"language:wnu",
"language:wol",
"language:wos",
"language:wrk",
"language:wro",
"language:wrs",
"language:wsk",
"language:wuv",
"language:xav",
"language:xbi",
"language:xed",
"language:xla",
"language:xnn",
"language:xon",
"language:xsi",
"language:xtd",
"language:xtm",
"language:yaa",
"language:yad",
"language:yal",
"language:yap",
"language:yaq",
"language:yby",
"language:ycn",
"language:yka",
"language:yle",
"language:yml",
"language:yon",
"language:yor",
"language:yrb",
"language:yre",
"language:yss",
"language:yuj",
"language:yut",
"language:yuw",
"language:yva",
"language:zaa",
"language:zab",
"language:zac",
"language:zad",
"language:zai",
"language:zaj",
"language:zam",
"language:zao",
"language:zap",
"language:zar",
"language:zas",
"language:zat",
"language:zav",
"language:zaw",
"language:zca",
"language:zga",
"language:zia",
"language:ziw",
"language:zlm",
"language:zos",
"language:zpc",
"language:zpl",
"language:zpm",
"language:zpo",
"language:zpq",
"language:zpu",
"language:zpv",
"language:zpz",
"language:zsr",
"language:ztq",
"language:zty",
"language:zyp",
"language:be",
"language:br",
"language:cs",
"language:ch",
"language:zh",
"language:de",
"language:en",
"language:eo",
"language:fr",
"language:ht",
"language:he",
"language:hr",
"language:id",
"language:it",
"language:ja",
"language:la",
"language:nl",
"language:ru",
"language:sa",
"language:so",
"language:es",
"language:sr",
"language:sv",
"language:to",
"language:uk",
"language:vi",
"license:cc-by-4.0",
"license:other",
"size_categories:1M<n<10M",
"format:json",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"region:us"
] | null | "2024-05-05T22:41:26Z" | ---
annotations_creators:
- no-annotation
language_creators:
- expert-generated
language:
- aai
- aak
- aau
- aaz
- abt
- abx
- aby
- acf
- acr
- acu
- adz
- aer
- aey
- agd
- agg
- agm
- agn
- agr
- agt
- agu
- aia
- aii
- aka
- ake
- alp
- alq
- als
- aly
- ame
- amf
- amk
- amm
- amn
- amo
- amp
- amr
- amu
- amx
- anh
- anv
- aoi
- aoj
- aom
- aon
- apb
- ape
- apn
- apr
- apu
- apw
- apz
- arb
- are
- arl
- arn
- arp
- asm
- aso
- ata
- atb
- atd
- atg
- att
- auc
- aui
- auy
- avt
- awb
- awk
- awx
- azb
- azg
- azz
- bao
- bba
- bbb
- bbr
- bch
- bco
- bdd
- bea
- bef
- bel
- ben
- beo
- beu
- bgs
- bgt
- bhg
- bhl
- big
- bjk
- bjp
- bjr
- bjv
- bjz
- bkd
- bki
- bkq
- bkx
- bla
- blw
- blz
- bmh
- bmk
- bmr
- bmu
- bnp
- boa
- boj
- bon
- box
- bpr
- bps
- bqc
- bqp
- bre
- bsj
- bsn
- bsp
- bss
- buk
- bus
- bvd
- bvr
- bxh
- byr
- byx
- bzd
- bzh
- bzj
- caa
- cab
- cac
- caf
- cak
- cao
- cap
- car
- cav
- cax
- cbc
- cbi
- cbk
- cbr
- cbs
- cbt
- cbu
- cbv
- cco
- ceb
- cek
- ces
- cgc
- cha
- chd
- chf
- chk
- chq
- chz
- cjo
- cjv
- ckb
- cle
- clu
- cme
- cmn
- cni
- cnl
- cnt
- cof
- con
- cop
- cot
- cpa
- cpb
- cpc
- cpu
- cpy
- crn
- crx
- cso
- csy
- cta
- cth
- ctp
- ctu
- cub
- cuc
- cui
- cuk
- cut
- cux
- cwe
- cya
- daa
- dad
- dah
- dan
- ded
- deu
- dgc
- dgr
- dgz
- dhg
- dif
- dik
- dji
- djk
- djr
- dob
- dop
- dov
- dwr
- dww
- dwy
- ebk
- eko
- emi
- emp
- eng
- enq
- epo
- eri
- ese
- esk
- etr
- ewe
- faa
- fai
- far
- ffm
- for
- fra
- fue
- fuf
- fuh
- gah
- gai
- gam
- gaw
- gdn
- gdr
- geb
- gfk
- ghs
- glk
- gmv
- gng
- gnn
- gnw
- gof
- grc
- gub
- guh
- gui
- guj
- gul
- gum
- gun
- guo
- gup
- gux
- gvc
- gvf
- gvn
- gvs
- gwi
- gym
- gyr
- hat
- hau
- haw
- hbo
- hch
- heb
- heg
- hin
- hix
- hla
- hlt
- hmo
- hns
- hop
- hot
- hrv
- hto
- hub
- hui
- hun
- hus
- huu
- huv
- hvn
- ian
- ign
- ikk
- ikw
- ilo
- imo
- inb
- ind
- ino
- iou
- ipi
- isn
- ita
- iws
- ixl
- jac
- jae
- jao
- jic
- jid
- jiv
- jni
- jpn
- jvn
- kan
- kaq
- kbc
- kbh
- kbm
- kbq
- kdc
- kde
- kdl
- kek
- ken
- kew
- kgf
- kgk
- kgp
- khs
- khz
- kik
- kiw
- kiz
- kje
- kjn
- kjs
- kkc
- kkl
- klt
- klv
- kmg
- kmh
- kmk
- kmo
- kms
- kmu
- kne
- knf
- knj
- knv
- kos
- kpf
- kpg
- kpj
- kpr
- kpw
- kpx
- kqa
- kqc
- kqf
- kql
- kqw
- ksd
- ksj
- ksr
- ktm
- kto
- kud
- kue
- kup
- kvg
- kvn
- kwd
- kwf
- kwi
- kwj
- kyc
- kyf
- kyg
- kyq
- kyz
- kze
- lac
- lat
- lbb
- lbk
- lcm
- leu
- lex
- lgl
- lid
- lif
- lin
- lit
- llg
- lug
- luo
- lww
- maa
- maj
- mal
- mam
- maq
- mar
- mau
- mav
- maz
- mbb
- mbc
- mbh
- mbj
- mbl
- mbs
- mbt
- mca
- mcb
- mcd
- mcf
- mco
- mcp
- mcq
- mcr
- mdy
- med
- mee
- mek
- meq
- met
- meu
- mgc
- mgh
- mgw
- mhl
- mib
- mic
- mie
- mig
- mih
- mil
- mio
- mir
- mit
- miz
- mjc
- mkj
- mkl
- mkn
- mks
- mle
- mlh
- mlp
- mmo
- mmx
- mna
- mop
- mox
- mph
- mpj
- mpm
- mpp
- mps
- mpt
- mpx
- mqb
- mqj
- msb
- msc
- msk
- msm
- msy
- mti
- mto
- mux
- muy
- mva
- mvn
- mwc
- mwe
- mwf
- mwp
- mxb
- mxp
- mxq
- mxt
- mya
- myk
- myu
- myw
- myy
- mzz
- nab
- naf
- nak
- nas
- nay
- nbq
- nca
- nch
- ncj
- ncl
- ncu
- ndg
- ndj
- nfa
- ngp
- ngu
- nhe
- nhg
- nhi
- nho
- nhr
- nhu
- nhw
- nhy
- nif
- nii
- nin
- nko
- nld
- nlg
- nmw
- nna
- nnq
- noa
- nop
- not
- nou
- npi
- npl
- nsn
- nss
- ntj
- ntp
- ntu
- nuy
- nvm
- nwi
- nya
- nys
- nyu
- obo
- okv
- omw
- ong
- ons
- ood
- opm
- ory
- ote
- otm
- otn
- otq
- ots
- pab
- pad
- pah
- pan
- pao
- pes
- pib
- pio
- pir
- piu
- pjt
- pls
- plu
- pma
- poe
- poh
- poi
- pol
- pon
- por
- poy
- ppo
- prf
- pri
- ptp
- ptu
- pwg
- qub
- quc
- quf
- quh
- qul
- qup
- qvc
- qve
- qvh
- qvm
- qvn
- qvs
- qvw
- qvz
- qwh
- qxh
- qxn
- qxo
- rai
- reg
- rgu
- rkb
- rmc
- rmy
- ron
- roo
- rop
- row
- rro
- ruf
- rug
- rus
- rwo
- sab
- san
- sbe
- sbk
- sbs
- seh
- sey
- sgb
- sgz
- shj
- shp
- sim
- sja
- sll
- smk
- snc
- snn
- snp
- snx
- sny
- som
- soq
- soy
- spa
- spl
- spm
- spp
- sps
- spy
- sri
- srm
- srn
- srp
- srq
- ssd
- ssg
- ssx
- stp
- sua
- sue
- sus
- suz
- swe
- swh
- swp
- sxb
- tac
- taj
- tam
- tav
- taw
- tbc
- tbf
- tbg
- tbl
- tbo
- tbz
- tca
- tcs
- tcz
- tdt
- tee
- tel
- ter
- tet
- tew
- tfr
- tgk
- tgl
- tgo
- tgp
- tha
- thd
- tif
- tim
- tiw
- tiy
- tke
- tku
- tlf
- tmd
- tna
- tnc
- tnk
- tnn
- tnp
- toc
- tod
- tof
- toj
- ton
- too
- top
- tos
- tpa
- tpi
- tpt
- tpz
- trc
- tsw
- ttc
- tte
- tuc
- tue
- tuf
- tuo
- tur
- tvk
- twi
- txq
- txu
- tzj
- tzo
- ubr
- ubu
- udu
- uig
- ukr
- uli
- ulk
- upv
- ura
- urb
- urd
- uri
- urt
- urw
- usa
- usp
- uvh
- uvl
- vid
- vie
- viv
- vmy
- waj
- wal
- wap
- wat
- wbi
- wbp
- wed
- wer
- wim
- wiu
- wiv
- wmt
- wmw
- wnc
- wnu
- wol
- wos
- wrk
- wro
- wrs
- wsk
- wuv
- xav
- xbi
- xed
- xla
- xnn
- xon
- xsi
- xtd
- xtm
- yaa
- yad
- yal
- yap
- yaq
- yby
- ycn
- yka
- yle
- yml
- yon
- yor
- yrb
- yre
- yss
- yuj
- yut
- yuw
- yva
- zaa
- zab
- zac
- zad
- zai
- zaj
- zam
- zao
- zap
- zar
- zas
- zat
- zav
- zaw
- zca
- zga
- zia
- ziw
- zlm
- zos
- zpc
- zpl
- zpm
- zpo
- zpq
- zpu
- zpv
- zpz
- zsr
- ztq
- zty
- zyp
- be
- br
- cs
- ch
- zh
- de
- en
- eo
- fr
- ht
- he
- hr
- id
- it
- ja
- la
- nl
- ru
- sa
- so
- es
- sr
- sv
- to
- uk
- vi
license:
- cc-by-4.0
- other
multilinguality:
- translation
- multilingual
pretty_name: biblenlp-corpus-mmteb
size_categories:
- 1M<n<10M
configs:
- config_name: default
data_files:
- path: train/*.jsonl.gz
split: train
- path: test/*.jsonl.gz
split: test
- path: validation/*.jsonl.gz
split: validation
- config_name: eng_Latn-zac_Latn
data_files:
- path: train/eng_Latn-zac_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zac_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zac_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-lit_Latn
data_files:
- path: train/eng_Latn-lit_Latn.jsonl.gz
split: train
- path: test/eng_Latn-lit_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-lit_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cgc_Latn
data_files:
- path: train/eng_Latn-cgc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cgc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cgc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-guh_Latn
data_files:
- path: train/eng_Latn-guh_Latn.jsonl.gz
split: train
- path: test/eng_Latn-guh_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-guh_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ckb_Arab
data_files:
- path: train/eng_Latn-ckb_Arab.jsonl.gz
split: train
- path: test/eng_Latn-ckb_Arab.jsonl.gz
split: test
- path: validation/eng_Latn-ckb_Arab.jsonl.gz
split: validation
- config_name: eng_Latn-cop_Copt
data_files:
- path: train/eng_Latn-cop_Copt.jsonl.gz
split: train
- path: test/eng_Latn-cop_Copt.jsonl.gz
split: test
- path: validation/eng_Latn-cop_Copt.jsonl.gz
split: validation
- config_name: eng_Latn-lif_Deva
data_files:
- path: train/eng_Latn-lif_Deva.jsonl.gz
split: train
- path: test/eng_Latn-lif_Deva.jsonl.gz
split: test
- path: validation/eng_Latn-lif_Deva.jsonl.gz
split: validation
- config_name: eng_Latn-cwe_Latn
data_files:
- path: train/eng_Latn-cwe_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cwe_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cwe_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kwj_Latn
data_files:
- path: train/eng_Latn-kwj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kwj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kwj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-srp_Latn
data_files:
- path: train/eng_Latn-srp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-srp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-srp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-qvn_Latn
data_files:
- path: train/eng_Latn-qvn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-qvn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-qvn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tsw_Latn
data_files:
- path: train/eng_Latn-tsw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tsw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tsw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wro_Latn
data_files:
- path: train/eng_Latn-wro_Latn.jsonl.gz
split: train
- path: test/eng_Latn-wro_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-wro_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tod_Latn
data_files:
- path: train/eng_Latn-tod_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tod_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tod_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bco_Latn
data_files:
- path: train/eng_Latn-bco_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bco_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bco_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ikk_Latn
data_files:
- path: train/eng_Latn-ikk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ikk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ikk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tna_Latn
data_files:
- path: train/eng_Latn-tna_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tna_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tna_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-swp_Latn
data_files:
- path: train/eng_Latn-swp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-swp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-swp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-agm_Latn
data_files:
- path: train/eng_Latn-agm_Latn.jsonl.gz
split: train
- path: test/eng_Latn-agm_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-agm_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-con_Latn
data_files:
- path: train/eng_Latn-con_Latn.jsonl.gz
split: train
- path: test/eng_Latn-con_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-con_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-sgz_Latn
data_files:
- path: train/eng_Latn-sgz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-sgz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-sgz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mwc_Latn
data_files:
- path: train/eng_Latn-mwc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mwc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mwc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-azb_Arab
data_files:
- path: train/eng_Latn-azb_Arab.jsonl.gz
split: train
- path: test/eng_Latn-azb_Arab.jsonl.gz
split: test
- path: validation/eng_Latn-azb_Arab.jsonl.gz
split: validation
- config_name: eng_Latn-aon_Latn
data_files:
- path: train/eng_Latn-aon_Latn.jsonl.gz
split: train
- path: test/eng_Latn-aon_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-aon_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mvn_Latn
data_files:
- path: train/eng_Latn-mvn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mvn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mvn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mpj_Latn
data_files:
- path: train/eng_Latn-mpj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mpj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mpj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cot_Latn
data_files:
- path: train/eng_Latn-cot_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cot_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cot_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tuo_Latn
data_files:
- path: train/eng_Latn-tuo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tuo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tuo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-iou_Latn
data_files:
- path: train/eng_Latn-iou_Latn.jsonl.gz
split: train
- path: test/eng_Latn-iou_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-iou_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-big_Latn
data_files:
- path: train/eng_Latn-big_Latn.jsonl.gz
split: train
- path: test/eng_Latn-big_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-big_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-apw_Latn
data_files:
- path: train/eng_Latn-apw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-apw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-apw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kpx_Latn
data_files:
- path: train/eng_Latn-kpx_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kpx_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kpx_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cui_Latn
data_files:
- path: train/eng_Latn-cui_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cui_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cui_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bnp_Latn
data_files:
- path: train/eng_Latn-bnp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bnp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bnp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ngp_Latn
data_files:
- path: train/eng_Latn-ngp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ngp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ngp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mkj_Latn
data_files:
- path: train/eng_Latn-mkj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mkj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mkj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-chf_Latn
data_files:
- path: train/eng_Latn-chf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-chf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-chf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tca_Latn
data_files:
- path: train/eng_Latn-tca_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tca_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tca_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-poh_Latn
data_files:
- path: train/eng_Latn-poh_Latn.jsonl.gz
split: train
- path: test/eng_Latn-poh_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-poh_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ese_Latn
data_files:
- path: train/eng_Latn-ese_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ese_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ese_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-plu_Latn
data_files:
- path: train/eng_Latn-plu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-plu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-plu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-crn_Latn
data_files:
- path: train/eng_Latn-crn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-crn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-crn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mxt_Latn
data_files:
- path: train/eng_Latn-mxt_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mxt_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mxt_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tnk_Latn
data_files:
- path: train/eng_Latn-tnk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tnk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tnk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zar_Latn
data_files:
- path: train/eng_Latn-zar_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zar_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zar_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-sri_Latn
data_files:
- path: train/eng_Latn-sri_Latn.jsonl.gz
split: train
- path: test/eng_Latn-sri_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-sri_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-pan_Guru
data_files:
- path: train/eng_Latn-pan_Guru.jsonl.gz
split: train
- path: test/eng_Latn-pan_Guru.jsonl.gz
split: test
- path: validation/eng_Latn-pan_Guru.jsonl.gz
split: validation
- config_name: eng_Latn-kik_Latn
data_files:
- path: train/eng_Latn-kik_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kik_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kik_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-yby_Latn
data_files:
- path: train/eng_Latn-yby_Latn.jsonl.gz
split: train
- path: test/eng_Latn-yby_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-yby_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-qup_Latn
data_files:
- path: train/eng_Latn-qup_Latn.jsonl.gz
split: train
- path: test/eng_Latn-qup_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-qup_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mco_Latn
data_files:
- path: train/eng_Latn-mco_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mco_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mco_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gux_Latn
data_files:
- path: train/eng_Latn-gux_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gux_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gux_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-spa_Latn
data_files:
- path: train/eng_Latn-spa_Latn.jsonl.gz
split: train
- path: test/eng_Latn-spa_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-spa_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-heg_Latn
data_files:
- path: train/eng_Latn-heg_Latn.jsonl.gz
split: train
- path: test/eng_Latn-heg_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-heg_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gwi_Latn
data_files:
- path: train/eng_Latn-gwi_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gwi_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gwi_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ttc_Latn
data_files:
- path: train/eng_Latn-ttc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ttc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ttc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mqj_Latn
data_files:
- path: train/eng_Latn-mqj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mqj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mqj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-pjt_Latn
data_files:
- path: train/eng_Latn-pjt_Latn.jsonl.gz
split: train
- path: test/eng_Latn-pjt_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-pjt_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gui_Latn
data_files:
- path: train/eng_Latn-gui_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gui_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gui_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tel_Telu
data_files:
- path: train/eng_Latn-tel_Telu.jsonl.gz
split: train
- path: test/eng_Latn-tel_Telu.jsonl.gz
split: test
- path: validation/eng_Latn-tel_Telu.jsonl.gz
split: validation
- config_name: eng_Latn-lbb_Latn
data_files:
- path: train/eng_Latn-lbb_Latn.jsonl.gz
split: train
- path: test/eng_Latn-lbb_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-lbb_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cbr_Latn
data_files:
- path: train/eng_Latn-cbr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cbr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cbr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-jvn_Latn
data_files:
- path: train/eng_Latn-jvn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-jvn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-jvn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-huu_Latn
data_files:
- path: train/eng_Latn-huu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-huu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-huu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kyq_Latn
data_files:
- path: train/eng_Latn-kyq_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kyq_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kyq_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-lex_Latn
data_files:
- path: train/eng_Latn-lex_Latn.jsonl.gz
split: train
- path: test/eng_Latn-lex_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-lex_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-lug_Latn
data_files:
- path: train/eng_Latn-lug_Latn.jsonl.gz
split: train
- path: test/eng_Latn-lug_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-lug_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tbc_Latn
data_files:
- path: train/eng_Latn-tbc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tbc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tbc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-srm_Latn
data_files:
- path: train/eng_Latn-srm_Latn.jsonl.gz
split: train
- path: test/eng_Latn-srm_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-srm_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ztq_Latn
data_files:
- path: train/eng_Latn-ztq_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ztq_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ztq_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-clu_Latn
data_files:
- path: train/eng_Latn-clu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-clu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-clu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wol_Latn
data_files:
- path: train/eng_Latn-wol_Latn.jsonl.gz
split: train
- path: test/eng_Latn-wol_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-wol_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wrk_Latn
data_files:
- path: train/eng_Latn-wrk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-wrk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-wrk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ssg_Latn
data_files:
- path: train/eng_Latn-ssg_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ssg_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ssg_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tha_Thai
data_files:
- path: train/eng_Latn-tha_Thai.jsonl.gz
split: train
- path: test/eng_Latn-tha_Thai.jsonl.gz
split: test
- path: validation/eng_Latn-tha_Thai.jsonl.gz
split: validation
- config_name: eng_Latn-gub_Latn
data_files:
- path: train/eng_Latn-gub_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gub_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gub_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-rop_Latn
data_files:
- path: train/eng_Latn-rop_Latn.jsonl.gz
split: train
- path: test/eng_Latn-rop_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-rop_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ind_Latn
data_files:
- path: train/eng_Latn-ind_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ind_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ind_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-urb_Latn
data_files:
- path: train/eng_Latn-urb_Latn.jsonl.gz
split: train
- path: test/eng_Latn-urb_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-urb_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ziw_Latn
data_files:
- path: train/eng_Latn-ziw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ziw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ziw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-waj_Latn
data_files:
- path: train/eng_Latn-waj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-waj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-waj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tku_Latn
data_files:
- path: train/eng_Latn-tku_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tku_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tku_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-pao_Latn
data_files:
- path: train/eng_Latn-pao_Latn.jsonl.gz
split: train
- path: test/eng_Latn-pao_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-pao_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tet_Latn
data_files:
- path: train/eng_Latn-tet_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tet_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tet_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-msc_Latn
data_files:
- path: train/eng_Latn-msc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-msc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-msc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wal_Ethi
data_files:
- path: train/eng_Latn-wal_Ethi.jsonl.gz
split: train
- path: test/eng_Latn-wal_Ethi.jsonl.gz
split: test
- path: validation/eng_Latn-wal_Ethi.jsonl.gz
split: validation
- config_name: eng_Latn-bmu_Latn
data_files:
- path: train/eng_Latn-bmu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bmu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bmu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-yaq_Latn
data_files:
- path: train/eng_Latn-yaq_Latn.jsonl.gz
split: train
- path: test/eng_Latn-yaq_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-yaq_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bgt_Latn
data_files:
- path: train/eng_Latn-bgt_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bgt_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bgt_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-atb_Latn
data_files:
- path: train/eng_Latn-atb_Latn.jsonl.gz
split: train
- path: test/eng_Latn-atb_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-atb_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-apn_Latn
data_files:
- path: train/eng_Latn-apn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-apn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-apn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-por_Latn
data_files:
- path: train/eng_Latn-por_Latn.jsonl.gz
split: train
- path: test/eng_Latn-por_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-por_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-quf_Latn
data_files:
- path: train/eng_Latn-quf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-quf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-quf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-prf_Latn
data_files:
- path: train/eng_Latn-prf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-prf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-prf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ndj_Latn
data_files:
- path: train/eng_Latn-ndj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ndj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ndj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-hub_Latn
data_files:
- path: train/eng_Latn-hub_Latn.jsonl.gz
split: train
- path: test/eng_Latn-hub_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-hub_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kvn_Latn
data_files:
- path: train/eng_Latn-kvn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kvn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kvn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-swe_Latn
data_files:
- path: train/eng_Latn-swe_Latn.jsonl.gz
split: train
- path: test/eng_Latn-swe_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-swe_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-xtd_Latn
data_files:
- path: train/eng_Latn-xtd_Latn.jsonl.gz
split: train
- path: test/eng_Latn-xtd_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-xtd_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-blz_Latn
data_files:
- path: train/eng_Latn-blz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-blz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-blz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-arb_Arab
data_files:
- path: train/eng_Latn-arb_Arab.jsonl.gz
split: train
- path: test/eng_Latn-arb_Arab.jsonl.gz
split: test
- path: validation/eng_Latn-arb_Arab.jsonl.gz
split: validation
- config_name: eng_Latn-gdr_Latn
data_files:
- path: train/eng_Latn-gdr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gdr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gdr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ksd_Latn
data_files:
- path: train/eng_Latn-ksd_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ksd_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ksd_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-toj_Latn
data_files:
- path: train/eng_Latn-toj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-toj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-toj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-arp_Latn
data_files:
- path: train/eng_Latn-arp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-arp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-arp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cnt_Latn
data_files:
- path: train/eng_Latn-cnt_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cnt_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cnt_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-aoj_Latn
data_files:
- path: train/eng_Latn-aoj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-aoj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-aoj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-fai_Latn
data_files:
- path: train/eng_Latn-fai_Latn.jsonl.gz
split: train
- path: test/eng_Latn-fai_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-fai_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-far_Latn
data_files:
- path: train/eng_Latn-far_Latn.jsonl.gz
split: train
- path: test/eng_Latn-far_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-far_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ons_Latn
data_files:
- path: train/eng_Latn-ons_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ons_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ons_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-emi_Latn
data_files:
- path: train/eng_Latn-emi_Latn.jsonl.gz
split: train
- path: test/eng_Latn-emi_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-emi_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-yre_Latn
data_files:
- path: train/eng_Latn-yre_Latn.jsonl.gz
split: train
- path: test/eng_Latn-yre_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-yre_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zpz_Latn
data_files:
- path: train/eng_Latn-zpz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zpz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zpz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-yss_Latn
data_files:
- path: train/eng_Latn-yss_Latn.jsonl.gz
split: train
- path: test/eng_Latn-yss_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-yss_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kos_Latn
data_files:
- path: train/eng_Latn-kos_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kos_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kos_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-reg_Latn
data_files:
- path: train/eng_Latn-reg_Latn.jsonl.gz
split: train
- path: test/eng_Latn-reg_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-reg_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-rro_Latn
data_files:
- path: train/eng_Latn-rro_Latn.jsonl.gz
split: train
- path: test/eng_Latn-rro_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-rro_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-apz_Latn
data_files:
- path: train/eng_Latn-apz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-apz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-apz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-boj_Latn
data_files:
- path: train/eng_Latn-boj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-boj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-boj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-hla_Latn
data_files:
- path: train/eng_Latn-hla_Latn.jsonl.gz
split: train
- path: test/eng_Latn-hla_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-hla_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gyr_Latn
data_files:
- path: train/eng_Latn-gyr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gyr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gyr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ukr_Cyrl
data_files:
- path: train/eng_Latn-ukr_Cyrl.jsonl.gz
split: train
- path: test/eng_Latn-ukr_Cyrl.jsonl.gz
split: test
- path: validation/eng_Latn-ukr_Cyrl.jsonl.gz
split: validation
- config_name: eng_Latn-gvs_Latn
data_files:
- path: train/eng_Latn-gvs_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gvs_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gvs_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mil_Latn
data_files:
- path: train/eng_Latn-mil_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mil_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mil_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gul_Latn
data_files:
- path: train/eng_Latn-gul_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gul_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gul_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ood_Latn
data_files:
- path: train/eng_Latn-ood_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ood_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ood_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ewe_Latn
data_files:
- path: train/eng_Latn-ewe_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ewe_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ewe_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-qul_Latn
data_files:
- path: train/eng_Latn-qul_Latn.jsonl.gz
split: train
- path: test/eng_Latn-qul_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-qul_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kbm_Latn
data_files:
- path: train/eng_Latn-kbm_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kbm_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kbm_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mih_Latn
data_files:
- path: train/eng_Latn-mih_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mih_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mih_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-maq_Latn
data_files:
- path: train/eng_Latn-maq_Latn.jsonl.gz
split: train
- path: test/eng_Latn-maq_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-maq_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bbr_Latn
data_files:
- path: train/eng_Latn-bbr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bbr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bbr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cbu_Latn
data_files:
- path: train/eng_Latn-cbu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cbu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cbu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-meq_Latn
data_files:
- path: train/eng_Latn-meq_Latn.jsonl.gz
split: train
- path: test/eng_Latn-meq_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-meq_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bmk_Latn
data_files:
- path: train/eng_Latn-bmk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bmk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bmk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-hui_Latn
data_files:
- path: train/eng_Latn-hui_Latn.jsonl.gz
split: train
- path: test/eng_Latn-hui_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-hui_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tgl_Latn
data_files:
- path: train/eng_Latn-tgl_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tgl_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tgl_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-lgl_Latn
data_files:
- path: train/eng_Latn-lgl_Latn.jsonl.gz
split: train
- path: test/eng_Latn-lgl_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-lgl_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zpq_Latn
data_files:
- path: train/eng_Latn-zpq_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zpq_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zpq_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mti_Latn
data_files:
- path: train/eng_Latn-mti_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mti_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mti_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-pah_Latn
data_files:
- path: train/eng_Latn-pah_Latn.jsonl.gz
split: train
- path: test/eng_Latn-pah_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-pah_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nch_Latn
data_files:
- path: train/eng_Latn-nch_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nch_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nch_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mjc_Latn
data_files:
- path: train/eng_Latn-mjc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mjc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mjc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zty_Latn
data_files:
- path: train/eng_Latn-zty_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zty_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zty_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ksj_Latn
data_files:
- path: train/eng_Latn-ksj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ksj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ksj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nvm_Latn
data_files:
- path: train/eng_Latn-nvm_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nvm_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nvm_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kyc_Latn
data_files:
- path: train/eng_Latn-kyc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kyc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kyc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bao_Latn
data_files:
- path: train/eng_Latn-bao_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bao_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bao_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zas_Latn
data_files:
- path: train/eng_Latn-zas_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zas_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zas_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-djr_Latn
data_files:
- path: train/eng_Latn-djr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-djr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-djr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bpr_Latn
data_files:
- path: train/eng_Latn-bpr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bpr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bpr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ndg_Latn
data_files:
- path: train/eng_Latn-ndg_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ndg_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ndg_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ots_Latn
data_files:
- path: train/eng_Latn-ots_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ots_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ots_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-miz_Latn
data_files:
- path: train/eng_Latn-miz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-miz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-miz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cco_Latn
data_files:
- path: train/eng_Latn-cco_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cco_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cco_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mbc_Latn
data_files:
- path: train/eng_Latn-mbc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mbc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mbc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-myy_Latn
data_files:
- path: train/eng_Latn-myy_Latn.jsonl.gz
split: train
- path: test/eng_Latn-myy_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-myy_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-att_Latn
data_files:
- path: train/eng_Latn-att_Latn.jsonl.gz
split: train
- path: test/eng_Latn-att_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-att_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-aly_Latn
data_files:
- path: train/eng_Latn-aly_Latn.jsonl.gz
split: train
- path: test/eng_Latn-aly_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-aly_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mgh_Latn
data_files:
- path: train/eng_Latn-mgh_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mgh_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mgh_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mqb_Latn
data_files:
- path: train/eng_Latn-mqb_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mqb_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mqb_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-sps_Latn
data_files:
- path: train/eng_Latn-sps_Latn.jsonl.gz
split: train
- path: test/eng_Latn-sps_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-sps_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wbi_Latn
data_files:
- path: train/eng_Latn-wbi_Latn.jsonl.gz
split: train
- path: test/eng_Latn-wbi_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-wbi_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-rai_Latn
data_files:
- path: train/eng_Latn-rai_Latn.jsonl.gz
split: train
- path: test/eng_Latn-rai_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-rai_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-knf_Latn
data_files:
- path: train/eng_Latn-knf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-knf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-knf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-txq_Latn
data_files:
- path: train/eng_Latn-txq_Latn.jsonl.gz
split: train
- path: test/eng_Latn-txq_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-txq_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cuk_Latn
data_files:
- path: train/eng_Latn-cuk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cuk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cuk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tew_Latn
data_files:
- path: train/eng_Latn-tew_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tew_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tew_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-aia_Latn
data_files:
- path: train/eng_Latn-aia_Latn.jsonl.gz
split: train
- path: test/eng_Latn-aia_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-aia_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ghs_Latn
data_files:
- path: train/eng_Latn-ghs_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ghs_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ghs_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kne_Latn
data_files:
- path: train/eng_Latn-kne_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kne_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kne_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-snp_Latn
data_files:
- path: train/eng_Latn-snp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-snp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-snp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-yon_Latn
data_files:
- path: train/eng_Latn-yon_Latn.jsonl.gz
split: train
- path: test/eng_Latn-yon_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-yon_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-rkb_Latn
data_files:
- path: train/eng_Latn-rkb_Latn.jsonl.gz
split: train
- path: test/eng_Latn-rkb_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-rkb_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mam_Latn
data_files:
- path: train/eng_Latn-mam_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mam_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mam_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ffm_Latn
data_files:
- path: train/eng_Latn-ffm_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ffm_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ffm_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tbo_Latn
data_files:
- path: train/eng_Latn-tbo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tbo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tbo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cta_Latn
data_files:
- path: train/eng_Latn-cta_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cta_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cta_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mca_Latn
data_files:
- path: train/eng_Latn-mca_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mca_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mca_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-smk_Latn
data_files:
- path: train/eng_Latn-smk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-smk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-smk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bqc_Latn
data_files:
- path: train/eng_Latn-bqc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bqc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bqc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-khz_Latn
data_files:
- path: train/eng_Latn-khz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-khz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-khz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ceb_Latn
data_files:
- path: train/eng_Latn-ceb_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ceb_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ceb_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nyu_Latn
data_files:
- path: train/eng_Latn-nyu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nyu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nyu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-hlt_Latn
data_files:
- path: train/eng_Latn-hlt_Latn.jsonl.gz
split: train
- path: test/eng_Latn-hlt_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-hlt_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-qvw_Latn
data_files:
- path: train/eng_Latn-qvw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-qvw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-qvw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-poy_Latn
data_files:
- path: train/eng_Latn-poy_Latn.jsonl.gz
split: train
- path: test/eng_Latn-poy_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-poy_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-jiv_Latn
data_files:
- path: train/eng_Latn-jiv_Latn.jsonl.gz
split: train
- path: test/eng_Latn-jiv_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-jiv_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mna_Latn
data_files:
- path: train/eng_Latn-mna_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mna_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mna_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-xsi_Latn
data_files:
- path: train/eng_Latn-xsi_Latn.jsonl.gz
split: train
- path: test/eng_Latn-xsi_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-xsi_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-crx_Latn
data_files:
- path: train/eng_Latn-crx_Latn.jsonl.gz
split: train
- path: test/eng_Latn-crx_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-crx_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-apb_Latn
data_files:
- path: train/eng_Latn-apb_Latn.jsonl.gz
split: train
- path: test/eng_Latn-apb_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-apb_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-imo_Latn
data_files:
- path: train/eng_Latn-imo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-imo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-imo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-fue_Latn
data_files:
- path: train/eng_Latn-fue_Latn.jsonl.gz
split: train
- path: test/eng_Latn-fue_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-fue_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bhl_Latn
data_files:
- path: train/eng_Latn-bhl_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bhl_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bhl_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tim_Latn
data_files:
- path: train/eng_Latn-tim_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tim_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tim_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mgw_Latn
data_files:
- path: train/eng_Latn-mgw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mgw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mgw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-taj_Deva
data_files:
- path: train/eng_Latn-taj_Deva.jsonl.gz
split: train
- path: test/eng_Latn-taj_Deva.jsonl.gz
split: test
- path: validation/eng_Latn-taj_Deva.jsonl.gz
split: validation
- config_name: eng_Latn-djk_Latn
data_files:
- path: train/eng_Latn-djk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-djk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-djk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ruf_Latn
data_files:
- path: train/eng_Latn-ruf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ruf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ruf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bqp_Latn
data_files:
- path: train/eng_Latn-bqp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bqp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bqp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-adz_Latn
data_files:
- path: train/eng_Latn-adz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-adz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-adz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bmr_Latn
data_files:
- path: train/eng_Latn-bmr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bmr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bmr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ata_Latn
data_files:
- path: train/eng_Latn-ata_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ata_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ata_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mio_Latn
data_files:
- path: train/eng_Latn-mio_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mio_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mio_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-pad_Latn
data_files:
- path: train/eng_Latn-pad_Latn.jsonl.gz
split: train
- path: test/eng_Latn-pad_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-pad_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-qxh_Latn
data_files:
- path: train/eng_Latn-qxh_Latn.jsonl.gz
split: train
- path: test/eng_Latn-qxh_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-qxh_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tfr_Latn
data_files:
- path: train/eng_Latn-tfr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tfr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tfr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mie_Latn
data_files:
- path: train/eng_Latn-mie_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mie_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mie_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tpi_Latn
data_files:
- path: train/eng_Latn-tpi_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tpi_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tpi_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-fuh_Latn
data_files:
- path: train/eng_Latn-fuh_Latn.jsonl.gz
split: train
- path: test/eng_Latn-fuh_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-fuh_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-dgr_Latn
data_files:
- path: train/eng_Latn-dgr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-dgr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-dgr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bch_Latn
data_files:
- path: train/eng_Latn-bch_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bch_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bch_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mcr_Latn
data_files:
- path: train/eng_Latn-mcr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mcr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mcr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bkx_Latn
data_files:
- path: train/eng_Latn-bkx_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bkx_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bkx_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-agr_Latn
data_files:
- path: train/eng_Latn-agr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-agr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-agr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-chq_Latn
data_files:
- path: train/eng_Latn-chq_Latn.jsonl.gz
split: train
- path: test/eng_Latn-chq_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-chq_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-rwo_Latn
data_files:
- path: train/eng_Latn-rwo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-rwo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-rwo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-esk_Latn
data_files:
- path: train/eng_Latn-esk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-esk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-esk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zpl_Latn
data_files:
- path: train/eng_Latn-zpl_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zpl_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zpl_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bjr_Latn
data_files:
- path: train/eng_Latn-bjr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bjr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bjr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kiw_Latn
data_files:
- path: train/eng_Latn-kiw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kiw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kiw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-azg_Latn
data_files:
- path: train/eng_Latn-azg_Latn.jsonl.gz
split: train
- path: test/eng_Latn-azg_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-azg_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mbb_Latn
data_files:
- path: train/eng_Latn-mbb_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mbb_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mbb_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-knj_Latn
data_files:
- path: train/eng_Latn-knj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-knj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-knj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cao_Latn
data_files:
- path: train/eng_Latn-cao_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cao_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cao_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-dji_Latn
data_files:
- path: train/eng_Latn-dji_Latn.jsonl.gz
split: train
- path: test/eng_Latn-dji_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-dji_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bss_Latn
data_files:
- path: train/eng_Latn-bss_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bss_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bss_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bgs_Latn
data_files:
- path: train/eng_Latn-bgs_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bgs_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bgs_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mek_Latn
data_files:
- path: train/eng_Latn-mek_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mek_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mek_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-yuj_Latn
data_files:
- path: train/eng_Latn-yuj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-yuj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-yuj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tpt_Latn
data_files:
- path: train/eng_Latn-tpt_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tpt_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tpt_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zos_Latn
data_files:
- path: train/eng_Latn-zos_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zos_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zos_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-blw_Latn
data_files:
- path: train/eng_Latn-blw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-blw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-blw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-viv_Latn
data_files:
- path: train/eng_Latn-viv_Latn.jsonl.gz
split: train
- path: test/eng_Latn-viv_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-viv_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ikw_Latn
data_files:
- path: train/eng_Latn-ikw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ikw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ikw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tue_Latn
data_files:
- path: train/eng_Latn-tue_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tue_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tue_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-uvh_Latn
data_files:
- path: train/eng_Latn-uvh_Latn.jsonl.gz
split: train
- path: test/eng_Latn-uvh_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-uvh_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-yap_Latn
data_files:
- path: train/eng_Latn-yap_Latn.jsonl.gz
split: train
- path: test/eng_Latn-yap_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-yap_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nca_Latn
data_files:
- path: train/eng_Latn-nca_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nca_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nca_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-luo_Latn
data_files:
- path: train/eng_Latn-luo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-luo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-luo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tmd_Latn
data_files:
- path: train/eng_Latn-tmd_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tmd_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tmd_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-txu_Latn
data_files:
- path: train/eng_Latn-txu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-txu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-txu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-yor_Latn
data_files:
- path: train/eng_Latn-yor_Latn.jsonl.gz
split: train
- path: test/eng_Latn-yor_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-yor_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-amx_Latn
data_files:
- path: train/eng_Latn-amx_Latn.jsonl.gz
split: train
- path: test/eng_Latn-amx_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-amx_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-uli_Latn
data_files:
- path: train/eng_Latn-uli_Latn.jsonl.gz
split: train
- path: test/eng_Latn-uli_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-uli_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-dov_Latn
data_files:
- path: train/eng_Latn-dov_Latn.jsonl.gz
split: train
- path: test/eng_Latn-dov_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-dov_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-huv_Latn
data_files:
- path: train/eng_Latn-huv_Latn.jsonl.gz
split: train
- path: test/eng_Latn-huv_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-huv_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-msk_Latn
data_files:
- path: train/eng_Latn-msk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-msk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-msk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-twi_Latn
data_files:
- path: train/eng_Latn-twi_Latn.jsonl.gz
split: train
- path: test/eng_Latn-twi_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-twi_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-aer_Latn
data_files:
- path: train/eng_Latn-aer_Latn.jsonl.gz
split: train
- path: test/eng_Latn-aer_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-aer_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-pib_Latn
data_files:
- path: train/eng_Latn-pib_Latn.jsonl.gz
split: train
- path: test/eng_Latn-pib_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-pib_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ter_Latn
data_files:
- path: train/eng_Latn-ter_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ter_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ter_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-eri_Latn
data_files:
- path: train/eng_Latn-eri_Latn.jsonl.gz
split: train
- path: test/eng_Latn-eri_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-eri_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cth_Latn
data_files:
- path: train/eng_Latn-cth_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cth_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cth_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-dwr_Latn
data_files:
- path: train/eng_Latn-dwr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-dwr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-dwr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-srq_Latn
data_files:
- path: train/eng_Latn-srq_Latn.jsonl.gz
split: train
- path: test/eng_Latn-srq_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-srq_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mmx_Latn
data_files:
- path: train/eng_Latn-mmx_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mmx_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mmx_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cap_Latn
data_files:
- path: train/eng_Latn-cap_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cap_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cap_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ces_Latn
data_files:
- path: train/eng_Latn-ces_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ces_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ces_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cpu_Latn
data_files:
- path: train/eng_Latn-cpu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cpu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cpu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-lww_Latn
data_files:
- path: train/eng_Latn-lww_Latn.jsonl.gz
split: train
- path: test/eng_Latn-lww_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-lww_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-hix_Latn
data_files:
- path: train/eng_Latn-hix_Latn.jsonl.gz
split: train
- path: test/eng_Latn-hix_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-hix_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cab_Latn
data_files:
- path: train/eng_Latn-cab_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cab_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cab_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-fuf_Latn
data_files:
- path: train/eng_Latn-fuf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-fuf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-fuf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mcf_Latn
data_files:
- path: train/eng_Latn-mcf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mcf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mcf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-shj_Latn
data_files:
- path: train/eng_Latn-shj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-shj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-shj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-qwh_Latn
data_files:
- path: train/eng_Latn-qwh_Latn.jsonl.gz
split: train
- path: test/eng_Latn-qwh_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-qwh_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zsr_Latn
data_files:
- path: train/eng_Latn-zsr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zsr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zsr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-daa_Latn
data_files:
- path: train/eng_Latn-daa_Latn.jsonl.gz
split: train
- path: test/eng_Latn-daa_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-daa_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-sus_Arab
data_files:
- path: train/eng_Latn-sus_Arab.jsonl.gz
split: train
- path: test/eng_Latn-sus_Arab.jsonl.gz
split: test
- path: validation/eng_Latn-sus_Arab.jsonl.gz
split: validation
- config_name: eng_Latn-lbk_Latn
data_files:
- path: train/eng_Latn-lbk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-lbk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-lbk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nii_Latn
data_files:
- path: train/eng_Latn-nii_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nii_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nii_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gvn_Latn
data_files:
- path: train/eng_Latn-gvn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gvn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gvn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-yaa_Latn
data_files:
- path: train/eng_Latn-yaa_Latn.jsonl.gz
split: train
- path: test/eng_Latn-yaa_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-yaa_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-npi_Deva
data_files:
- path: train/eng_Latn-npi_Deva.jsonl.gz
split: train
- path: test/eng_Latn-npi_Deva.jsonl.gz
split: test
- path: validation/eng_Latn-npi_Deva.jsonl.gz
split: validation
- config_name: eng_Latn-uig_Latn
data_files:
- path: train/eng_Latn-uig_Latn.jsonl.gz
split: train
- path: test/eng_Latn-uig_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-uig_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mmo_Latn
data_files:
- path: train/eng_Latn-mmo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mmo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mmo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ktm_Latn
data_files:
- path: train/eng_Latn-ktm_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ktm_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ktm_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-agu_Latn
data_files:
- path: train/eng_Latn-agu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-agu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-agu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-agg_Latn
data_files:
- path: train/eng_Latn-agg_Latn.jsonl.gz
split: train
- path: test/eng_Latn-agg_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-agg_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ken_Latn
data_files:
- path: train/eng_Latn-ken_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ken_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ken_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-beu_Latn
data_files:
- path: train/eng_Latn-beu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-beu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-beu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cac_Latn
data_files:
- path: train/eng_Latn-cac_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cac_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cac_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-uri_Latn
data_files:
- path: train/eng_Latn-uri_Latn.jsonl.gz
split: train
- path: test/eng_Latn-uri_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-uri_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-dah_Latn
data_files:
- path: train/eng_Latn-dah_Latn.jsonl.gz
split: train
- path: test/eng_Latn-dah_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-dah_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-otn_Latn
data_files:
- path: train/eng_Latn-otn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-otn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-otn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wos_Latn
data_files:
- path: train/eng_Latn-wos_Latn.jsonl.gz
split: train
- path: test/eng_Latn-wos_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-wos_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-hin_Deva
data_files:
- path: train/eng_Latn-hin_Deva.jsonl.gz
split: train
- path: test/eng_Latn-hin_Deva.jsonl.gz
split: test
- path: validation/eng_Latn-hin_Deva.jsonl.gz
split: validation
- config_name: eng_Latn-ctu_Latn
data_files:
- path: train/eng_Latn-ctu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ctu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ctu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-pes_Arab
data_files:
- path: train/eng_Latn-pes_Arab.jsonl.gz
split: train
- path: test/eng_Latn-pes_Arab.jsonl.gz
split: test
- path: validation/eng_Latn-pes_Arab.jsonl.gz
split: validation
- config_name: eng_Latn-tbf_Latn
data_files:
- path: train/eng_Latn-tbf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tbf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tbf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bsj_Latn
data_files:
- path: train/eng_Latn-bsj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bsj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bsj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-aey_Latn
data_files:
- path: train/eng_Latn-aey_Latn.jsonl.gz
split: train
- path: test/eng_Latn-aey_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-aey_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-qxn_Latn
data_files:
- path: train/eng_Latn-qxn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-qxn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-qxn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-rug_Latn
data_files:
- path: train/eng_Latn-rug_Latn.jsonl.gz
split: train
- path: test/eng_Latn-rug_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-rug_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nwi_Latn
data_files:
- path: train/eng_Latn-nwi_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nwi_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nwi_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-spl_Latn
data_files:
- path: train/eng_Latn-spl_Latn.jsonl.gz
split: train
- path: test/eng_Latn-spl_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-spl_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kan_Knda
data_files:
- path: train/eng_Latn-kan_Knda.jsonl.gz
split: train
- path: test/eng_Latn-kan_Knda.jsonl.gz
split: test
- path: validation/eng_Latn-kan_Knda.jsonl.gz
split: validation
- config_name: eng_Latn-dif_Latn
data_files:
- path: train/eng_Latn-dif_Latn.jsonl.gz
split: train
- path: test/eng_Latn-dif_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-dif_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cpa_Latn
data_files:
- path: train/eng_Latn-cpa_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cpa_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cpa_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mau_Latn
data_files:
- path: train/eng_Latn-mau_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mau_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mau_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ron_Latn
data_files:
- path: train/eng_Latn-ron_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ron_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ron_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-dop_Latn
data_files:
- path: train/eng_Latn-dop_Latn.jsonl.gz
split: train
- path: test/eng_Latn-dop_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-dop_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-hau_Latn
data_files:
- path: train/eng_Latn-hau_Latn.jsonl.gz
split: train
- path: test/eng_Latn-hau_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-hau_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gnn_Latn
data_files:
- path: train/eng_Latn-gnn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gnn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gnn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bsn_Latn
data_files:
- path: train/eng_Latn-bsn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bsn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bsn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kpj_Latn
data_files:
- path: train/eng_Latn-kpj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kpj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kpj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wat_Latn
data_files:
- path: train/eng_Latn-wat_Latn.jsonl.gz
split: train
- path: test/eng_Latn-wat_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-wat_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-acr_Latn
data_files:
- path: train/eng_Latn-acr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-acr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-acr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-caf_Latn
data_files:
- path: train/eng_Latn-caf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-caf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-caf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-dhg_Latn
data_files:
- path: train/eng_Latn-dhg_Latn.jsonl.gz
split: train
- path: test/eng_Latn-dhg_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-dhg_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-yml_Latn
data_files:
- path: train/eng_Latn-yml_Latn.jsonl.gz
split: train
- path: test/eng_Latn-yml_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-yml_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-atd_Latn
data_files:
- path: train/eng_Latn-atd_Latn.jsonl.gz
split: train
- path: test/eng_Latn-atd_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-atd_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bbb_Latn
data_files:
- path: train/eng_Latn-bbb_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bbb_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bbb_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cle_Latn
data_files:
- path: train/eng_Latn-cle_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cle_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cle_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-myk_Latn
data_files:
- path: train/eng_Latn-myk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-myk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-myk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bxh_Latn
data_files:
- path: train/eng_Latn-bxh_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bxh_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bxh_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tpa_Latn
data_files:
- path: train/eng_Latn-tpa_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tpa_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tpa_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-awk_Latn
data_files:
- path: train/eng_Latn-awk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-awk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-awk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gfk_Latn
data_files:
- path: train/eng_Latn-gfk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gfk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gfk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mph_Latn
data_files:
- path: train/eng_Latn-mph_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mph_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mph_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-csy_Latn
data_files:
- path: train/eng_Latn-csy_Latn.jsonl.gz
split: train
- path: test/eng_Latn-csy_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-csy_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tgp_Latn
data_files:
- path: train/eng_Latn-tgp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tgp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tgp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zia_Latn
data_files:
- path: train/eng_Latn-zia_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zia_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zia_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-msm_Latn
data_files:
- path: train/eng_Latn-msm_Latn.jsonl.gz
split: train
- path: test/eng_Latn-msm_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-msm_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kql_Latn
data_files:
- path: train/eng_Latn-kql_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kql_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kql_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wnu_Latn
data_files:
- path: train/eng_Latn-wnu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-wnu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-wnu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nin_Latn
data_files:
- path: train/eng_Latn-nin_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nin_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nin_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kmo_Latn
data_files:
- path: train/eng_Latn-kmo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kmo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kmo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mpx_Latn
data_files:
- path: train/eng_Latn-mpx_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mpx_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mpx_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nas_Latn
data_files:
- path: train/eng_Latn-nas_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nas_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nas_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ulk_Latn
data_files:
- path: train/eng_Latn-ulk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ulk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ulk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ipi_Latn
data_files:
- path: train/eng_Latn-ipi_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ipi_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ipi_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mgc_Latn
data_files:
- path: train/eng_Latn-mgc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mgc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mgc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ape_Latn
data_files:
- path: train/eng_Latn-ape_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ape_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ape_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tac_Latn
data_files:
- path: train/eng_Latn-tac_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tac_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tac_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bzd_Latn
data_files:
- path: train/eng_Latn-bzd_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bzd_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bzd_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-amm_Latn
data_files:
- path: train/eng_Latn-amm_Latn.jsonl.gz
split: train
- path: test/eng_Latn-amm_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-amm_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mic_Latn
data_files:
- path: train/eng_Latn-mic_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mic_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mic_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-sxb_Latn
data_files:
- path: train/eng_Latn-sxb_Latn.jsonl.gz
split: train
- path: test/eng_Latn-sxb_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-sxb_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cux_Latn
data_files:
- path: train/eng_Latn-cux_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cux_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cux_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ixl_Latn
data_files:
- path: train/eng_Latn-ixl_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ixl_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ixl_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nif_Latn
data_files:
- path: train/eng_Latn-nif_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nif_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nif_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-isn_Latn
data_files:
- path: train/eng_Latn-isn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-isn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-isn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cmn_Hans
data_files:
- path: train/eng_Latn-cmn_Hans.jsonl.gz
split: train
- path: test/eng_Latn-cmn_Hans.jsonl.gz
split: test
- path: validation/eng_Latn-cmn_Hans.jsonl.gz
split: validation
- config_name: eng_Latn-kyf_Latn
data_files:
- path: train/eng_Latn-kyf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kyf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kyf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cut_Latn
data_files:
- path: train/eng_Latn-cut_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cut_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cut_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-lcm_Latn
data_files:
- path: train/eng_Latn-lcm_Latn.jsonl.gz
split: train
- path: test/eng_Latn-lcm_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-lcm_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nya_Latn
data_files:
- path: train/eng_Latn-nya_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nya_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nya_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kjs_Latn
data_files:
- path: train/eng_Latn-kjs_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kjs_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kjs_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ton_Latn
data_files:
- path: train/eng_Latn-ton_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ton_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ton_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-qvs_Latn
data_files:
- path: train/eng_Latn-qvs_Latn.jsonl.gz
split: train
- path: test/eng_Latn-qvs_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-qvs_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ino_Latn
data_files:
- path: train/eng_Latn-ino_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ino_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ino_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zca_Latn
data_files:
- path: train/eng_Latn-zca_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zca_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zca_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-xav_Latn
data_files:
- path: train/eng_Latn-xav_Latn.jsonl.gz
split: train
- path: test/eng_Latn-xav_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-xav_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-jac_Latn
data_files:
- path: train/eng_Latn-jac_Latn.jsonl.gz
split: train
- path: test/eng_Latn-jac_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-jac_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-quc_Latn
data_files:
- path: train/eng_Latn-quc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-quc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-quc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-npl_Latn
data_files:
- path: train/eng_Latn-npl_Latn.jsonl.gz
split: train
- path: test/eng_Latn-npl_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-npl_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-usa_Latn
data_files:
- path: train/eng_Latn-usa_Latn.jsonl.gz
split: train
- path: test/eng_Latn-usa_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-usa_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kvg_Latn
data_files:
- path: train/eng_Latn-kvg_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kvg_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kvg_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tee_Latn
data_files:
- path: train/eng_Latn-tee_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tee_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tee_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-hot_Latn
data_files:
- path: train/eng_Latn-hot_Latn.jsonl.gz
split: train
- path: test/eng_Latn-hot_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-hot_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-acf_Latn
data_files:
- path: train/eng_Latn-acf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-acf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-acf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wiu_Latn
data_files:
- path: train/eng_Latn-wiu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-wiu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-wiu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-rmc_Latn
data_files:
- path: train/eng_Latn-rmc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-rmc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-rmc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-snx_Latn
data_files:
- path: train/eng_Latn-snx_Latn.jsonl.gz
split: train
- path: test/eng_Latn-snx_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-snx_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-jpn_Jpan
data_files:
- path: train/eng_Latn-jpn_Jpan.jsonl.gz
split: train
- path: test/eng_Latn-jpn_Jpan.jsonl.gz
split: test
- path: validation/eng_Latn-jpn_Jpan.jsonl.gz
split: validation
- config_name: eng_Latn-tbg_Latn
data_files:
- path: train/eng_Latn-tbg_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tbg_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tbg_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-pwg_Latn
data_files:
- path: train/eng_Latn-pwg_Latn.jsonl.gz
split: train
- path: test/eng_Latn-pwg_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-pwg_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nhr_Latn
data_files:
- path: train/eng_Latn-nhr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nhr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nhr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mwe_Latn
data_files:
- path: train/eng_Latn-mwe_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mwe_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mwe_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kbq_Latn
data_files:
- path: train/eng_Latn-kbq_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kbq_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kbq_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-myw_Latn
data_files:
- path: train/eng_Latn-myw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-myw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-myw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-jni_Latn
data_files:
- path: train/eng_Latn-jni_Latn.jsonl.gz
split: train
- path: test/eng_Latn-jni_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-jni_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-vmy_Latn
data_files:
- path: train/eng_Latn-vmy_Latn.jsonl.gz
split: train
- path: test/eng_Latn-vmy_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-vmy_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zpv_Latn
data_files:
- path: train/eng_Latn-zpv_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zpv_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zpv_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-heb_Hebr
data_files:
- path: train/eng_Latn-heb_Hebr.jsonl.gz
split: train
- path: test/eng_Latn-heb_Hebr.jsonl.gz
split: test
- path: validation/eng_Latn-heb_Hebr.jsonl.gz
split: validation
- config_name: eng_Latn-bki_Latn
data_files:
- path: train/eng_Latn-bki_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bki_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bki_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mee_Latn
data_files:
- path: train/eng_Latn-mee_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mee_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mee_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-aau_Latn
data_files:
- path: train/eng_Latn-aau_Latn.jsonl.gz
split: train
- path: test/eng_Latn-aau_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-aau_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-aaz_Latn
data_files:
- path: train/eng_Latn-aaz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-aaz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-aaz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-aoi_Latn
data_files:
- path: train/eng_Latn-aoi_Latn.jsonl.gz
split: train
- path: test/eng_Latn-aoi_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-aoi_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-caa_Latn
data_files:
- path: train/eng_Latn-caa_Latn.jsonl.gz
split: train
- path: test/eng_Latn-caa_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-caa_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zap_Latn
data_files:
- path: train/eng_Latn-zap_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zap_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zap_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-amk_Latn
data_files:
- path: train/eng_Latn-amk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-amk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-amk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zpo_Latn
data_files:
- path: train/eng_Latn-zpo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zpo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zpo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-aom_Latn
data_files:
- path: train/eng_Latn-aom_Latn.jsonl.gz
split: train
- path: test/eng_Latn-aom_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-aom_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zpu_Latn
data_files:
- path: train/eng_Latn-zpu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zpu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zpu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bdd_Latn
data_files:
- path: train/eng_Latn-bdd_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bdd_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bdd_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-acu_Latn
data_files:
- path: train/eng_Latn-acu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-acu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-acu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-beo_Latn
data_files:
- path: train/eng_Latn-beo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-beo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-beo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cuc_Latn
data_files:
- path: train/eng_Latn-cuc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cuc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cuc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-otm_Latn
data_files:
- path: train/eng_Latn-otm_Latn.jsonl.gz
split: train
- path: test/eng_Latn-otm_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-otm_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tos_Latn
data_files:
- path: train/eng_Latn-tos_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tos_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tos_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-apu_Latn
data_files:
- path: train/eng_Latn-apu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-apu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-apu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-jic_Latn
data_files:
- path: train/eng_Latn-jic_Latn.jsonl.gz
split: train
- path: test/eng_Latn-jic_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-jic_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cek_Latn
data_files:
- path: train/eng_Latn-cek_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cek_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cek_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tnp_Latn
data_files:
- path: train/eng_Latn-tnp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tnp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tnp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-hns_Latn
data_files:
- path: train/eng_Latn-hns_Latn.jsonl.gz
split: train
- path: test/eng_Latn-hns_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-hns_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mpt_Latn
data_files:
- path: train/eng_Latn-mpt_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mpt_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mpt_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kmg_Latn
data_files:
- path: train/eng_Latn-kmg_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kmg_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kmg_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nhg_Latn
data_files:
- path: train/eng_Latn-nhg_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nhg_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nhg_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-yle_Latn
data_files:
- path: train/eng_Latn-yle_Latn.jsonl.gz
split: train
- path: test/eng_Latn-yle_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-yle_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-yka_Latn
data_files:
- path: train/eng_Latn-yka_Latn.jsonl.gz
split: train
- path: test/eng_Latn-yka_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-yka_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-maj_Latn
data_files:
- path: train/eng_Latn-maj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-maj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-maj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-agt_Latn
data_files:
- path: train/eng_Latn-agt_Latn.jsonl.gz
split: train
- path: test/eng_Latn-agt_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-agt_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-san_Latn
data_files:
- path: train/eng_Latn-san_Latn.jsonl.gz
split: train
- path: test/eng_Latn-san_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-san_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kew_Latn
data_files:
- path: train/eng_Latn-kew_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kew_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kew_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nop_Latn
data_files:
- path: train/eng_Latn-nop_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nop_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nop_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zyp_Latn
data_files:
- path: train/eng_Latn-zyp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zyp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zyp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-qvm_Latn
data_files:
- path: train/eng_Latn-qvm_Latn.jsonl.gz
split: train
- path: test/eng_Latn-qvm_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-qvm_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mwp_Latn
data_files:
- path: train/eng_Latn-mwp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mwp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mwp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nhw_Latn
data_files:
- path: train/eng_Latn-nhw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nhw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nhw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-als_Latn
data_files:
- path: train/eng_Latn-als_Latn.jsonl.gz
split: train
- path: test/eng_Latn-als_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-als_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cof_Latn
data_files:
- path: train/eng_Latn-cof_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cof_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cof_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-auy_Latn
data_files:
- path: train/eng_Latn-auy_Latn.jsonl.gz
split: train
- path: test/eng_Latn-auy_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-auy_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-pol_Latn
data_files:
- path: train/eng_Latn-pol_Latn.jsonl.gz
split: train
- path: test/eng_Latn-pol_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-pol_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-klt_Latn
data_files:
- path: train/eng_Latn-klt_Latn.jsonl.gz
split: train
- path: test/eng_Latn-klt_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-klt_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-soy_Latn
data_files:
- path: train/eng_Latn-soy_Latn.jsonl.gz
split: train
- path: test/eng_Latn-soy_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-soy_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ita_Latn
data_files:
- path: train/eng_Latn-ita_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ita_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ita_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-arn_Latn
data_files:
- path: train/eng_Latn-arn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-arn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-arn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tbz_Latn
data_files:
- path: train/eng_Latn-tbz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tbz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tbz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zaa_Latn
data_files:
- path: train/eng_Latn-zaa_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zaa_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zaa_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-meu_Latn
data_files:
- path: train/eng_Latn-meu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-meu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-meu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zao_Latn
data_files:
- path: train/eng_Latn-zao_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zao_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zao_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mxp_Latn
data_files:
- path: train/eng_Latn-mxp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mxp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mxp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-rgu_Latn
data_files:
- path: train/eng_Latn-rgu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-rgu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-rgu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tnn_Latn
data_files:
- path: train/eng_Latn-tnn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tnn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tnn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-uvl_Latn
data_files:
- path: train/eng_Latn-uvl_Latn.jsonl.gz
split: train
- path: test/eng_Latn-uvl_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-uvl_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-eko_Latn
data_files:
- path: train/eng_Latn-eko_Latn.jsonl.gz
split: train
- path: test/eng_Latn-eko_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-eko_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wmt_Latn
data_files:
- path: train/eng_Latn-wmt_Latn.jsonl.gz
split: train
- path: test/eng_Latn-wmt_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-wmt_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kup_Latn
data_files:
- path: train/eng_Latn-kup_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kup_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kup_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zai_Latn
data_files:
- path: train/eng_Latn-zai_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zai_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zai_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ebk_Latn
data_files:
- path: train/eng_Latn-ebk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ebk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ebk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-vie_Latn
data_files:
- path: train/eng_Latn-vie_Latn.jsonl.gz
split: train
- path: test/eng_Latn-vie_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-vie_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-azz_Latn
data_files:
- path: train/eng_Latn-azz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-azz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-azz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wbp_Latn
data_files:
- path: train/eng_Latn-wbp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-wbp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-wbp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tvk_Latn
data_files:
- path: train/eng_Latn-tvk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tvk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tvk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ote_Latn
data_files:
- path: train/eng_Latn-ote_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ote_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ote_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ubu_Latn
data_files:
- path: train/eng_Latn-ubu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ubu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ubu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zga_Latn
data_files:
- path: train/eng_Latn-zga_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zga_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zga_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bps_Latn
data_files:
- path: train/eng_Latn-bps_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bps_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bps_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-swh_Latn
data_files:
- path: train/eng_Latn-swh_Latn.jsonl.gz
split: train
- path: test/eng_Latn-swh_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-swh_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ntp_Latn
data_files:
- path: train/eng_Latn-ntp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ntp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ntp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tav_Latn
data_files:
- path: train/eng_Latn-tav_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tav_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tav_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kms_Latn
data_files:
- path: train/eng_Latn-kms_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kms_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kms_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-pio_Latn
data_files:
- path: train/eng_Latn-pio_Latn.jsonl.gz
split: train
- path: test/eng_Latn-pio_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-pio_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-guj_Gujr
data_files:
- path: train/eng_Latn-guj_Gujr.jsonl.gz
split: train
- path: test/eng_Latn-guj_Gujr.jsonl.gz
split: test
- path: validation/eng_Latn-guj_Gujr.jsonl.gz
split: validation
- config_name: eng_Latn-mbl_Latn
data_files:
- path: train/eng_Latn-mbl_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mbl_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mbl_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-aii_Syrc
data_files:
- path: train/eng_Latn-aii_Syrc.jsonl.gz
split: train
- path: test/eng_Latn-aii_Syrc.jsonl.gz
split: test
- path: validation/eng_Latn-aii_Syrc.jsonl.gz
split: validation
- config_name: eng_Latn-mle_Latn
data_files:
- path: train/eng_Latn-mle_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mle_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mle_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zam_Latn
data_files:
- path: train/eng_Latn-zam_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zam_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zam_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-urt_Latn
data_files:
- path: train/eng_Latn-urt_Latn.jsonl.gz
split: train
- path: test/eng_Latn-urt_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-urt_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-guo_Latn
data_files:
- path: train/eng_Latn-guo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-guo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-guo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bkd_Latn
data_files:
- path: train/eng_Latn-bkd_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bkd_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bkd_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-rmy_Latn
data_files:
- path: train/eng_Latn-rmy_Latn.jsonl.gz
split: train
- path: test/eng_Latn-rmy_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-rmy_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ong_Latn
data_files:
- path: train/eng_Latn-ong_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ong_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ong_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mlp_Latn
data_files:
- path: train/eng_Latn-mlp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mlp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mlp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mir_Latn
data_files:
- path: train/eng_Latn-mir_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mir_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mir_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-med_Latn
data_files:
- path: train/eng_Latn-med_Latn.jsonl.gz
split: train
- path: test/eng_Latn-med_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-med_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bef_Latn
data_files:
- path: train/eng_Latn-bef_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bef_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bef_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-yuw_Latn
data_files:
- path: train/eng_Latn-yuw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-yuw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-yuw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ded_Latn
data_files:
- path: train/eng_Latn-ded_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ded_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ded_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ame_Latn
data_files:
- path: train/eng_Latn-ame_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ame_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ame_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-car_Latn
data_files:
- path: train/eng_Latn-car_Latn.jsonl.gz
split: train
- path: test/eng_Latn-car_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-car_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-chz_Latn
data_files:
- path: train/eng_Latn-chz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-chz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-chz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ubr_Latn
data_files:
- path: train/eng_Latn-ubr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ubr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ubr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mar_Deva
data_files:
- path: train/eng_Latn-mar_Deva.jsonl.gz
split: train
- path: test/eng_Latn-mar_Deva.jsonl.gz
split: test
- path: validation/eng_Latn-mar_Deva.jsonl.gz
split: validation
- config_name: eng_Latn-gun_Latn
data_files:
- path: train/eng_Latn-gun_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gun_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gun_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-pir_Latn
data_files:
- path: train/eng_Latn-pir_Latn.jsonl.gz
split: train
- path: test/eng_Latn-pir_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-pir_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-inb_Latn
data_files:
- path: train/eng_Latn-inb_Latn.jsonl.gz
split: train
- path: test/eng_Latn-inb_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-inb_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gym_Latn
data_files:
- path: train/eng_Latn-gym_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gym_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gym_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mit_Latn
data_files:
- path: train/eng_Latn-mit_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mit_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mit_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-enq_Latn
data_files:
- path: train/eng_Latn-enq_Latn.jsonl.gz
split: train
- path: test/eng_Latn-enq_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-enq_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kqf_Latn
data_files:
- path: train/eng_Latn-kqf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kqf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kqf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mbh_Latn
data_files:
- path: train/eng_Latn-mbh_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mbh_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mbh_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-xbi_Latn
data_files:
- path: train/eng_Latn-xbi_Latn.jsonl.gz
split: train
- path: test/eng_Latn-xbi_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-xbi_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gof_Latn
data_files:
- path: train/eng_Latn-gof_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gof_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gof_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-lat_Latn
data_files:
- path: train/eng_Latn-lat_Latn.jsonl.gz
split: train
- path: test/eng_Latn-lat_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-lat_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gah_Latn
data_files:
- path: train/eng_Latn-gah_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gah_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gah_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zav_Latn
data_files:
- path: train/eng_Latn-zav_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zav_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zav_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tnc_Latn
data_files:
- path: train/eng_Latn-tnc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tnc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tnc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-aso_Latn
data_files:
- path: train/eng_Latn-aso_Latn.jsonl.gz
split: train
- path: test/eng_Latn-aso_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-aso_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cax_Latn
data_files:
- path: train/eng_Latn-cax_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cax_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cax_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-xtm_Latn
data_files:
- path: train/eng_Latn-xtm_Latn.jsonl.gz
split: train
- path: test/eng_Latn-xtm_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-xtm_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-llg_Latn
data_files:
- path: train/eng_Latn-llg_Latn.jsonl.gz
split: train
- path: test/eng_Latn-llg_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-llg_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-pls_Latn
data_files:
- path: train/eng_Latn-pls_Latn.jsonl.gz
split: train
- path: test/eng_Latn-pls_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-pls_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nhi_Latn
data_files:
- path: train/eng_Latn-nhi_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nhi_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nhi_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-leu_Latn
data_files:
- path: train/eng_Latn-leu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-leu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-leu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-agn_Latn
data_files:
- path: train/eng_Latn-agn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-agn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-agn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-hus_Latn
data_files:
- path: train/eng_Latn-hus_Latn.jsonl.gz
split: train
- path: test/eng_Latn-hus_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-hus_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-hvn_Latn
data_files:
- path: train/eng_Latn-hvn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-hvn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-hvn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gup_Latn
data_files:
- path: train/eng_Latn-gup_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gup_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gup_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ncu_Latn
data_files:
- path: train/eng_Latn-ncu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ncu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ncu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ilo_Latn
data_files:
- path: train/eng_Latn-ilo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ilo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ilo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cjv_Latn
data_files:
- path: train/eng_Latn-cjv_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cjv_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cjv_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cbi_Latn
data_files:
- path: train/eng_Latn-cbi_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cbi_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cbi_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-sll_Latn
data_files:
- path: train/eng_Latn-sll_Latn.jsonl.gz
split: train
- path: test/eng_Latn-sll_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-sll_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gvf_Latn
data_files:
- path: train/eng_Latn-gvf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gvf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gvf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cbk_Latn
data_files:
- path: train/eng_Latn-cbk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cbk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cbk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ctp_Latn
data_files:
- path: train/eng_Latn-ctp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ctp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ctp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-rus_Cyrl
data_files:
- path: train/eng_Latn-rus_Cyrl.jsonl.gz
split: train
- path: test/eng_Latn-rus_Cyrl.jsonl.gz
split: test
- path: validation/eng_Latn-rus_Cyrl.jsonl.gz
split: validation
- config_name: eng_Latn-zpc_Latn
data_files:
- path: train/eng_Latn-zpc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zpc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zpc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-dww_Latn
data_files:
- path: train/eng_Latn-dww_Latn.jsonl.gz
split: train
- path: test/eng_Latn-dww_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-dww_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-haw_Latn
data_files:
- path: train/eng_Latn-haw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-haw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-haw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-hrv_Latn
data_files:
- path: train/eng_Latn-hrv_Latn.jsonl.gz
split: train
- path: test/eng_Latn-hrv_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-hrv_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bhg_Latn
data_files:
- path: train/eng_Latn-bhg_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bhg_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bhg_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kyg_Latn
data_files:
- path: train/eng_Latn-kyg_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kyg_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kyg_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-are_Latn
data_files:
- path: train/eng_Latn-are_Latn.jsonl.gz
split: train
- path: test/eng_Latn-are_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-are_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-pma_Latn
data_files:
- path: train/eng_Latn-pma_Latn.jsonl.gz
split: train
- path: test/eng_Latn-pma_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-pma_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tcz_Latn
data_files:
- path: train/eng_Latn-tcz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tcz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tcz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mop_Latn
data_files:
- path: train/eng_Latn-mop_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mop_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mop_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-maa_Latn
data_files:
- path: train/eng_Latn-maa_Latn.jsonl.gz
split: train
- path: test/eng_Latn-maa_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-maa_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-row_Latn
data_files:
- path: train/eng_Latn-row_Latn.jsonl.gz
split: train
- path: test/eng_Latn-row_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-row_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mcp_Latn
data_files:
- path: train/eng_Latn-mcp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mcp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mcp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bjv_Latn
data_files:
- path: train/eng_Latn-bjv_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bjv_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bjv_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-dan_Latn
data_files:
- path: train/eng_Latn-dan_Latn.jsonl.gz
split: train
- path: test/eng_Latn-dan_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-dan_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kpw_Latn
data_files:
- path: train/eng_Latn-kpw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kpw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kpw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-yal_Latn
data_files:
- path: train/eng_Latn-yal_Latn.jsonl.gz
split: train
- path: test/eng_Latn-yal_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-yal_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-yrb_Latn
data_files:
- path: train/eng_Latn-yrb_Latn.jsonl.gz
split: train
- path: test/eng_Latn-yrb_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-yrb_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-piu_Latn
data_files:
- path: train/eng_Latn-piu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-piu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-piu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kqa_Latn
data_files:
- path: train/eng_Latn-kqa_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kqa_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kqa_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-buk_Latn
data_files:
- path: train/eng_Latn-buk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-buk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-buk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-taw_Latn
data_files:
- path: train/eng_Latn-taw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-taw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-taw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bzj_Latn
data_files:
- path: train/eng_Latn-bzj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bzj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bzj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-boa_Latn
data_files:
- path: train/eng_Latn-boa_Latn.jsonl.gz
split: train
- path: test/eng_Latn-boa_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-boa_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-sua_Latn
data_files:
- path: train/eng_Latn-sua_Latn.jsonl.gz
split: train
- path: test/eng_Latn-sua_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-sua_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mwf_Latn
data_files:
- path: train/eng_Latn-mwf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mwf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mwf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-deu_Latn
data_files:
- path: train/eng_Latn-deu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-deu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-deu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ura_Latn
data_files:
- path: train/eng_Latn-ura_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ura_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ura_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-aui_Latn
data_files:
- path: train/eng_Latn-aui_Latn.jsonl.gz
split: train
- path: test/eng_Latn-aui_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-aui_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wim_Latn
data_files:
- path: train/eng_Latn-wim_Latn.jsonl.gz
split: train
- path: test/eng_Latn-wim_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-wim_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bzh_Latn
data_files:
- path: train/eng_Latn-bzh_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bzh_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bzh_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nld_Latn
data_files:
- path: train/eng_Latn-nld_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nld_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nld_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ory_Orya
data_files:
- path: train/eng_Latn-ory_Orya.jsonl.gz
split: train
- path: test/eng_Latn-ory_Orya.jsonl.gz
split: test
- path: validation/eng_Latn-ory_Orya.jsonl.gz
split: validation
- config_name: eng_Latn-ppo_Latn
data_files:
- path: train/eng_Latn-ppo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ppo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ppo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-epo_Latn
data_files:
- path: train/eng_Latn-epo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-epo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-epo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-hop_Latn
data_files:
- path: train/eng_Latn-hop_Latn.jsonl.gz
split: train
- path: test/eng_Latn-hop_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-hop_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gdn_Latn
data_files:
- path: train/eng_Latn-gdn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gdn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gdn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tlf_Latn
data_files:
- path: train/eng_Latn-tlf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tlf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tlf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tiw_Latn
data_files:
- path: train/eng_Latn-tiw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tiw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tiw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-sja_Latn
data_files:
- path: train/eng_Latn-sja_Latn.jsonl.gz
split: train
- path: test/eng_Latn-sja_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-sja_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kdl_Latn
data_files:
- path: train/eng_Latn-kdl_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kdl_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kdl_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-chk_Latn
data_files:
- path: train/eng_Latn-chk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-chk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-chk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kdc_Latn
data_files:
- path: train/eng_Latn-kdc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kdc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kdc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gng_Latn
data_files:
- path: train/eng_Latn-gng_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gng_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gng_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nko_Latn
data_files:
- path: train/eng_Latn-nko_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nko_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nko_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wer_Latn
data_files:
- path: train/eng_Latn-wer_Latn.jsonl.gz
split: train
- path: test/eng_Latn-wer_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-wer_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mhl_Latn
data_files:
- path: train/eng_Latn-mhl_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mhl_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mhl_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-toc_Latn
data_files:
- path: train/eng_Latn-toc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-toc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-toc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cbs_Latn
data_files:
- path: train/eng_Latn-cbs_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cbs_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cbs_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-qub_Latn
data_files:
- path: train/eng_Latn-qub_Latn.jsonl.gz
split: train
- path: test/eng_Latn-qub_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-qub_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-auc_Latn
data_files:
- path: train/eng_Latn-auc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-auc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-auc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kpr_Latn
data_files:
- path: train/eng_Latn-kpr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kpr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kpr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-hat_Latn
data_files:
- path: train/eng_Latn-hat_Latn.jsonl.gz
split: train
- path: test/eng_Latn-hat_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-hat_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-sny_Latn
data_files:
- path: train/eng_Latn-sny_Latn.jsonl.gz
split: train
- path: test/eng_Latn-sny_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-sny_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-byr_Latn
data_files:
- path: train/eng_Latn-byr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-byr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-byr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-emp_Latn
data_files:
- path: train/eng_Latn-emp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-emp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-emp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kwi_Latn
data_files:
- path: train/eng_Latn-kwi_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kwi_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kwi_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gum_Latn
data_files:
- path: train/eng_Latn-gum_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gum_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gum_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mbj_Latn
data_files:
- path: train/eng_Latn-mbj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mbj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mbj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-sey_Latn
data_files:
- path: train/eng_Latn-sey_Latn.jsonl.gz
split: train
- path: test/eng_Latn-sey_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-sey_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-alp_Latn
data_files:
- path: train/eng_Latn-alp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-alp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-alp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gmv_Latn
data_files:
- path: train/eng_Latn-gmv_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gmv_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gmv_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-poe_Latn
data_files:
- path: train/eng_Latn-poe_Latn.jsonl.gz
split: train
- path: test/eng_Latn-poe_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-poe_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-etr_Latn
data_files:
- path: train/eng_Latn-etr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-etr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-etr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-abt_Latn
data_files:
- path: train/eng_Latn-abt_Latn.jsonl.gz
split: train
- path: test/eng_Latn-abt_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-abt_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tuf_Latn
data_files:
- path: train/eng_Latn-tuf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tuf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tuf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-dob_Latn
data_files:
- path: train/eng_Latn-dob_Latn.jsonl.gz
split: train
- path: test/eng_Latn-dob_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-dob_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nys_Latn
data_files:
- path: train/eng_Latn-nys_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nys_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nys_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-srn_Latn
data_files:
- path: train/eng_Latn-srn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-srn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-srn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kpf_Latn
data_files:
- path: train/eng_Latn-kpf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kpf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kpf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mbt_Latn
data_files:
- path: train/eng_Latn-mbt_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mbt_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mbt_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-stp_Latn
data_files:
- path: train/eng_Latn-stp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-stp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-stp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-trc_Latn
data_files:
- path: train/eng_Latn-trc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-trc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-trc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-for_Latn
data_files:
- path: train/eng_Latn-for_Latn.jsonl.gz
split: train
- path: test/eng_Latn-for_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-for_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-yad_Latn
data_files:
- path: train/eng_Latn-yad_Latn.jsonl.gz
split: train
- path: test/eng_Latn-yad_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-yad_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cme_Latn
data_files:
- path: train/eng_Latn-cme_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cme_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cme_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bba_Latn
data_files:
- path: train/eng_Latn-bba_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bba_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bba_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mxq_Latn
data_files:
- path: train/eng_Latn-mxq_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mxq_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mxq_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-dwy_Latn
data_files:
- path: train/eng_Latn-dwy_Latn.jsonl.gz
split: train
- path: test/eng_Latn-dwy_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-dwy_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-atg_Latn
data_files:
- path: train/eng_Latn-atg_Latn.jsonl.gz
split: train
- path: test/eng_Latn-atg_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-atg_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ncj_Latn
data_files:
- path: train/eng_Latn-ncj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ncj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ncj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mpm_Latn
data_files:
- path: train/eng_Latn-mpm_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mpm_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mpm_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kqc_Latn
data_files:
- path: train/eng_Latn-kqc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kqc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kqc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-knv_Latn
data_files:
- path: train/eng_Latn-knv_Latn.jsonl.gz
split: train
- path: test/eng_Latn-knv_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-knv_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-upv_Latn
data_files:
- path: train/eng_Latn-upv_Latn.jsonl.gz
split: train
- path: test/eng_Latn-upv_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-upv_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-yut_Latn
data_files:
- path: train/eng_Latn-yut_Latn.jsonl.gz
split: train
- path: test/eng_Latn-yut_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-yut_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kje_Latn
data_files:
- path: train/eng_Latn-kje_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kje_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kje_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-okv_Latn
data_files:
- path: train/eng_Latn-okv_Latn.jsonl.gz
split: train
- path: test/eng_Latn-okv_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-okv_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tof_Latn
data_files:
- path: train/eng_Latn-tof_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tof_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tof_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-faa_Latn
data_files:
- path: train/eng_Latn-faa_Latn.jsonl.gz
split: train
- path: test/eng_Latn-faa_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-faa_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mya_Latn
data_files:
- path: train/eng_Latn-mya_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mya_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mya_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-hto_Latn
data_files:
- path: train/eng_Latn-hto_Latn.jsonl.gz
split: train
- path: test/eng_Latn-hto_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-hto_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wiv_Latn
data_files:
- path: train/eng_Latn-wiv_Latn.jsonl.gz
split: train
- path: test/eng_Latn-wiv_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-wiv_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-vid_Latn
data_files:
- path: train/eng_Latn-vid_Latn.jsonl.gz
split: train
- path: test/eng_Latn-vid_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-vid_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-xla_Latn
data_files:
- path: train/eng_Latn-xla_Latn.jsonl.gz
split: train
- path: test/eng_Latn-xla_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-xla_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-snn_Latn
data_files:
- path: train/eng_Latn-snn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-snn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-snn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ycn_Latn
data_files:
- path: train/eng_Latn-ycn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ycn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ycn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mcb_Latn
data_files:
- path: train/eng_Latn-mcb_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mcb_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mcb_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mal_Mlym
data_files:
- path: train/eng_Latn-mal_Mlym.jsonl.gz
split: train
- path: test/eng_Latn-mal_Mlym.jsonl.gz
split: test
- path: validation/eng_Latn-mal_Mlym.jsonl.gz
split: validation
- config_name: eng_Latn-sgb_Latn
data_files:
- path: train/eng_Latn-sgb_Latn.jsonl.gz
split: train
- path: test/eng_Latn-sgb_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-sgb_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-box_Latn
data_files:
- path: train/eng_Latn-box_Latn.jsonl.gz
split: train
- path: test/eng_Latn-box_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-box_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-amu_Latn
data_files:
- path: train/eng_Latn-amu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-amu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-amu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cni_Latn
data_files:
- path: train/eng_Latn-cni_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cni_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cni_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-byx_Latn
data_files:
- path: train/eng_Latn-byx_Latn.jsonl.gz
split: train
- path: test/eng_Latn-byx_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-byx_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-udu_Latn
data_files:
- path: train/eng_Latn-udu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-udu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-udu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-jid_Latn
data_files:
- path: train/eng_Latn-jid_Latn.jsonl.gz
split: train
- path: test/eng_Latn-jid_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-jid_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nlg_Latn
data_files:
- path: train/eng_Latn-nlg_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nlg_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nlg_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wuv_Latn
data_files:
- path: train/eng_Latn-wuv_Latn.jsonl.gz
split: train
- path: test/eng_Latn-wuv_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-wuv_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mto_Latn
data_files:
- path: train/eng_Latn-mto_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mto_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mto_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mcd_Latn
data_files:
- path: train/eng_Latn-mcd_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mcd_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mcd_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bus_Latn
data_files:
- path: train/eng_Latn-bus_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bus_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bus_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-glk_Arab
data_files:
- path: train/eng_Latn-glk_Arab.jsonl.gz
split: train
- path: test/eng_Latn-glk_Arab.jsonl.gz
split: test
- path: validation/eng_Latn-glk_Arab.jsonl.gz
split: validation
- config_name: eng_Latn-too_Latn
data_files:
- path: train/eng_Latn-too_Latn.jsonl.gz
split: train
- path: test/eng_Latn-too_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-too_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mpp_Latn
data_files:
- path: train/eng_Latn-mpp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mpp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mpp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zaj_Latn
data_files:
- path: train/eng_Latn-zaj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zaj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zaj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-yva_Latn
data_files:
- path: train/eng_Latn-yva_Latn.jsonl.gz
split: train
- path: test/eng_Latn-yva_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-yva_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-awx_Latn
data_files:
- path: train/eng_Latn-awx_Latn.jsonl.gz
split: train
- path: test/eng_Latn-awx_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-awx_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ian_Latn
data_files:
- path: train/eng_Latn-ian_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ian_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ian_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-otq_Latn
data_files:
- path: train/eng_Latn-otq_Latn.jsonl.gz
split: train
- path: test/eng_Latn-otq_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-otq_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-fra_Latn
data_files:
- path: train/eng_Latn-fra_Latn.jsonl.gz
split: train
- path: test/eng_Latn-fra_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-fra_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zlm_Latn
data_files:
- path: train/eng_Latn-zlm_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zlm_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zlm_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ptu_Latn
data_files:
- path: train/eng_Latn-ptu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ptu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ptu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-apr_Latn
data_files:
- path: train/eng_Latn-apr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-apr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-apr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zab_Latn
data_files:
- path: train/eng_Latn-zab_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zab_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zab_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kwf_Latn
data_files:
- path: train/eng_Latn-kwf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kwf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kwf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cya_Latn
data_files:
- path: train/eng_Latn-cya_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cya_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cya_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nna_Latn
data_files:
- path: train/eng_Latn-nna_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nna_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nna_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wnc_Latn
data_files:
- path: train/eng_Latn-wnc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-wnc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-wnc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-dad_Latn
data_files:
- path: train/eng_Latn-dad_Latn.jsonl.gz
split: train
- path: test/eng_Latn-dad_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-dad_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-opm_Latn
data_files:
- path: train/eng_Latn-opm_Latn.jsonl.gz
split: train
- path: test/eng_Latn-opm_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-opm_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zaw_Latn
data_files:
- path: train/eng_Latn-zaw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zaw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zaw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-agd_Latn
data_files:
- path: train/eng_Latn-agd_Latn.jsonl.gz
split: train
- path: test/eng_Latn-agd_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-agd_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-qxo_Latn
data_files:
- path: train/eng_Latn-qxo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-qxo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-qxo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ign_Latn
data_files:
- path: train/eng_Latn-ign_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ign_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ign_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cak_Latn
data_files:
- path: train/eng_Latn-cak_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cak_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cak_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-top_Latn
data_files:
- path: train/eng_Latn-top_Latn.jsonl.gz
split: train
- path: test/eng_Latn-top_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-top_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tgk_Cyrl
data_files:
- path: train/eng_Latn-tgk_Cyrl.jsonl.gz
split: train
- path: test/eng_Latn-tgk_Cyrl.jsonl.gz
split: test
- path: validation/eng_Latn-tgk_Cyrl.jsonl.gz
split: validation
- config_name: eng_Latn-awb_Latn
data_files:
- path: train/eng_Latn-awb_Latn.jsonl.gz
split: train
- path: test/eng_Latn-awb_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-awb_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cnl_Latn
data_files:
- path: train/eng_Latn-cnl_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cnl_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cnl_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kgp_Latn
data_files:
- path: train/eng_Latn-kgp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kgp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kgp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-khs_Latn
data_files:
- path: train/eng_Latn-khs_Latn.jsonl.gz
split: train
- path: test/eng_Latn-khs_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-khs_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-abx_Latn
data_files:
- path: train/eng_Latn-abx_Latn.jsonl.gz
split: train
- path: test/eng_Latn-abx_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-abx_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mcq_Latn
data_files:
- path: train/eng_Latn-mcq_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mcq_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mcq_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-chd_Latn
data_files:
- path: train/eng_Latn-chd_Latn.jsonl.gz
split: train
- path: test/eng_Latn-chd_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-chd_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kud_Latn
data_files:
- path: train/eng_Latn-kud_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kud_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kud_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nak_Latn
data_files:
- path: train/eng_Latn-nak_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nak_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nak_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mux_Latn
data_files:
- path: train/eng_Latn-mux_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mux_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mux_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tzo_Latn
data_files:
- path: train/eng_Latn-tzo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tzo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tzo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mav_Latn
data_files:
- path: train/eng_Latn-mav_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mav_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mav_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-avt_Latn
data_files:
- path: train/eng_Latn-avt_Latn.jsonl.gz
split: train
- path: test/eng_Latn-avt_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-avt_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bjz_Latn
data_files:
- path: train/eng_Latn-bjz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bjz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bjz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ptp_Latn
data_files:
- path: train/eng_Latn-ptp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ptp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ptp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gnw_Latn
data_files:
- path: train/eng_Latn-gnw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gnw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gnw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cub_Latn
data_files:
- path: train/eng_Latn-cub_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cub_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cub_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-hmo_Latn
data_files:
- path: train/eng_Latn-hmo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-hmo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-hmo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kkl_Latn
data_files:
- path: train/eng_Latn-kkl_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kkl_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kkl_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nou_Latn
data_files:
- path: train/eng_Latn-nou_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nou_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nou_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bre_Latn
data_files:
- path: train/eng_Latn-bre_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bre_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bre_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-sim_Latn
data_files:
- path: train/eng_Latn-sim_Latn.jsonl.gz
split: train
- path: test/eng_Latn-sim_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-sim_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-sbk_Latn
data_files:
- path: train/eng_Latn-sbk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-sbk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-sbk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nsn_Latn
data_files:
- path: train/eng_Latn-nsn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nsn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nsn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mva_Latn
data_files:
- path: train/eng_Latn-mva_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mva_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mva_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kkc_Latn
data_files:
- path: train/eng_Latn-kkc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kkc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kkc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gvc_Latn
data_files:
- path: train/eng_Latn-gvc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gvc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gvc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-jao_Latn
data_files:
- path: train/eng_Latn-jao_Latn.jsonl.gz
split: train
- path: test/eng_Latn-jao_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-jao_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kek_Latn
data_files:
- path: train/eng_Latn-kek_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kek_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kek_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nfa_Latn
data_files:
- path: train/eng_Latn-nfa_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nfa_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nfa_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-lid_Latn
data_files:
- path: train/eng_Latn-lid_Latn.jsonl.gz
split: train
- path: test/eng_Latn-lid_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-lid_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kmu_Latn
data_files:
- path: train/eng_Latn-kmu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kmu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kmu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-hbo_Hebr
data_files:
- path: train/eng_Latn-hbo_Hebr.jsonl.gz
split: train
- path: test/eng_Latn-hbo_Hebr.jsonl.gz
split: test
- path: validation/eng_Latn-hbo_Hebr.jsonl.gz
split: validation
- config_name: eng_Latn-bkq_Latn
data_files:
- path: train/eng_Latn-bkq_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bkq_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bkq_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mig_Latn
data_files:
- path: train/eng_Latn-mig_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mig_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mig_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-jae_Latn
data_files:
- path: train/eng_Latn-jae_Latn.jsonl.gz
split: train
- path: test/eng_Latn-jae_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-jae_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ben_Beng
data_files:
- path: train/eng_Latn-ben_Beng.jsonl.gz
split: train
- path: test/eng_Latn-ben_Beng.jsonl.gz
split: test
- path: validation/eng_Latn-ben_Beng.jsonl.gz
split: validation
- config_name: eng_Latn-spy_Latn
data_files:
- path: train/eng_Latn-spy_Latn.jsonl.gz
split: train
- path: test/eng_Latn-spy_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-spy_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bvd_Latn
data_files:
- path: train/eng_Latn-bvd_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bvd_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bvd_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bvr_Latn
data_files:
- path: train/eng_Latn-bvr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bvr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bvr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kto_Latn
data_files:
- path: train/eng_Latn-kto_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kto_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kto_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-amn_Latn
data_files:
- path: train/eng_Latn-amn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-amn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-amn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-spp_Latn
data_files:
- path: train/eng_Latn-spp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-spp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-spp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ncl_Latn
data_files:
- path: train/eng_Latn-ncl_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ncl_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ncl_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tdt_Latn
data_files:
- path: train/eng_Latn-tdt_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tdt_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tdt_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-urw_Latn
data_files:
- path: train/eng_Latn-urw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-urw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-urw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-shp_Latn
data_files:
- path: train/eng_Latn-shp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-shp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-shp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-met_Latn
data_files:
- path: train/eng_Latn-met_Latn.jsonl.gz
split: train
- path: test/eng_Latn-met_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-met_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-pon_Latn
data_files:
- path: train/eng_Latn-pon_Latn.jsonl.gz
split: train
- path: test/eng_Latn-pon_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-pon_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tiy_Latn
data_files:
- path: train/eng_Latn-tiy_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tiy_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tiy_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nhy_Latn
data_files:
- path: train/eng_Latn-nhy_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nhy_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nhy_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cpc_Latn
data_files:
- path: train/eng_Latn-cpc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cpc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cpc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bel_Cyrl
data_files:
- path: train/eng_Latn-bel_Cyrl.jsonl.gz
split: train
- path: test/eng_Latn-bel_Cyrl.jsonl.gz
split: test
- path: validation/eng_Latn-bel_Cyrl.jsonl.gz
split: validation
- config_name: eng_Latn-cbv_Latn
data_files:
- path: train/eng_Latn-cbv_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cbv_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cbv_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-pab_Latn
data_files:
- path: train/eng_Latn-pab_Latn.jsonl.gz
split: train
- path: test/eng_Latn-pab_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-pab_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-dgz_Latn
data_files:
- path: train/eng_Latn-dgz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-dgz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-dgz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-pri_Latn
data_files:
- path: train/eng_Latn-pri_Latn.jsonl.gz
split: train
- path: test/eng_Latn-pri_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-pri_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-hch_Latn
data_files:
- path: train/eng_Latn-hch_Latn.jsonl.gz
split: train
- path: test/eng_Latn-hch_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-hch_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wed_Latn
data_files:
- path: train/eng_Latn-wed_Latn.jsonl.gz
split: train
- path: test/eng_Latn-wed_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-wed_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-suz_Latn
data_files:
- path: train/eng_Latn-suz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-suz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-suz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-usp_Latn
data_files:
- path: train/eng_Latn-usp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-usp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-usp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mkl_Latn
data_files:
- path: train/eng_Latn-mkl_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mkl_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mkl_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cbt_Latn
data_files:
- path: train/eng_Latn-cbt_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cbt_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cbt_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kpg_Latn
data_files:
- path: train/eng_Latn-kpg_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kpg_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kpg_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kue_Latn
data_files:
- path: train/eng_Latn-kue_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kue_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kue_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-sbs_Latn
data_files:
- path: train/eng_Latn-sbs_Latn.jsonl.gz
split: train
- path: test/eng_Latn-sbs_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-sbs_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-qvz_Latn
data_files:
- path: train/eng_Latn-qvz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-qvz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-qvz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-seh_Latn
data_files:
- path: train/eng_Latn-seh_Latn.jsonl.gz
split: train
- path: test/eng_Latn-seh_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-seh_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wrs_Latn
data_files:
- path: train/eng_Latn-wrs_Latn.jsonl.gz
split: train
- path: test/eng_Latn-wrs_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-wrs_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-snc_Latn
data_files:
- path: train/eng_Latn-snc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-snc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-snc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bjp_Latn
data_files:
- path: train/eng_Latn-bjp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bjp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bjp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kyz_Latn
data_files:
- path: train/eng_Latn-kyz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kyz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kyz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-noa_Latn
data_files:
- path: train/eng_Latn-noa_Latn.jsonl.gz
split: train
- path: test/eng_Latn-noa_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-noa_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ssx_Latn
data_files:
- path: train/eng_Latn-ssx_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ssx_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ssx_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nbq_Latn
data_files:
- path: train/eng_Latn-nbq_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nbq_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nbq_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-msb_Latn
data_files:
- path: train/eng_Latn-msb_Latn.jsonl.gz
split: train
- path: test/eng_Latn-msb_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-msb_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-sue_Latn
data_files:
- path: train/eng_Latn-sue_Latn.jsonl.gz
split: train
- path: test/eng_Latn-sue_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-sue_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-asm_Beng
data_files:
- path: train/eng_Latn-asm_Beng.jsonl.gz
split: train
- path: test/eng_Latn-asm_Beng.jsonl.gz
split: test
- path: validation/eng_Latn-asm_Beng.jsonl.gz
split: validation
- config_name: eng_Latn-som_Latn
data_files:
- path: train/eng_Latn-som_Latn.jsonl.gz
split: train
- path: test/eng_Latn-som_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-som_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-xon_Latn
data_files:
- path: train/eng_Latn-xon_Latn.jsonl.gz
split: train
- path: test/eng_Latn-xon_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-xon_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-qvh_Latn
data_files:
- path: train/eng_Latn-qvh_Latn.jsonl.gz
split: train
- path: test/eng_Latn-qvh_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-qvh_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mib_Latn
data_files:
- path: train/eng_Latn-mib_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mib_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mib_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wap_Latn
data_files:
- path: train/eng_Latn-wap_Latn.jsonl.gz
split: train
- path: test/eng_Latn-wap_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-wap_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gai_Latn
data_files:
- path: train/eng_Latn-gai_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gai_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gai_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mkn_Latn
data_files:
- path: train/eng_Latn-mkn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mkn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mkn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-xnn_Latn
data_files:
- path: train/eng_Latn-xnn_Latn.jsonl.gz
split: train
- path: test/eng_Latn-xnn_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-xnn_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-amf_Latn
data_files:
- path: train/eng_Latn-amf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-amf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-amf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nhe_Latn
data_files:
- path: train/eng_Latn-nhe_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nhe_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nhe_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kmk_Latn
data_files:
- path: train/eng_Latn-kmk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kmk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kmk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tam_Taml
data_files:
- path: train/eng_Latn-tam_Taml.jsonl.gz
split: train
- path: test/eng_Latn-tam_Taml.jsonl.gz
split: test
- path: validation/eng_Latn-tam_Taml.jsonl.gz
split: validation
- config_name: eng_Latn-nab_Latn
data_files:
- path: train/eng_Latn-nab_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nab_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nab_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-dik_Latn
data_files:
- path: train/eng_Latn-dik_Latn.jsonl.gz
split: train
- path: test/eng_Latn-dik_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-dik_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cpy_Latn
data_files:
- path: train/eng_Latn-cpy_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cpy_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cpy_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-arl_Latn
data_files:
- path: train/eng_Latn-arl_Latn.jsonl.gz
split: train
- path: test/eng_Latn-arl_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-arl_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tuc_Latn
data_files:
- path: train/eng_Latn-tuc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tuc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tuc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ngu_Latn
data_files:
- path: train/eng_Latn-ngu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ngu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ngu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zad_Latn
data_files:
- path: train/eng_Latn-zad_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zad_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zad_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kmh_Latn
data_files:
- path: train/eng_Latn-kmh_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kmh_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kmh_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-quh_Latn
data_files:
- path: train/eng_Latn-quh_Latn.jsonl.gz
split: train
- path: test/eng_Latn-quh_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-quh_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-alq_Latn
data_files:
- path: train/eng_Latn-alq_Latn.jsonl.gz
split: train
- path: test/eng_Latn-alq_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-alq_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-grc_Grek
data_files:
- path: train/eng_Latn-grc_Grek.jsonl.gz
split: train
- path: test/eng_Latn-grc_Grek.jsonl.gz
split: test
- path: validation/eng_Latn-grc_Grek.jsonl.gz
split: validation
- config_name: eng_Latn-kaq_Latn
data_files:
- path: train/eng_Latn-kaq_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kaq_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kaq_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zpm_Latn
data_files:
- path: train/eng_Latn-zpm_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zpm_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zpm_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-obo_Latn
data_files:
- path: train/eng_Latn-obo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-obo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-obo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cha_Latn
data_files:
- path: train/eng_Latn-cha_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cha_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cha_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mbs_Latn
data_files:
- path: train/eng_Latn-mbs_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mbs_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mbs_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-poi_Latn
data_files:
- path: train/eng_Latn-poi_Latn.jsonl.gz
split: train
- path: test/eng_Latn-poi_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-poi_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-spm_Latn
data_files:
- path: train/eng_Latn-spm_Latn.jsonl.gz
split: train
- path: test/eng_Latn-spm_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-spm_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cpb_Latn
data_files:
- path: train/eng_Latn-cpb_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cpb_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cpb_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-omw_Latn
data_files:
- path: train/eng_Latn-omw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-omw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-omw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-klv_Latn
data_files:
- path: train/eng_Latn-klv_Latn.jsonl.gz
split: train
- path: test/eng_Latn-klv_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-klv_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-sbe_Latn
data_files:
- path: train/eng_Latn-sbe_Latn.jsonl.gz
split: train
- path: test/eng_Latn-sbe_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-sbe_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ntu_Latn
data_files:
- path: train/eng_Latn-ntu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ntu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ntu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-zat_Latn
data_files:
- path: train/eng_Latn-zat_Latn.jsonl.gz
split: train
- path: test/eng_Latn-zat_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-zat_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bsp_Latn
data_files:
- path: train/eng_Latn-bsp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bsp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bsp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mps_Latn
data_files:
- path: train/eng_Latn-mps_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mps_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mps_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mks_Latn
data_files:
- path: train/eng_Latn-mks_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mks_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mks_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bmh_Latn
data_files:
- path: train/eng_Latn-bmh_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bmh_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bmh_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tpz_Latn
data_files:
- path: train/eng_Latn-tpz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tpz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tpz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-amr_Latn
data_files:
- path: train/eng_Latn-amr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-amr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-amr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cjo_Latn
data_files:
- path: train/eng_Latn-cjo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cjo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cjo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ksr_Latn
data_files:
- path: train/eng_Latn-ksr_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ksr_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ksr_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tgo_Latn
data_files:
- path: train/eng_Latn-tgo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tgo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tgo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tke_Latn
data_files:
- path: train/eng_Latn-tke_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tke_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tke_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-lac_Latn
data_files:
- path: train/eng_Latn-lac_Latn.jsonl.gz
split: train
- path: test/eng_Latn-lac_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-lac_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nhu_Latn
data_files:
- path: train/eng_Latn-nhu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nhu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nhu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ssd_Latn
data_files:
- path: train/eng_Latn-ssd_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ssd_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ssd_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bon_Latn
data_files:
- path: train/eng_Latn-bon_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bon_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bon_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cso_Latn
data_files:
- path: train/eng_Latn-cso_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cso_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cso_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-naf_Latn
data_files:
- path: train/eng_Latn-naf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-naf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-naf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kbh_Latn
data_files:
- path: train/eng_Latn-kbh_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kbh_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kbh_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-hun_Latn
data_files:
- path: train/eng_Latn-hun_Latn.jsonl.gz
split: train
- path: test/eng_Latn-hun_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-hun_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tte_Latn
data_files:
- path: train/eng_Latn-tte_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tte_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tte_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-amo_Latn
data_files:
- path: train/eng_Latn-amo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-amo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-amo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kiz_Latn
data_files:
- path: train/eng_Latn-kiz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kiz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kiz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wsk_Latn
data_files:
- path: train/eng_Latn-wsk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-wsk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-wsk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kwd_Latn
data_files:
- path: train/eng_Latn-kwd_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kwd_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kwd_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-geb_Latn
data_files:
- path: train/eng_Latn-geb_Latn.jsonl.gz
split: train
- path: test/eng_Latn-geb_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-geb_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mdy_Latn
data_files:
- path: train/eng_Latn-mdy_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mdy_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mdy_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kgk_Latn
data_files:
- path: train/eng_Latn-kgk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kgk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kgk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kqw_Latn
data_files:
- path: train/eng_Latn-kqw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kqw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kqw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gaw_Latn
data_files:
- path: train/eng_Latn-gaw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gaw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gaw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mox_Latn
data_files:
- path: train/eng_Latn-mox_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mox_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mox_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cav_Latn
data_files:
- path: train/eng_Latn-cav_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cav_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cav_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tzj_Latn
data_files:
- path: train/eng_Latn-tzj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tzj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tzj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kze_Latn
data_files:
- path: train/eng_Latn-kze_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kze_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kze_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-not_Latn
data_files:
- path: train/eng_Latn-not_Latn.jsonl.gz
split: train
- path: test/eng_Latn-not_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-not_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-lin_Latn
data_files:
- path: train/eng_Latn-lin_Latn.jsonl.gz
split: train
- path: test/eng_Latn-lin_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-lin_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-urd_Arab
data_files:
- path: train/eng_Latn-urd_Arab.jsonl.gz
split: train
- path: test/eng_Latn-urd_Arab.jsonl.gz
split: test
- path: validation/eng_Latn-urd_Arab.jsonl.gz
split: validation
- config_name: eng_Latn-mlh_Latn
data_files:
- path: train/eng_Latn-mlh_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mlh_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mlh_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-muy_Latn
data_files:
- path: train/eng_Latn-muy_Latn.jsonl.gz
split: train
- path: test/eng_Latn-muy_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-muy_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-iws_Latn
data_files:
- path: train/eng_Latn-iws_Latn.jsonl.gz
split: train
- path: test/eng_Latn-iws_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-iws_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tur_Latn
data_files:
- path: train/eng_Latn-tur_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tur_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tur_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-gam_Latn
data_files:
- path: train/eng_Latn-gam_Latn.jsonl.gz
split: train
- path: test/eng_Latn-gam_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-gam_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kbc_Latn
data_files:
- path: train/eng_Latn-kbc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kbc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kbc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kgf_Latn
data_files:
- path: train/eng_Latn-kgf_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kgf_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kgf_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-maz_Latn
data_files:
- path: train/eng_Latn-maz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-maz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-maz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nss_Latn
data_files:
- path: train/eng_Latn-nss_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nss_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nss_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ake_Latn
data_files:
- path: train/eng_Latn-ake_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ake_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ake_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nuy_Latn
data_files:
- path: train/eng_Latn-nuy_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nuy_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nuy_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bjk_Latn
data_files:
- path: train/eng_Latn-bjk_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bjk_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bjk_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mzz_Latn
data_files:
- path: train/eng_Latn-mzz_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mzz_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mzz_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-msy_Latn
data_files:
- path: train/eng_Latn-msy_Latn.jsonl.gz
split: train
- path: test/eng_Latn-msy_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-msy_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-anh_Latn
data_files:
- path: train/eng_Latn-anh_Latn.jsonl.gz
split: train
- path: test/eng_Latn-anh_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-anh_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-bea_Latn
data_files:
- path: train/eng_Latn-bea_Latn.jsonl.gz
split: train
- path: test/eng_Latn-bea_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-bea_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-ntj_Latn
data_files:
- path: train/eng_Latn-ntj_Latn.jsonl.gz
split: train
- path: test/eng_Latn-ntj_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-ntj_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-anv_Latn
data_files:
- path: train/eng_Latn-anv_Latn.jsonl.gz
split: train
- path: test/eng_Latn-anv_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-anv_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-xed_Latn
data_files:
- path: train/eng_Latn-xed_Latn.jsonl.gz
split: train
- path: test/eng_Latn-xed_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-xed_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nho_Latn
data_files:
- path: train/eng_Latn-nho_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nho_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nho_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-cbc_Latn
data_files:
- path: train/eng_Latn-cbc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-cbc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-cbc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-qve_Latn
data_files:
- path: train/eng_Latn-qve_Latn.jsonl.gz
split: train
- path: test/eng_Latn-qve_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-qve_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-amp_Latn
data_files:
- path: train/eng_Latn-amp_Latn.jsonl.gz
split: train
- path: test/eng_Latn-amp_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-amp_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-qvc_Latn
data_files:
- path: train/eng_Latn-qvc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-qvc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-qvc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-aka_Latn
data_files:
- path: train/eng_Latn-aka_Latn.jsonl.gz
split: train
- path: test/eng_Latn-aka_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-aka_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-aby_Latn
data_files:
- path: train/eng_Latn-aby_Latn.jsonl.gz
split: train
- path: test/eng_Latn-aby_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-aby_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-myu_Latn
data_files:
- path: train/eng_Latn-myu_Latn.jsonl.gz
split: train
- path: test/eng_Latn-myu_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-myu_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-aak_Arab
data_files:
- path: train/eng_Latn-aak_Arab.jsonl.gz
split: train
- path: test/eng_Latn-aak_Arab.jsonl.gz
split: test
- path: validation/eng_Latn-aak_Arab.jsonl.gz
split: validation
- config_name: eng_Latn-soq_Latn
data_files:
- path: train/eng_Latn-soq_Latn.jsonl.gz
split: train
- path: test/eng_Latn-soq_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-soq_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tif_Latn
data_files:
- path: train/eng_Latn-tif_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tif_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tif_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-aai_Latn
data_files:
- path: train/eng_Latn-aai_Latn.jsonl.gz
split: train
- path: test/eng_Latn-aai_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-aai_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-nnq_Latn
data_files:
- path: train/eng_Latn-nnq_Latn.jsonl.gz
split: train
- path: test/eng_Latn-nnq_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-nnq_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-sab_Latn
data_files:
- path: train/eng_Latn-sab_Latn.jsonl.gz
split: train
- path: test/eng_Latn-sab_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-sab_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-wmw_Latn
data_files:
- path: train/eng_Latn-wmw_Latn.jsonl.gz
split: train
- path: test/eng_Latn-wmw_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-wmw_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-dgc_Latn
data_files:
- path: train/eng_Latn-dgc_Latn.jsonl.gz
split: train
- path: test/eng_Latn-dgc_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-dgc_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-roo_Latn
data_files:
- path: train/eng_Latn-roo_Latn.jsonl.gz
split: train
- path: test/eng_Latn-roo_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-roo_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-tcs_Latn
data_files:
- path: train/eng_Latn-tcs_Latn.jsonl.gz
split: train
- path: test/eng_Latn-tcs_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-tcs_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-mxb_Latn
data_files:
- path: train/eng_Latn-mxb_Latn.jsonl.gz
split: train
- path: test/eng_Latn-mxb_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-mxb_Latn.jsonl.gz
split: validation
- config_name: eng_Latn-kde_Latn
data_files:
- path: train/eng_Latn-kde_Latn.jsonl.gz
split: train
- path: test/eng_Latn-kde_Latn.jsonl.gz
split: test
- path: validation/eng_Latn-kde_Latn.jsonl.gz
split: validation
---
This dataset pre-computes all English-centric directions from [bible-nlp/biblenlp-corpus](https://huggingface.co/datasets/bible-nlp/biblenlp-corpus), and as a result loading is significantly faster.
Loading example:
```python
>>> from datasets import load_dataset
>>> dataset = load_dataset("davidstap/biblenlp-corpus-mmteb", "eng-arb", trust_remote_code=True)
>>> dataset
DatasetDict({
train: Dataset({
features: ['eng', 'arb'],
num_rows: 28723
})
validation: Dataset({
features: ['eng', 'arb'],
num_rows: 1578
})
test: Dataset({
features: ['eng', 'arb'],
num_rows: 1551
})
})
>>>
```
Note that in all possible configurations, `eng` comes before the other language.
|
open-web-math/open-web-math | open-web-math | "2023-10-17T20:14:00Z" | 18,540 | 312 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"arxiv:2310.06786",
"region:us"
] | null | "2023-09-06T00:25:12Z" | ---
dataset_info:
features:
- name: url
dtype: string
- name: text
dtype: string
- name: date
dtype: string
- name: metadata
dtype: string
splits:
- name: train
num_bytes: 56651995057
num_examples: 6315233
download_size: 16370689925
dataset_size: 56651995057
license: odc-by
task_categories:
- text-generation
language:
- en
pretty_name: OpenWebMath
size_categories:
- 10B<n<100B
---
<img src="imgs/OpenWebMath-left.png" width="300">
[Keiran Paster](https://keirp.com)\*, [Marco Dos Santos](https://marco-dossantos.github.io/)\*, [Zhangir Azerbayev](https://zhangir-azerbayev.github.io/), [Jimmy Ba](https://jimmylba.github.io/)
[GitHub ](https://github.com/keirp/OpenWebMath) | [ArXiv](https://arxiv.org/abs/2310.06786)
| [PDF](https://arxiv.org/pdf/2310.06786.pdf)
**OpenWebMath** is a dataset containing the majority of the high-quality, mathematical text from the internet. It is filtered and extracted from over 200B HTML files on Common Crawl down to a set of **6.3 million documents** containing a total of **14.7B tokens**. OpenWebMath is intended for use in _pretraining_ and _finetuning_ large language models.
You can download the dataset using Hugging Face:
```python
from datasets import load_dataset
ds = load_dataset("open-web-math/open-web-math")
```
# OpenWebMath Contents
The dataset is structured as follows:
```python
{
"text": ..., # document text.
"url": ..., # document url.
"date": ..., # date the page was crawled.
"metadata": ..., # JSON containing information from the extraction process.
}
```
OpenWebMath contains documents from over 130k different domains, including data from forums, educational pages, and blogs. The dataset contains documents covering mathematics, physics, statistics, computer science, and more. The following table shows the most common domains in OpenWebMath by character count.
| Domain | # Characters | % Characters |
| ----------------- | ------------- | ------------ |
| stackexchange.com | 4,655,132,784 | 9.55% |
| nature.com | 1,529,935,838 | 3.14% |
| wordpress.com | 1,294,166,938 | 2.66% |
| physicsforums.com | 1,160,137,919 | 2.38% |
| github.io | 725,689,722 | 1.49% |
| zbmath.org | 620,019,503 | 1.27% |
| wikipedia.org | 618,024,754 | 1.27% |
| groundai.com | 545,214,990 | 1.12% |
| blogspot.com | 520,392,333 | 1.07% |
| mathoverflow.net | 499,102,560 | 1.02% |
# OpenWebMath Pipeline
<img src="imgs/pipeline.png" alt="Overview of the OpenWebMath Pipeline">
OpenWebMath builds on the massive [Common Crawl](https://commoncrawl.org/) dataset, which contains over 200B HTML documents. We filtered the data to only include documents that are: (1) in English, (2) contain mathematical content, and (3) are of high quality. We also put a strong emphasis on extracting LaTeX content from the HTML documents as well as reducing boilerplate in comparison to other web datasets.
The OpenWebMath pipeline consists of five steps:
1. **Prefiltering HTML Documents**:
   - We apply a simple prefilter to all HTML documents in Common Crawl in order to skip documents without mathematical content and avoid unnecessary processing time.
2. **Text Extraction**:
- Extract text, including LaTeX content, from the HTML documents while removing boilerplate.
3. **Content Classification and Filtering**:
- Apply a [FastText language identification model](https://fasttext.cc/docs/en/language-identification.html) to keep only English documents.
- Filter high perplexity documents using a [KenLM](https://github.com/kpu/kenlm) model trained on [Proof-Pile](https://huggingface.co/datasets/hoskinson-center/proof-pile).
- Filter non-mathematical documents using our own _MathScore_ model.
4. **Deduplication**:
- Deduplicate the dataset using SimHash in [text-dedup](https://github.com/ChenghaoMou/text-dedup).
5. **Manual Inspection**:
   - Inspect the documents gathered from previous steps and remove low-quality pages.
For a detailed discussion on the processing pipeline, please refer to our paper.
# License
OpenWebMath is made available under an ODC-By 1.0 license; users should also abide by the CommonCrawl ToU: [https://commoncrawl.org/terms-of-use/](https://commoncrawl.org/terms-of-use/). We do not alter the license of any of the underlying data.
# Citation Information
```
@misc{paster2023openwebmath,
title={OpenWebMath: An Open Dataset of High-Quality Mathematical Web Text},
author={Keiran Paster and Marco Dos Santos and Zhangir Azerbayev and Jimmy Ba},
year={2023},
eprint={2310.06786},
archivePrefix={arXiv},
primaryClass={cs.AI}
}
```
|
hails/bigbench | hails | "2023-11-17T16:05:10Z" | 18,449 | 4 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2023-10-03T19:55:51Z" | ---
dataset_info:
- config_name: abstract_narrative_understanding_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 6560069
num_examples: 3000
- name: train
num_bytes: 5249819
num_examples: 2400
- name: validation
num_bytes: 1310250
num_examples: 600
download_size: 0
dataset_size: 13120138
- config_name: anachronisms_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 48826
num_examples: 230
- name: train
num_bytes: 39116
num_examples: 184
- name: validation
num_bytes: 9710
num_examples: 46
download_size: 0
dataset_size: 97652
- config_name: analogical_similarity_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 1373815
num_examples: 323
- name: train
num_bytes: 1101512
num_examples: 259
- name: validation
num_bytes: 272303
num_examples: 64
download_size: 0
dataset_size: 2747630
- config_name: analytic_entailment_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 17316
num_examples: 70
- name: train
num_bytes: 13368
num_examples: 54
- name: validation
num_bytes: 3948
num_examples: 16
download_size: 0
dataset_size: 34632
- config_name: arithmetic_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 3833272
num_examples: 15023
- name: train
num_bytes: 3066775
num_examples: 12019
- name: validation
num_bytes: 766497
num_examples: 3004
download_size: 0
dataset_size: 7666544
- config_name: ascii_word_recognition_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 4984662
num_examples: 5000
- name: train
num_bytes: 3997273
num_examples: 4000
- name: validation
num_bytes: 987389
num_examples: 1000
download_size: 0
dataset_size: 9969324
- config_name: authorship_verification_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 14118592
num_examples: 880
- name: train
num_bytes: 11288481
num_examples: 704
- name: validation
num_bytes: 2830111
num_examples: 176
download_size: 0
dataset_size: 28237184
- config_name: auto_categorization_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 40549
num_examples: 328
- name: train
num_bytes: 32992
num_examples: 263
- name: validation
num_bytes: 7557
num_examples: 65
download_size: 0
dataset_size: 81098
- config_name: auto_debugging_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 5112
num_examples: 34
- name: train
num_bytes: 2651
num_examples: 18
- name: validation
num_bytes: 2461
num_examples: 16
download_size: 0
dataset_size: 10224
- config_name: bbq_lite_json_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 6890493
num_examples: 16076
- name: train
num_bytes: 5508584
num_examples: 12866
- name: validation
num_bytes: 1381909
num_examples: 3210
download_size: 0
dataset_size: 13780986
- config_name: bridging_anaphora_resolution_barqa_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 1971015
num_examples: 648
- name: train
num_bytes: 1537264
num_examples: 519
- name: validation
num_bytes: 433751
num_examples: 129
download_size: 0
dataset_size: 3942030
- config_name: causal_judgment_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 204878
num_examples: 190
- name: train
num_bytes: 164940
num_examples: 152
- name: validation
num_bytes: 39938
num_examples: 38
download_size: 0
dataset_size: 409756
- config_name: cause_and_effect_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 49314
num_examples: 153
- name: train
num_bytes: 39620
num_examples: 123
- name: validation
num_bytes: 9694
num_examples: 30
download_size: 0
dataset_size: 98628
- config_name: checkmate_in_one_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 3123256
num_examples: 3498
- name: train
num_bytes: 2502314
num_examples: 2799
- name: validation
num_bytes: 620942
num_examples: 699
download_size: 0
dataset_size: 6246512
- config_name: chess_state_tracking_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 3269932
num_examples: 6000
- name: train
num_bytes: 2616294
num_examples: 4800
- name: validation
num_bytes: 653638
num_examples: 1200
download_size: 0
dataset_size: 6539864
- config_name: chinese_remainder_theorem_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 153222
num_examples: 500
- name: train
num_bytes: 122601
num_examples: 400
- name: validation
num_bytes: 30621
num_examples: 100
download_size: 0
dataset_size: 306444
- config_name: cifar10_classification_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 111022200
num_examples: 20000
- name: train
num_bytes: 88782724
num_examples: 16000
- name: validation
num_bytes: 22239476
num_examples: 4000
download_size: 0
dataset_size: 222044400
- config_name: code_line_description_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 33670
num_examples: 60
- name: train
num_bytes: 25530
num_examples: 44
- name: validation
num_bytes: 8140
num_examples: 16
download_size: 0
dataset_size: 67340
- config_name: codenames_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 25195
num_examples: 85
- name: train
num_bytes: 19964
num_examples: 68
- name: validation
num_bytes: 5231
num_examples: 17
download_size: 0
dataset_size: 50390
- config_name: color_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 1633263
num_examples: 4000
- name: train
num_bytes: 1306663
num_examples: 3200
- name: validation
num_bytes: 326600
num_examples: 800
download_size: 0
dataset_size: 3266526
- config_name: common_morpheme_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 12388
num_examples: 50
- name: train
num_bytes: 8444
num_examples: 34
- name: validation
num_bytes: 3944
num_examples: 16
download_size: 0
dataset_size: 24776
- config_name: conceptual_combinations_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 58859
num_examples: 103
- name: train
num_bytes: 48010
num_examples: 84
- name: validation
num_bytes: 10849
num_examples: 19
download_size: 0
dataset_size: 117718
- config_name: conlang_translation_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 215190
num_examples: 164
- name: train
num_bytes: 173024
num_examples: 132
- name: validation
num_bytes: 42166
num_examples: 32
download_size: 0
dataset_size: 430380
- config_name: contextual_parametric_knowledge_conflicts_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 14587554
num_examples: 17528
- name: train
num_bytes: 11666236
num_examples: 14023
- name: validation
num_bytes: 2921318
num_examples: 3505
download_size: 0
dataset_size: 29175108
- config_name: crash_blossom_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 12194
num_examples: 38
- name: train
num_bytes: 6999
num_examples: 22
- name: validation
num_bytes: 5195
num_examples: 16
download_size: 0
dataset_size: 24388
- config_name: crass_ai_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 22870
num_examples: 44
- name: train
num_bytes: 14130
num_examples: 28
- name: validation
num_bytes: 8740
num_examples: 16
download_size: 0
dataset_size: 45740
- config_name: cryobiology_spanish_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 38674
num_examples: 146
- name: train
num_bytes: 31129
num_examples: 117
- name: validation
num_bytes: 7545
num_examples: 29
download_size: 0
dataset_size: 77348
- config_name: cryptonite_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 2844402
num_examples: 26157
- name: train
num_bytes: 2275724
num_examples: 20926
- name: validation
num_bytes: 568678
num_examples: 5231
download_size: 0
dataset_size: 5688804
- config_name: cs_algorithms_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 272435
num_examples: 1320
- name: train
num_bytes: 218192
num_examples: 1056
- name: validation
num_bytes: 54243
num_examples: 264
download_size: 0
dataset_size: 544870
- config_name: dark_humor_detection_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 26556
num_examples: 80
- name: train
num_bytes: 21267
num_examples: 64
- name: validation
num_bytes: 5289
num_examples: 16
download_size: 0
dataset_size: 53112
- config_name: date_understanding_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 94908
num_examples: 369
- name: train
num_bytes: 76165
num_examples: 296
- name: validation
num_bytes: 18743
num_examples: 73
download_size: 0
dataset_size: 189816
- config_name: disambiguation_qa_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 122471
num_examples: 258
- name: train
num_bytes: 98687
num_examples: 207
- name: validation
num_bytes: 23784
num_examples: 51
download_size: 0
dataset_size: 244942
- config_name: discourse_marker_prediction_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 2090684
num_examples: 857
- name: train
num_bytes: 1666052
num_examples: 686
- name: validation
num_bytes: 424632
num_examples: 171
download_size: 0
dataset_size: 4181368
- config_name: disfl_qa_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 7964775
num_examples: 8000
- name: train
num_bytes: 6376511
num_examples: 6400
- name: validation
num_bytes: 1588264
num_examples: 1600
download_size: 0
dataset_size: 15929550
- config_name: dyck_languages_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 1227916
num_examples: 1000
- name: train
num_bytes: 982680
num_examples: 800
- name: validation
num_bytes: 245236
num_examples: 200
download_size: 0
dataset_size: 2455832
- config_name: elementary_math_qa_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 13442550
num_examples: 38160
- name: train
num_bytes: 10766969
num_examples: 30531
- name: validation
num_bytes: 2675581
num_examples: 7629
download_size: 0
dataset_size: 26885100
- config_name: emoji_movie_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 33667
num_examples: 100
- name: train
num_bytes: 26987
num_examples: 80
- name: validation
num_bytes: 6680
num_examples: 20
download_size: 0
dataset_size: 67334
- config_name: emojis_emotion_prediction_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 47983
num_examples: 131
- name: train
num_bytes: 38458
num_examples: 105
- name: validation
num_bytes: 9525
num_examples: 26
download_size: 0
dataset_size: 95966
- config_name: empirical_judgments_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 47499
num_examples: 99
- name: train
num_bytes: 38346
num_examples: 80
- name: validation
num_bytes: 9153
num_examples: 19
download_size: 0
dataset_size: 94998
- config_name: english_proverbs_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 22530
num_examples: 34
- name: train
num_bytes: 12066
num_examples: 18
- name: validation
num_bytes: 10464
num_examples: 16
download_size: 0
dataset_size: 45060
- config_name: english_russian_proverbs_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 59900
num_examples: 80
- name: train
num_bytes: 48051
num_examples: 64
- name: validation
num_bytes: 11849
num_examples: 16
download_size: 0
dataset_size: 119800
- config_name: entailed_polarity_hindi_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 57052
num_examples: 138
- name: train
num_bytes: 45829
num_examples: 111
- name: validation
num_bytes: 11223
num_examples: 27
download_size: 0
dataset_size: 114104
- config_name: entailed_polarity_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 25421
num_examples: 148
- name: train
num_bytes: 20350
num_examples: 119
- name: validation
num_bytes: 5071
num_examples: 29
download_size: 0
dataset_size: 50842
- config_name: epistemic_reasoning_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 887158
num_examples: 2000
- name: train
num_bytes: 710107
num_examples: 1600
- name: validation
num_bytes: 177051
num_examples: 400
download_size: 0
dataset_size: 1774316
- config_name: evaluating_information_essentiality_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 77488
num_examples: 68
- name: train
num_bytes: 59596
num_examples: 52
- name: validation
num_bytes: 17892
num_examples: 16
download_size: 0
dataset_size: 154976
- config_name: fact_checker_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 1337384
num_examples: 7154
- name: train
num_bytes: 1070750
num_examples: 5724
- name: validation
num_bytes: 266634
num_examples: 1430
download_size: 0
dataset_size: 2674768
- config_name: fantasy_reasoning_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 75886
num_examples: 201
- name: train
num_bytes: 61398
num_examples: 161
- name: validation
num_bytes: 14488
num_examples: 40
download_size: 0
dataset_size: 151772
- config_name: few_shot_nlg_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 75937
num_examples: 153
- name: train
num_bytes: 61862
num_examples: 123
- name: validation
num_bytes: 14075
num_examples: 30
download_size: 0
dataset_size: 151874
- config_name: figure_of_speech_detection_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 21717
num_examples: 59
- name: train
num_bytes: 15962
num_examples: 43
- name: validation
num_bytes: 5755
num_examples: 16
download_size: 0
dataset_size: 43434
- config_name: formal_fallacies_syllogisms_negation_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 8314653
num_examples: 14200
- name: train
num_bytes: 6652955
num_examples: 11360
- name: validation
num_bytes: 1661698
num_examples: 2840
download_size: 0
dataset_size: 16629306
- config_name: gem_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 36065281
num_examples: 14802
- name: train
num_bytes: 28819497
num_examples: 11845
- name: validation
num_bytes: 7245784
num_examples: 2957
download_size: 0
dataset_size: 72130562
- config_name: gender_inclusive_sentences_german_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 126881
num_examples: 200
- name: train
num_bytes: 100628
num_examples: 160
- name: validation
num_bytes: 26253
num_examples: 40
download_size: 0
dataset_size: 253762
- config_name: general_knowledge_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 21828
num_examples: 70
- name: train
num_bytes: 16818
num_examples: 54
- name: validation
num_bytes: 5010
num_examples: 16
download_size: 0
dataset_size: 43656
- config_name: geometric_shapes_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 180094
num_examples: 359
- name: train
num_bytes: 144602
num_examples: 288
- name: validation
num_bytes: 35492
num_examples: 71
download_size: 0
dataset_size: 360188
- config_name: goal_step_wikihow_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 3567615
num_examples: 7053
- name: train
num_bytes: 2853871
num_examples: 5643
- name: validation
num_bytes: 713744
num_examples: 1410
download_size: 0
dataset_size: 7135230
- config_name: gre_reading_comprehension_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 94273
num_examples: 31
- name: train
num_bytes: 44458
num_examples: 15
- name: validation
num_bytes: 49815
num_examples: 16
download_size: 0
dataset_size: 188546
- config_name: hhh_alignment_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 272898
num_examples: 221
- name: train
num_bytes: 212488
num_examples: 179
- name: validation
num_bytes: 60410
num_examples: 42
download_size: 0
dataset_size: 545796
- config_name: hindi_question_answering_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 15154954
num_examples: 6610
- name: train
num_bytes: 11983837
num_examples: 5288
- name: validation
num_bytes: 3171117
num_examples: 1322
download_size: 0
dataset_size: 30309908
- config_name: hindu_knowledge_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 44092
num_examples: 175
- name: train
num_bytes: 35392
num_examples: 140
- name: validation
num_bytes: 8700
num_examples: 35
download_size: 0
dataset_size: 88184
- config_name: hinglish_toxicity_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 60613
num_examples: 200
- name: train
num_bytes: 49997
num_examples: 160
- name: validation
num_bytes: 10616
num_examples: 40
download_size: 0
dataset_size: 121226
- config_name: human_organs_senses_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 7944
num_examples: 42
- name: train
num_bytes: 4873
num_examples: 26
- name: validation
num_bytes: 3071
num_examples: 16
download_size: 0
dataset_size: 15888
- config_name: hyperbaton_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 9383986
num_examples: 50000
- name: train
num_bytes: 7509334
num_examples: 40000
- name: validation
num_bytes: 1874652
num_examples: 10000
download_size: 0
dataset_size: 18767972
- config_name: identify_math_theorems_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 104841
num_examples: 53
- name: train
num_bytes: 70295
num_examples: 37
- name: validation
num_bytes: 34546
num_examples: 16
download_size: 0
dataset_size: 209682
- config_name: identify_odd_metaphor_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 27602
num_examples: 47
- name: train
num_bytes: 18138
num_examples: 31
- name: validation
num_bytes: 9464
num_examples: 16
download_size: 0
dataset_size: 55204
- config_name: implicatures_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 91683
num_examples: 492
- name: train
num_bytes: 73416
num_examples: 394
- name: validation
num_bytes: 18267
num_examples: 98
download_size: 0
dataset_size: 183366
- config_name: implicit_relations_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 79710
num_examples: 85
- name: train
num_bytes: 64346
num_examples: 68
- name: validation
num_bytes: 15364
num_examples: 17
download_size: 0
dataset_size: 159420
- config_name: intent_recognition_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 322371
num_examples: 693
- name: train
num_bytes: 257864
num_examples: 555
- name: validation
num_bytes: 64507
num_examples: 138
download_size: 0
dataset_size: 644742
- config_name: international_phonetic_alphabet_nli_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 79320
num_examples: 126
- name: train
num_bytes: 63288
num_examples: 101
- name: validation
num_bytes: 16032
num_examples: 25
download_size: 0
dataset_size: 158640
- config_name: international_phonetic_alphabet_transliterate_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 275938
num_examples: 1003
- name: train
num_bytes: 220784
num_examples: 803
- name: validation
num_bytes: 55154
num_examples: 200
download_size: 0
dataset_size: 551876
- config_name: intersect_geometry_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 211674752
num_examples: 249999
- name: train
num_bytes: 169332898
num_examples: 200000
- name: validation
num_bytes: 42341854
num_examples: 49999
download_size: 0
dataset_size: 423349504
- config_name: irony_identification_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 28178
num_examples: 99
- name: train
num_bytes: 22918
num_examples: 80
- name: validation
num_bytes: 5260
num_examples: 19
download_size: 0
dataset_size: 56356
- config_name: kanji_ascii_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 366946
num_examples: 1092
- name: train
num_bytes: 293933
num_examples: 875
- name: validation
num_bytes: 73013
num_examples: 217
download_size: 0
dataset_size: 733892
- config_name: kannada_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 140638
num_examples: 316
- name: train
num_bytes: 111865
num_examples: 253
- name: validation
num_bytes: 28773
num_examples: 63
download_size: 0
dataset_size: 281276
- config_name: key_value_maps_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 105136
num_examples: 101
- name: train
num_bytes: 84317
num_examples: 80
- name: validation
num_bytes: 20819
num_examples: 21
download_size: 0
dataset_size: 210272
- config_name: known_unknowns_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 7960
num_examples: 46
- name: train
num_bytes: 5130
num_examples: 30
- name: validation
num_bytes: 2830
num_examples: 16
download_size: 0
dataset_size: 15920
- config_name: language_games_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 979619
num_examples: 2128
- name: train
num_bytes: 783111
num_examples: 1704
- name: validation
num_bytes: 196508
num_examples: 424
download_size: 0
dataset_size: 1959238
- config_name: language_identification_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 7376223
num_examples: 10000
- name: train
num_bytes: 5908808
num_examples: 8000
- name: validation
num_bytes: 1467415
num_examples: 2000
download_size: 0
dataset_size: 14752446
- config_name: linguistic_mappings_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 1325186
num_examples: 15527
- name: train
num_bytes: 1060088
num_examples: 12426
- name: validation
num_bytes: 265098
num_examples: 3101
download_size: 0
dataset_size: 2650372
- config_name: linguistics_puzzles_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 1746024
num_examples: 2000
- name: train
num_bytes: 1398113
num_examples: 1600
- name: validation
num_bytes: 347911
num_examples: 400
download_size: 0
dataset_size: 3492048
- config_name: list_functions_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 2678136
num_examples: 10750
- name: train
num_bytes: 2161065
num_examples: 8700
- name: validation
num_bytes: 517071
num_examples: 2050
download_size: 0
dataset_size: 5356272
- config_name: logic_grid_puzzle_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 1456218
num_examples: 1000
- name: train
num_bytes: 1160137
num_examples: 800
- name: validation
num_bytes: 296081
num_examples: 200
download_size: 0
dataset_size: 2912436
- config_name: logical_args_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 43582
num_examples: 32
- name: train
num_bytes: 21072
num_examples: 16
- name: validation
num_bytes: 22510
num_examples: 16
download_size: 0
dataset_size: 87164
- config_name: logical_deduction_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 1056716
num_examples: 1500
- name: train
num_bytes: 841788
num_examples: 1200
- name: validation
num_bytes: 214928
num_examples: 300
download_size: 0
dataset_size: 2113432
- config_name: logical_fallacy_detection_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 720286
num_examples: 2800
- name: train
num_bytes: 576295
num_examples: 2240
- name: validation
num_bytes: 143991
num_examples: 560
download_size: 0
dataset_size: 1440572
- config_name: logical_sequence_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 22722
num_examples: 39
- name: train
num_bytes: 12648
num_examples: 23
- name: validation
num_bytes: 10074
num_examples: 16
download_size: 0
dataset_size: 45444
- config_name: mathematical_induction_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 19018
num_examples: 69
- name: train
num_bytes: 14983
num_examples: 53
- name: validation
num_bytes: 4035
num_examples: 16
download_size: 0
dataset_size: 38036
- config_name: matrixshapes_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 1130574
num_examples: 4462
- name: train
num_bytes: 906061
num_examples: 3570
- name: validation
num_bytes: 224513
num_examples: 892
download_size: 0
dataset_size: 2261148
- config_name: metaphor_boolean_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 213848
num_examples: 680
- name: train
num_bytes: 170765
num_examples: 544
- name: validation
num_bytes: 43083
num_examples: 136
download_size: 0
dataset_size: 427696
- config_name: metaphor_understanding_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 200862
num_examples: 234
- name: train
num_bytes: 162101
num_examples: 188
- name: validation
num_bytes: 38761
num_examples: 46
download_size: 0
dataset_size: 401724
- config_name: minute_mysteries_qa_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 3245190
num_examples: 477
- name: train
num_bytes: 2623703
num_examples: 383
- name: validation
num_bytes: 621487
num_examples: 94
download_size: 0
dataset_size: 6490380
- config_name: misconceptions_russian_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 16991
num_examples: 49
- name: train
num_bytes: 10970
num_examples: 33
- name: validation
num_bytes: 6021
num_examples: 16
download_size: 0
dataset_size: 33982
- config_name: misconceptions_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 45816
num_examples: 219
- name: train
num_bytes: 37246
num_examples: 176
- name: validation
num_bytes: 8570
num_examples: 43
download_size: 0
dataset_size: 91632
- config_name: mnist_ascii_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 61739808
num_examples: 69984
- name: train
num_bytes: 49419928
num_examples: 55988
- name: validation
num_bytes: 12319880
num_examples: 13996
download_size: 0
dataset_size: 123479616
- config_name: modified_arithmetic_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 1220993
num_examples: 6000
- name: train
num_bytes: 976859
num_examples: 4800
- name: validation
num_bytes: 244134
num_examples: 1200
download_size: 0
dataset_size: 2441986
- config_name: moral_permissibility_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 162068
num_examples: 342
- name: train
num_bytes: 128790
num_examples: 274
- name: validation
num_bytes: 33278
num_examples: 68
download_size: 0
dataset_size: 324136
- config_name: movie_dialog_same_or_different_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 28645997
num_examples: 50000
- name: train
num_bytes: 22889061
num_examples: 40000
- name: validation
num_bytes: 5756936
num_examples: 10000
download_size: 0
dataset_size: 57291994
- config_name: movie_recommendation_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 173557
num_examples: 500
- name: train
num_bytes: 138936
num_examples: 400
- name: validation
num_bytes: 34621
num_examples: 100
download_size: 0
dataset_size: 347114
- config_name: mult_data_wrangling_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 625422
num_examples: 7854
- name: train
num_bytes: 507838
num_examples: 6380
- name: validation
num_bytes: 117584
num_examples: 1474
download_size: 0
dataset_size: 1250844
- config_name: multiemo_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 650173925
num_examples: 1437281
- name: train
num_bytes: 520172185
num_examples: 1149873
- name: validation
num_bytes: 130001740
num_examples: 287408
download_size: 0
dataset_size: 1300347850
- config_name: natural_instructions_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 355938370
num_examples: 193250
- name: train
num_bytes: 284920096
num_examples: 154615
- name: validation
num_bytes: 71018274
num_examples: 38635
download_size: 0
dataset_size: 711876740
- config_name: navigate_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 225813
num_examples: 1000
- name: train
num_bytes: 180958
num_examples: 800
- name: validation
num_bytes: 44855
num_examples: 200
download_size: 83744
dataset_size: 451626
- config_name: nonsense_words_grammar_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 11102
num_examples: 50
- name: train
num_bytes: 7582
num_examples: 34
- name: validation
num_bytes: 3520
num_examples: 16
download_size: 24107
dataset_size: 22204
- config_name: novel_concepts_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 16065
num_examples: 32
- name: train
num_bytes: 8128
num_examples: 16
- name: validation
num_bytes: 7937
num_examples: 16
download_size: 25919
dataset_size: 32130
- config_name: object_counting_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 149555
num_examples: 1000
- name: train
num_bytes: 119609
num_examples: 800
- name: validation
num_bytes: 29946
num_examples: 200
download_size: 91852
dataset_size: 299110
- config_name: odd_one_out_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 13843
num_examples: 86
- name: train
num_bytes: 11217
num_examples: 69
- name: validation
num_bytes: 2626
num_examples: 17
download_size: 25796
dataset_size: 27686
- config_name: operators_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 32435
num_examples: 210
- name: train
num_bytes: 25937
num_examples: 168
- name: validation
num_bytes: 6498
num_examples: 42
download_size: 24728
dataset_size: 64870
- config_name: paragraph_segmentation_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 56846507
num_examples: 9000
- name: train
num_bytes: 45674320
num_examples: 7200
- name: validation
num_bytes: 11172187
num_examples: 1800
download_size: 61123049
dataset_size: 113693014
- config_name: parsinlu_qa_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 456189
num_examples: 1050
- name: train
num_bytes: 366577
num_examples: 840
- name: validation
num_bytes: 89612
num_examples: 210
download_size: 465963
dataset_size: 912378
- config_name: parsinlu_reading_comprehension_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 573798
num_examples: 518
- name: train
num_bytes: 455828
num_examples: 415
- name: validation
num_bytes: 117970
num_examples: 103
download_size: 572992
dataset_size: 1147596
- config_name: penguins_in_a_table_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 75985
num_examples: 149
- name: train
num_bytes: 61321
num_examples: 120
- name: validation
num_bytes: 14664
num_examples: 29
download_size: 32039
dataset_size: 151970
- config_name: periodic_elements_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 285204
num_examples: 654
- name: train
num_bytes: 229481
num_examples: 524
- name: validation
num_bytes: 55723
num_examples: 130
download_size: 41084
dataset_size: 570408
- config_name: persian_idioms_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 28592
num_examples: 66
- name: train
num_bytes: 21684
num_examples: 50
- name: validation
num_bytes: 6908
num_examples: 16
download_size: 34341
dataset_size: 57184
- config_name: phrase_relatedness_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 30190
num_examples: 100
- name: train
num_bytes: 23773
num_examples: 80
- name: validation
num_bytes: 6417
num_examples: 20
download_size: 40334
dataset_size: 60380
- config_name: physical_intuition_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 23734
num_examples: 81
- name: train
num_bytes: 19307
num_examples: 65
- name: validation
num_bytes: 4427
num_examples: 16
download_size: 28462
dataset_size: 47468
- config_name: physics_questions_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 18372
num_examples: 54
- name: train
num_bytes: 13402
num_examples: 38
- name: validation
num_bytes: 4970
num_examples: 16
download_size: 35187
dataset_size: 36744
- config_name: physics_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 120239
num_examples: 229
- name: train
num_bytes: 96122
num_examples: 184
- name: validation
num_bytes: 24117
num_examples: 45
download_size: 69885
dataset_size: 240478
- config_name: play_dialog_same_or_different_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 3142468
num_examples: 3264
- name: train
num_bytes: 2516052
num_examples: 2612
- name: validation
num_bytes: 626416
num_examples: 652
download_size: 1710264
dataset_size: 6284936
- config_name: polish_sequence_labeling_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 18081112
num_examples: 12812
- name: train
num_bytes: 14470720
num_examples: 10250
- name: validation
num_bytes: 3610392
num_examples: 2562
download_size: 5242934
dataset_size: 36162224
- config_name: presuppositions_as_nli_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 502522
num_examples: 735
- name: train
num_bytes: 400761
num_examples: 588
- name: validation
num_bytes: 101761
num_examples: 147
download_size: 240065
dataset_size: 1005044
- config_name: qa_wikidata_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 1419042
num_examples: 20321
- name: train
num_bytes: 1134918
num_examples: 16257
- name: validation
num_bytes: 284124
num_examples: 4064
download_size: 1181835
dataset_size: 2838084
- config_name: question_selection_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 2487181
num_examples: 1582
- name: train
num_bytes: 1990094
num_examples: 1266
- name: validation
num_bytes: 497087
num_examples: 316
download_size: 1804283
dataset_size: 4974362
- config_name: real_or_fake_text_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 53663318
num_examples: 15088
- name: train
num_bytes: 42879846
num_examples: 12072
- name: validation
num_bytes: 10783472
num_examples: 3016
download_size: 47399045
dataset_size: 107326636
- config_name: reasoning_about_colored_objects_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 907474
num_examples: 2000
- name: train
num_bytes: 729609
num_examples: 1600
- name: validation
num_bytes: 177865
num_examples: 400
download_size: 273263
dataset_size: 1814948
- config_name: repeat_copy_logic_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 6678
num_examples: 32
- name: train
num_bytes: 3327
num_examples: 16
- name: validation
num_bytes: 3351
num_examples: 16
download_size: 18315
dataset_size: 13356
- config_name: rephrase_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 34222
num_examples: 78
- name: train
num_bytes: 27360
num_examples: 62
- name: validation
num_bytes: 6862
num_examples: 16
download_size: 41102
dataset_size: 68444
- config_name: riddle_sense_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 15507
num_examples: 49
- name: train
num_bytes: 10741
num_examples: 33
- name: validation
num_bytes: 4766
num_examples: 16
download_size: 32496
dataset_size: 31014
- config_name: ruin_names_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 144087
num_examples: 448
- name: train
num_bytes: 115171
num_examples: 359
- name: validation
num_bytes: 28916
num_examples: 89
download_size: 118193
dataset_size: 288174
- config_name: salient_translation_error_detection_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 1141626
num_examples: 998
- name: train
num_bytes: 912819
num_examples: 799
- name: validation
num_bytes: 228807
num_examples: 199
download_size: 413634
dataset_size: 2283252
- config_name: scientific_press_release_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 13690
num_examples: 50
- name: train
num_bytes: 9254
num_examples: 34
- name: validation
num_bytes: 4436
num_examples: 16
download_size: 27293
dataset_size: 27380
- config_name: semantic_parsing_in_context_sparc_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 1524852
num_examples: 1155
- name: train
num_bytes: 1248391
num_examples: 924
- name: validation
num_bytes: 276461
num_examples: 231
download_size: 440326
dataset_size: 3049704
- config_name: semantic_parsing_spider_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 1265744
num_examples: 1034
- name: train
num_bytes: 973864
num_examples: 828
- name: validation
num_bytes: 291880
num_examples: 206
download_size: 358276
dataset_size: 2531488
- config_name: sentence_ambiguity_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 8168
num_examples: 60
- name: train
num_bytes: 5976
num_examples: 44
- name: validation
num_bytes: 2192
num_examples: 16
download_size: 18275
dataset_size: 16336
- config_name: similarities_abstraction_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 23416
num_examples: 76
- name: train
num_bytes: 18545
num_examples: 60
- name: validation
num_bytes: 4871
num_examples: 16
download_size: 31521
dataset_size: 46832
- config_name: simp_turing_concept_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 1017646
num_examples: 6390
- name: train
num_bytes: 813220
num_examples: 5112
- name: validation
num_bytes: 204426
num_examples: 1278
download_size: 402574
dataset_size: 2035292
- config_name: simple_arithmetic_json_multiple_choice_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 924
num_examples: 8
- name: train
num_bytes: 0
num_examples: 0
- name: validation
num_bytes: 0
num_examples: 0
download_size: 7777
dataset_size: 924
- config_name: simple_arithmetic_json_subtasks_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 1145
num_examples: 30
- name: train
num_bytes: 571
num_examples: 15
- name: validation
num_bytes: 574
num_examples: 15
download_size: 10460
dataset_size: 2290
- config_name: simple_arithmetic_json_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 1145
num_examples: 30
- name: train
num_bytes: 540
num_examples: 14
- name: validation
num_bytes: 605
num_examples: 16
download_size: 10645
dataset_size: 2290
- config_name: simple_arithmetic_multiple_targets_json_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 414
num_examples: 10
- name: train
num_bytes: 0
num_examples: 0
- name: validation
num_bytes: 0
num_examples: 0
download_size: 7352
dataset_size: 414
- config_name: simple_ethical_questions_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 76518
num_examples: 115
- name: train
num_bytes: 60275
num_examples: 92
- name: validation
num_bytes: 16243
num_examples: 23
download_size: 81285
dataset_size: 153036
- config_name: simple_text_editing_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 27865
num_examples: 47
- name: train
num_bytes: 18469
num_examples: 31
- name: validation
num_bytes: 9396
num_examples: 16
download_size: 27100
dataset_size: 55730
- config_name: snarks_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 45717
num_examples: 181
- name: train
num_bytes: 36989
num_examples: 145
- name: validation
num_bytes: 8728
num_examples: 36
download_size: 45434
dataset_size: 91434
- config_name: social_iqa_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 643162
num_examples: 1935
- name: train
num_bytes: 515686
num_examples: 1548
- name: validation
num_bytes: 127476
num_examples: 387
download_size: 684043
dataset_size: 1286324
- config_name: social_support_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 366705
num_examples: 897
- name: train
num_bytes: 294793
num_examples: 718
- name: validation
num_bytes: 71912
num_examples: 179
download_size: 288867
dataset_size: 733410
- config_name: sports_understanding_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 226654
num_examples: 986
- name: train
num_bytes: 181328
num_examples: 789
- name: validation
num_bytes: 45326
num_examples: 197
download_size: 82415
dataset_size: 453308
- config_name: strange_stories_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 120500
num_examples: 174
- name: train
num_bytes: 98055
num_examples: 140
- name: validation
num_bytes: 22445
num_examples: 34
download_size: 106428
dataset_size: 241000
- config_name: strategyqa_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 659967
num_examples: 2289
- name: train
num_bytes: 527670
num_examples: 1832
- name: validation
num_bytes: 132297
num_examples: 457
download_size: 814405
dataset_size: 1319934
- config_name: sufficient_information_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 9425
num_examples: 39
- name: train
num_bytes: 5594
num_examples: 23
- name: validation
num_bytes: 3831
num_examples: 16
download_size: 17766
dataset_size: 18850
- config_name: suicide_risk_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 37952
num_examples: 40
- name: train
num_bytes: 23067
num_examples: 24
- name: validation
num_bytes: 14885
num_examples: 16
download_size: 60518
dataset_size: 75904
- config_name: swahili_english_proverbs_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 90246
num_examples: 153
- name: train
num_bytes: 72467
num_examples: 123
- name: validation
num_bytes: 17779
num_examples: 30
download_size: 95186
dataset_size: 180492
- config_name: swedish_to_german_proverbs_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 35204
num_examples: 72
- name: train
num_bytes: 27266
num_examples: 56
- name: validation
num_bytes: 7938
num_examples: 16
download_size: 55102
dataset_size: 70408
- config_name: symbol_interpretation_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 1148958
num_examples: 990
- name: train
num_bytes: 927326
num_examples: 795
- name: validation
num_bytes: 221632
num_examples: 195
download_size: 320412
dataset_size: 2297916
- config_name: temporal_sequences_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 687086
num_examples: 1000
- name: train
num_bytes: 549808
num_examples: 800
- name: validation
num_bytes: 137278
num_examples: 200
download_size: 295316
dataset_size: 1374172
- config_name: tense_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 43882
num_examples: 286
- name: train
num_bytes: 35466
num_examples: 229
- name: validation
num_bytes: 8416
num_examples: 57
download_size: 51466
dataset_size: 87764
- config_name: timedial_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 2763178
num_examples: 2550
- name: train
num_bytes: 2217190
num_examples: 2040
- name: validation
num_bytes: 545988
num_examples: 510
download_size: 2444115
dataset_size: 5526356
- config_name: topical_chat_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 30927758
num_examples: 22295
- name: train
num_bytes: 24827254
num_examples: 17836
- name: validation
num_bytes: 6100504
num_examples: 4459
download_size: 23505731
dataset_size: 61855516
- config_name: tracking_shuffled_objects_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 2775972
num_examples: 3750
- name: train
num_bytes: 2224037
num_examples: 3000
- name: validation
num_bytes: 551935
num_examples: 750
download_size: 738413
dataset_size: 5551944
- config_name: understanding_fables_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 227748
num_examples: 189
- name: train
num_bytes: 181000
num_examples: 152
- name: validation
num_bytes: 46748
num_examples: 37
download_size: 237036
dataset_size: 455496
- config_name: undo_permutation_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 196118
num_examples: 300
- name: train
num_bytes: 158562
num_examples: 240
- name: validation
num_bytes: 37556
num_examples: 60
download_size: 137204
dataset_size: 392236
- config_name: unit_conversion_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 4028628
num_examples: 23936
- name: train
num_bytes: 3230357
num_examples: 19151
- name: validation
num_bytes: 798271
num_examples: 4785
download_size: 3208622
dataset_size: 8057256
- config_name: unit_interpretation_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 37363
num_examples: 100
- name: train
num_bytes: 29939
num_examples: 80
- name: validation
num_bytes: 7424
num_examples: 20
download_size: 34926
dataset_size: 74726
- config_name: unnatural_in_context_learning_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 4599760
num_examples: 73420
- name: train
num_bytes: 3679822
num_examples: 58736
- name: validation
num_bytes: 919938
num_examples: 14684
download_size: 3840657
dataset_size: 9199520
- config_name: vitaminc_fact_verification_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 32361818
num_examples: 54668
- name: train
num_bytes: 25889850
num_examples: 43735
- name: validation
num_bytes: 6471968
num_examples: 10933
download_size: 14264790
dataset_size: 64723636
- config_name: what_is_the_tao_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 13268
num_examples: 36
- name: train
num_bytes: 7435
num_examples: 20
- name: validation
num_bytes: 5833
num_examples: 16
download_size: 27585
dataset_size: 26536
- config_name: which_wiki_edit_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 6331683
num_examples: 571
- name: train
num_bytes: 5233870
num_examples: 457
- name: validation
num_bytes: 1097813
num_examples: 114
download_size: 3914574
dataset_size: 12663366
- config_name: winowhy_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 1002434
num_examples: 2862
- name: train
num_bytes: 800520
num_examples: 2290
- name: validation
num_bytes: 201914
num_examples: 572
download_size: 449218
dataset_size: 2004868
- config_name: word_sorting_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 491054
num_examples: 1900
- name: train
num_bytes: 392738
num_examples: 1520
- name: validation
num_bytes: 98316
num_examples: 380
download_size: 641536
dataset_size: 982108
- config_name: word_unscrambling_zero_shot
features:
- name: idx
dtype: int32
- name: inputs
dtype: string
- name: targets
sequence: string
- name: multiple_choice_targets
sequence: string
- name: multiple_choice_scores
sequence: int32
splits:
- name: default
num_bytes: 882364
num_examples: 8917
- name: train
num_bytes: 705755
num_examples: 7134
- name: validation
num_bytes: 176609
num_examples: 1783
download_size: 563799
dataset_size: 1764728
configs:
- config_name: abstract_narrative_understanding_zero_shot
data_files:
- split: default
path: abstract_narrative_understanding_zero_shot/default-*
- split: train
path: abstract_narrative_understanding_zero_shot/train-*
- split: validation
path: abstract_narrative_understanding_zero_shot/validation-*
- config_name: anachronisms_zero_shot
data_files:
- split: default
path: anachronisms_zero_shot/default-*
- split: train
path: anachronisms_zero_shot/train-*
- split: validation
path: anachronisms_zero_shot/validation-*
- config_name: analogical_similarity_zero_shot
data_files:
- split: default
path: analogical_similarity_zero_shot/default-*
- split: train
path: analogical_similarity_zero_shot/train-*
- split: validation
path: analogical_similarity_zero_shot/validation-*
- config_name: analytic_entailment_zero_shot
data_files:
- split: default
path: analytic_entailment_zero_shot/default-*
- split: train
path: analytic_entailment_zero_shot/train-*
- split: validation
path: analytic_entailment_zero_shot/validation-*
- config_name: arithmetic_zero_shot
data_files:
- split: default
path: arithmetic_zero_shot/default-*
- split: train
path: arithmetic_zero_shot/train-*
- split: validation
path: arithmetic_zero_shot/validation-*
- config_name: ascii_word_recognition_zero_shot
data_files:
- split: default
path: ascii_word_recognition_zero_shot/default-*
- split: train
path: ascii_word_recognition_zero_shot/train-*
- split: validation
path: ascii_word_recognition_zero_shot/validation-*
- config_name: authorship_verification_zero_shot
data_files:
- split: default
path: authorship_verification_zero_shot/default-*
- split: train
path: authorship_verification_zero_shot/train-*
- split: validation
path: authorship_verification_zero_shot/validation-*
- config_name: auto_categorization_zero_shot
data_files:
- split: default
path: auto_categorization_zero_shot/default-*
- split: train
path: auto_categorization_zero_shot/train-*
- split: validation
path: auto_categorization_zero_shot/validation-*
- config_name: auto_debugging_zero_shot
data_files:
- split: default
path: auto_debugging_zero_shot/default-*
- split: train
path: auto_debugging_zero_shot/train-*
- split: validation
path: auto_debugging_zero_shot/validation-*
- config_name: bbq_lite_json_zero_shot
data_files:
- split: default
path: bbq_lite_json_zero_shot/default-*
- split: train
path: bbq_lite_json_zero_shot/train-*
- split: validation
path: bbq_lite_json_zero_shot/validation-*
- config_name: bridging_anaphora_resolution_barqa_zero_shot
data_files:
- split: default
path: bridging_anaphora_resolution_barqa_zero_shot/default-*
- split: train
path: bridging_anaphora_resolution_barqa_zero_shot/train-*
- split: validation
path: bridging_anaphora_resolution_barqa_zero_shot/validation-*
- config_name: causal_judgment_zero_shot
data_files:
- split: default
path: causal_judgment_zero_shot/default-*
- split: train
path: causal_judgment_zero_shot/train-*
- split: validation
path: causal_judgment_zero_shot/validation-*
- config_name: cause_and_effect_zero_shot
data_files:
- split: default
path: cause_and_effect_zero_shot/default-*
- split: train
path: cause_and_effect_zero_shot/train-*
- split: validation
path: cause_and_effect_zero_shot/validation-*
- config_name: checkmate_in_one_zero_shot
data_files:
- split: default
path: checkmate_in_one_zero_shot/default-*
- split: train
path: checkmate_in_one_zero_shot/train-*
- split: validation
path: checkmate_in_one_zero_shot/validation-*
- config_name: chess_state_tracking_zero_shot
data_files:
- split: default
path: chess_state_tracking_zero_shot/default-*
- split: train
path: chess_state_tracking_zero_shot/train-*
- split: validation
path: chess_state_tracking_zero_shot/validation-*
- config_name: chinese_remainder_theorem_zero_shot
data_files:
- split: default
path: chinese_remainder_theorem_zero_shot/default-*
- split: train
path: chinese_remainder_theorem_zero_shot/train-*
- split: validation
path: chinese_remainder_theorem_zero_shot/validation-*
- config_name: cifar10_classification_zero_shot
data_files:
- split: default
path: cifar10_classification_zero_shot/default-*
- split: train
path: cifar10_classification_zero_shot/train-*
- split: validation
path: cifar10_classification_zero_shot/validation-*
- config_name: code_line_description_zero_shot
data_files:
- split: default
path: code_line_description_zero_shot/default-*
- split: train
path: code_line_description_zero_shot/train-*
- split: validation
path: code_line_description_zero_shot/validation-*
- config_name: codenames_zero_shot
data_files:
- split: default
path: codenames_zero_shot/default-*
- split: train
path: codenames_zero_shot/train-*
- split: validation
path: codenames_zero_shot/validation-*
- config_name: color_zero_shot
data_files:
- split: default
path: color_zero_shot/default-*
- split: train
path: color_zero_shot/train-*
- split: validation
path: color_zero_shot/validation-*
- config_name: common_morpheme_zero_shot
data_files:
- split: default
path: common_morpheme_zero_shot/default-*
- split: train
path: common_morpheme_zero_shot/train-*
- split: validation
path: common_morpheme_zero_shot/validation-*
- config_name: conceptual_combinations_zero_shot
data_files:
- split: default
path: conceptual_combinations_zero_shot/default-*
- split: train
path: conceptual_combinations_zero_shot/train-*
- split: validation
path: conceptual_combinations_zero_shot/validation-*
- config_name: conlang_translation_zero_shot
data_files:
- split: default
path: conlang_translation_zero_shot/default-*
- split: train
path: conlang_translation_zero_shot/train-*
- split: validation
path: conlang_translation_zero_shot/validation-*
- config_name: contextual_parametric_knowledge_conflicts_zero_shot
data_files:
- split: default
path: contextual_parametric_knowledge_conflicts_zero_shot/default-*
- split: train
path: contextual_parametric_knowledge_conflicts_zero_shot/train-*
- split: validation
path: contextual_parametric_knowledge_conflicts_zero_shot/validation-*
- config_name: crash_blossom_zero_shot
data_files:
- split: default
path: crash_blossom_zero_shot/default-*
- split: train
path: crash_blossom_zero_shot/train-*
- split: validation
path: crash_blossom_zero_shot/validation-*
- config_name: crass_ai_zero_shot
data_files:
- split: default
path: crass_ai_zero_shot/default-*
- split: train
path: crass_ai_zero_shot/train-*
- split: validation
path: crass_ai_zero_shot/validation-*
- config_name: cryobiology_spanish_zero_shot
data_files:
- split: default
path: cryobiology_spanish_zero_shot/default-*
- split: train
path: cryobiology_spanish_zero_shot/train-*
- split: validation
path: cryobiology_spanish_zero_shot/validation-*
- config_name: cryptonite_zero_shot
data_files:
- split: default
path: cryptonite_zero_shot/default-*
- split: train
path: cryptonite_zero_shot/train-*
- split: validation
path: cryptonite_zero_shot/validation-*
- config_name: cs_algorithms_zero_shot
data_files:
- split: default
path: cs_algorithms_zero_shot/default-*
- split: train
path: cs_algorithms_zero_shot/train-*
- split: validation
path: cs_algorithms_zero_shot/validation-*
- config_name: dark_humor_detection_zero_shot
data_files:
- split: default
path: dark_humor_detection_zero_shot/default-*
- split: train
path: dark_humor_detection_zero_shot/train-*
- split: validation
path: dark_humor_detection_zero_shot/validation-*
- config_name: date_understanding_zero_shot
data_files:
- split: default
path: date_understanding_zero_shot/default-*
- split: train
path: date_understanding_zero_shot/train-*
- split: validation
path: date_understanding_zero_shot/validation-*
- config_name: disambiguation_qa_zero_shot
data_files:
- split: default
path: disambiguation_qa_zero_shot/default-*
- split: train
path: disambiguation_qa_zero_shot/train-*
- split: validation
path: disambiguation_qa_zero_shot/validation-*
- config_name: discourse_marker_prediction_zero_shot
data_files:
- split: default
path: discourse_marker_prediction_zero_shot/default-*
- split: train
path: discourse_marker_prediction_zero_shot/train-*
- split: validation
path: discourse_marker_prediction_zero_shot/validation-*
- config_name: disfl_qa_zero_shot
data_files:
- split: default
path: disfl_qa_zero_shot/default-*
- split: train
path: disfl_qa_zero_shot/train-*
- split: validation
path: disfl_qa_zero_shot/validation-*
- config_name: dyck_languages_zero_shot
data_files:
- split: default
path: dyck_languages_zero_shot/default-*
- split: train
path: dyck_languages_zero_shot/train-*
- split: validation
path: dyck_languages_zero_shot/validation-*
- config_name: elementary_math_qa_zero_shot
data_files:
- split: default
path: elementary_math_qa_zero_shot/default-*
- split: train
path: elementary_math_qa_zero_shot/train-*
- split: validation
path: elementary_math_qa_zero_shot/validation-*
- config_name: emoji_movie_zero_shot
data_files:
- split: default
path: emoji_movie_zero_shot/default-*
- split: train
path: emoji_movie_zero_shot/train-*
- split: validation
path: emoji_movie_zero_shot/validation-*
- config_name: emojis_emotion_prediction_zero_shot
data_files:
- split: default
path: emojis_emotion_prediction_zero_shot/default-*
- split: train
path: emojis_emotion_prediction_zero_shot/train-*
- split: validation
path: emojis_emotion_prediction_zero_shot/validation-*
- config_name: empirical_judgments_zero_shot
data_files:
- split: default
path: empirical_judgments_zero_shot/default-*
- split: train
path: empirical_judgments_zero_shot/train-*
- split: validation
path: empirical_judgments_zero_shot/validation-*
- config_name: english_proverbs_zero_shot
data_files:
- split: default
path: english_proverbs_zero_shot/default-*
- split: train
path: english_proverbs_zero_shot/train-*
- split: validation
path: english_proverbs_zero_shot/validation-*
- config_name: english_russian_proverbs_zero_shot
data_files:
- split: default
path: english_russian_proverbs_zero_shot/default-*
- split: train
path: english_russian_proverbs_zero_shot/train-*
- split: validation
path: english_russian_proverbs_zero_shot/validation-*
- config_name: entailed_polarity_hindi_zero_shot
data_files:
- split: default
path: entailed_polarity_hindi_zero_shot/default-*
- split: train
path: entailed_polarity_hindi_zero_shot/train-*
- split: validation
path: entailed_polarity_hindi_zero_shot/validation-*
- config_name: entailed_polarity_zero_shot
data_files:
- split: default
path: entailed_polarity_zero_shot/default-*
- split: train
path: entailed_polarity_zero_shot/train-*
- split: validation
path: entailed_polarity_zero_shot/validation-*
- config_name: epistemic_reasoning_zero_shot
data_files:
- split: default
path: epistemic_reasoning_zero_shot/default-*
- split: train
path: epistemic_reasoning_zero_shot/train-*
- split: validation
path: epistemic_reasoning_zero_shot/validation-*
- config_name: evaluating_information_essentiality_zero_shot
data_files:
- split: default
path: evaluating_information_essentiality_zero_shot/default-*
- split: train
path: evaluating_information_essentiality_zero_shot/train-*
- split: validation
path: evaluating_information_essentiality_zero_shot/validation-*
- config_name: fact_checker_zero_shot
data_files:
- split: default
path: fact_checker_zero_shot/default-*
- split: train
path: fact_checker_zero_shot/train-*
- split: validation
path: fact_checker_zero_shot/validation-*
- config_name: fantasy_reasoning_zero_shot
data_files:
- split: default
path: fantasy_reasoning_zero_shot/default-*
- split: train
path: fantasy_reasoning_zero_shot/train-*
- split: validation
path: fantasy_reasoning_zero_shot/validation-*
- config_name: few_shot_nlg_zero_shot
data_files:
- split: default
path: few_shot_nlg_zero_shot/default-*
- split: train
path: few_shot_nlg_zero_shot/train-*
- split: validation
path: few_shot_nlg_zero_shot/validation-*
- config_name: figure_of_speech_detection_zero_shot
data_files:
- split: default
path: figure_of_speech_detection_zero_shot/default-*
- split: train
path: figure_of_speech_detection_zero_shot/train-*
- split: validation
path: figure_of_speech_detection_zero_shot/validation-*
- config_name: formal_fallacies_syllogisms_negation_zero_shot
data_files:
- split: default
path: formal_fallacies_syllogisms_negation_zero_shot/default-*
- split: train
path: formal_fallacies_syllogisms_negation_zero_shot/train-*
- split: validation
path: formal_fallacies_syllogisms_negation_zero_shot/validation-*
- config_name: gem_zero_shot
data_files:
- split: default
path: gem_zero_shot/default-*
- split: train
path: gem_zero_shot/train-*
- split: validation
path: gem_zero_shot/validation-*
- config_name: gender_inclusive_sentences_german_zero_shot
data_files:
- split: default
path: gender_inclusive_sentences_german_zero_shot/default-*
- split: train
path: gender_inclusive_sentences_german_zero_shot/train-*
- split: validation
path: gender_inclusive_sentences_german_zero_shot/validation-*
- config_name: general_knowledge_zero_shot
data_files:
- split: default
path: general_knowledge_zero_shot/default-*
- split: train
path: general_knowledge_zero_shot/train-*
- split: validation
path: general_knowledge_zero_shot/validation-*
- config_name: geometric_shapes_zero_shot
data_files:
- split: default
path: geometric_shapes_zero_shot/default-*
- split: train
path: geometric_shapes_zero_shot/train-*
- split: validation
path: geometric_shapes_zero_shot/validation-*
- config_name: goal_step_wikihow_zero_shot
data_files:
- split: default
path: goal_step_wikihow_zero_shot/default-*
- split: train
path: goal_step_wikihow_zero_shot/train-*
- split: validation
path: goal_step_wikihow_zero_shot/validation-*
- config_name: gre_reading_comprehension_zero_shot
data_files:
- split: default
path: gre_reading_comprehension_zero_shot/default-*
- split: train
path: gre_reading_comprehension_zero_shot/train-*
- split: validation
path: gre_reading_comprehension_zero_shot/validation-*
- config_name: hhh_alignment_zero_shot
data_files:
- split: default
path: hhh_alignment_zero_shot/default-*
- split: train
path: hhh_alignment_zero_shot/train-*
- split: validation
path: hhh_alignment_zero_shot/validation-*
- config_name: hindi_question_answering_zero_shot
data_files:
- split: default
path: hindi_question_answering_zero_shot/default-*
- split: train
path: hindi_question_answering_zero_shot/train-*
- split: validation
path: hindi_question_answering_zero_shot/validation-*
- config_name: hindu_knowledge_zero_shot
data_files:
- split: default
path: hindu_knowledge_zero_shot/default-*
- split: train
path: hindu_knowledge_zero_shot/train-*
- split: validation
path: hindu_knowledge_zero_shot/validation-*
- config_name: hinglish_toxicity_zero_shot
data_files:
- split: default
path: hinglish_toxicity_zero_shot/default-*
- split: train
path: hinglish_toxicity_zero_shot/train-*
- split: validation
path: hinglish_toxicity_zero_shot/validation-*
- config_name: human_organs_senses_zero_shot
data_files:
- split: default
path: human_organs_senses_zero_shot/default-*
- split: train
path: human_organs_senses_zero_shot/train-*
- split: validation
path: human_organs_senses_zero_shot/validation-*
- config_name: hyperbaton_zero_shot
data_files:
- split: default
path: hyperbaton_zero_shot/default-*
- split: train
path: hyperbaton_zero_shot/train-*
- split: validation
path: hyperbaton_zero_shot/validation-*
- config_name: identify_math_theorems_zero_shot
data_files:
- split: default
path: identify_math_theorems_zero_shot/default-*
- split: train
path: identify_math_theorems_zero_shot/train-*
- split: validation
path: identify_math_theorems_zero_shot/validation-*
- config_name: identify_odd_metaphor_zero_shot
data_files:
- split: default
path: identify_odd_metaphor_zero_shot/default-*
- split: train
path: identify_odd_metaphor_zero_shot/train-*
- split: validation
path: identify_odd_metaphor_zero_shot/validation-*
- config_name: implicatures_zero_shot
data_files:
- split: default
path: implicatures_zero_shot/default-*
- split: train
path: implicatures_zero_shot/train-*
- split: validation
path: implicatures_zero_shot/validation-*
- config_name: implicit_relations_zero_shot
data_files:
- split: default
path: implicit_relations_zero_shot/default-*
- split: train
path: implicit_relations_zero_shot/train-*
- split: validation
path: implicit_relations_zero_shot/validation-*
- config_name: intent_recognition_zero_shot
data_files:
- split: default
path: intent_recognition_zero_shot/default-*
- split: train
path: intent_recognition_zero_shot/train-*
- split: validation
path: intent_recognition_zero_shot/validation-*
- config_name: international_phonetic_alphabet_nli_zero_shot
data_files:
- split: default
path: international_phonetic_alphabet_nli_zero_shot/default-*
- split: train
path: international_phonetic_alphabet_nli_zero_shot/train-*
- split: validation
path: international_phonetic_alphabet_nli_zero_shot/validation-*
- config_name: international_phonetic_alphabet_transliterate_zero_shot
data_files:
- split: default
path: international_phonetic_alphabet_transliterate_zero_shot/default-*
- split: train
path: international_phonetic_alphabet_transliterate_zero_shot/train-*
- split: validation
path: international_phonetic_alphabet_transliterate_zero_shot/validation-*
- config_name: intersect_geometry_zero_shot
data_files:
- split: default
path: intersect_geometry_zero_shot/default-*
- split: train
path: intersect_geometry_zero_shot/train-*
- split: validation
path: intersect_geometry_zero_shot/validation-*
- config_name: irony_identification_zero_shot
data_files:
- split: default
path: irony_identification_zero_shot/default-*
- split: train
path: irony_identification_zero_shot/train-*
- split: validation
path: irony_identification_zero_shot/validation-*
- config_name: kanji_ascii_zero_shot
data_files:
- split: default
path: kanji_ascii_zero_shot/default-*
- split: train
path: kanji_ascii_zero_shot/train-*
- split: validation
path: kanji_ascii_zero_shot/validation-*
- config_name: kannada_zero_shot
data_files:
- split: default
path: kannada_zero_shot/default-*
- split: train
path: kannada_zero_shot/train-*
- split: validation
path: kannada_zero_shot/validation-*
- config_name: key_value_maps_zero_shot
data_files:
- split: default
path: key_value_maps_zero_shot/default-*
- split: train
path: key_value_maps_zero_shot/train-*
- split: validation
path: key_value_maps_zero_shot/validation-*
- config_name: known_unknowns_zero_shot
data_files:
- split: default
path: known_unknowns_zero_shot/default-*
- split: train
path: known_unknowns_zero_shot/train-*
- split: validation
path: known_unknowns_zero_shot/validation-*
- config_name: language_games_zero_shot
data_files:
- split: default
path: language_games_zero_shot/default-*
- split: train
path: language_games_zero_shot/train-*
- split: validation
path: language_games_zero_shot/validation-*
- config_name: language_identification_zero_shot
data_files:
- split: default
path: language_identification_zero_shot/default-*
- split: train
path: language_identification_zero_shot/train-*
- split: validation
path: language_identification_zero_shot/validation-*
- config_name: linguistic_mappings_zero_shot
data_files:
- split: default
path: linguistic_mappings_zero_shot/default-*
- split: train
path: linguistic_mappings_zero_shot/train-*
- split: validation
path: linguistic_mappings_zero_shot/validation-*
- config_name: linguistics_puzzles_zero_shot
data_files:
- split: default
path: linguistics_puzzles_zero_shot/default-*
- split: train
path: linguistics_puzzles_zero_shot/train-*
- split: validation
path: linguistics_puzzles_zero_shot/validation-*
- config_name: list_functions_zero_shot
data_files:
- split: default
path: list_functions_zero_shot/default-*
- split: train
path: list_functions_zero_shot/train-*
- split: validation
path: list_functions_zero_shot/validation-*
- config_name: logic_grid_puzzle_zero_shot
data_files:
- split: default
path: logic_grid_puzzle_zero_shot/default-*
- split: train
path: logic_grid_puzzle_zero_shot/train-*
- split: validation
path: logic_grid_puzzle_zero_shot/validation-*
- config_name: logical_args_zero_shot
data_files:
- split: default
path: logical_args_zero_shot/default-*
- split: train
path: logical_args_zero_shot/train-*
- split: validation
path: logical_args_zero_shot/validation-*
- config_name: logical_deduction_zero_shot
data_files:
- split: default
path: logical_deduction_zero_shot/default-*
- split: train
path: logical_deduction_zero_shot/train-*
- split: validation
path: logical_deduction_zero_shot/validation-*
- config_name: logical_fallacy_detection_zero_shot
data_files:
- split: default
path: logical_fallacy_detection_zero_shot/default-*
- split: train
path: logical_fallacy_detection_zero_shot/train-*
- split: validation
path: logical_fallacy_detection_zero_shot/validation-*
- config_name: logical_sequence_zero_shot
data_files:
- split: default
path: logical_sequence_zero_shot/default-*
- split: train
path: logical_sequence_zero_shot/train-*
- split: validation
path: logical_sequence_zero_shot/validation-*
- config_name: mathematical_induction_zero_shot
data_files:
- split: default
path: mathematical_induction_zero_shot/default-*
- split: train
path: mathematical_induction_zero_shot/train-*
- split: validation
path: mathematical_induction_zero_shot/validation-*
- config_name: matrixshapes_zero_shot
data_files:
- split: default
path: matrixshapes_zero_shot/default-*
- split: train
path: matrixshapes_zero_shot/train-*
- split: validation
path: matrixshapes_zero_shot/validation-*
- config_name: metaphor_boolean_zero_shot
data_files:
- split: default
path: metaphor_boolean_zero_shot/default-*
- split: train
path: metaphor_boolean_zero_shot/train-*
- split: validation
path: metaphor_boolean_zero_shot/validation-*
- config_name: metaphor_understanding_zero_shot
data_files:
- split: default
path: metaphor_understanding_zero_shot/default-*
- split: train
path: metaphor_understanding_zero_shot/train-*
- split: validation
path: metaphor_understanding_zero_shot/validation-*
- config_name: minute_mysteries_qa_zero_shot
data_files:
- split: default
path: minute_mysteries_qa_zero_shot/default-*
- split: train
path: minute_mysteries_qa_zero_shot/train-*
- split: validation
path: minute_mysteries_qa_zero_shot/validation-*
- config_name: misconceptions_russian_zero_shot
data_files:
- split: default
path: misconceptions_russian_zero_shot/default-*
- split: train
path: misconceptions_russian_zero_shot/train-*
- split: validation
path: misconceptions_russian_zero_shot/validation-*
- config_name: misconceptions_zero_shot
data_files:
- split: default
path: misconceptions_zero_shot/default-*
- split: train
path: misconceptions_zero_shot/train-*
- split: validation
path: misconceptions_zero_shot/validation-*
- config_name: mnist_ascii_zero_shot
data_files:
- split: default
path: mnist_ascii_zero_shot/default-*
- split: train
path: mnist_ascii_zero_shot/train-*
- split: validation
path: mnist_ascii_zero_shot/validation-*
- config_name: modified_arithmetic_zero_shot
data_files:
- split: default
path: modified_arithmetic_zero_shot/default-*
- split: train
path: modified_arithmetic_zero_shot/train-*
- split: validation
path: modified_arithmetic_zero_shot/validation-*
- config_name: moral_permissibility_zero_shot
data_files:
- split: default
path: moral_permissibility_zero_shot/default-*
- split: train
path: moral_permissibility_zero_shot/train-*
- split: validation
path: moral_permissibility_zero_shot/validation-*
- config_name: movie_dialog_same_or_different_zero_shot
data_files:
- split: default
path: movie_dialog_same_or_different_zero_shot/default-*
- split: train
path: movie_dialog_same_or_different_zero_shot/train-*
- split: validation
path: movie_dialog_same_or_different_zero_shot/validation-*
- config_name: movie_recommendation_zero_shot
data_files:
- split: default
path: movie_recommendation_zero_shot/default-*
- split: train
path: movie_recommendation_zero_shot/train-*
- split: validation
path: movie_recommendation_zero_shot/validation-*
- config_name: mult_data_wrangling_zero_shot
data_files:
- split: default
path: mult_data_wrangling_zero_shot/default-*
- split: train
path: mult_data_wrangling_zero_shot/train-*
- split: validation
path: mult_data_wrangling_zero_shot/validation-*
- config_name: multiemo_zero_shot
data_files:
- split: default
path: multiemo_zero_shot/default-*
- split: train
path: multiemo_zero_shot/train-*
- split: validation
path: multiemo_zero_shot/validation-*
- config_name: natural_instructions_zero_shot
data_files:
- split: default
path: natural_instructions_zero_shot/default-*
- split: train
path: natural_instructions_zero_shot/train-*
- split: validation
path: natural_instructions_zero_shot/validation-*
- config_name: navigate_zero_shot
data_files:
- split: default
path: navigate_zero_shot/default-*
- split: train
path: navigate_zero_shot/train-*
- split: validation
path: navigate_zero_shot/validation-*
- config_name: nonsense_words_grammar_zero_shot
data_files:
- split: default
path: nonsense_words_grammar_zero_shot/default-*
- split: train
path: nonsense_words_grammar_zero_shot/train-*
- split: validation
path: nonsense_words_grammar_zero_shot/validation-*
- config_name: novel_concepts_zero_shot
data_files:
- split: default
path: novel_concepts_zero_shot/default-*
- split: train
path: novel_concepts_zero_shot/train-*
- split: validation
path: novel_concepts_zero_shot/validation-*
- config_name: object_counting_zero_shot
data_files:
- split: default
path: object_counting_zero_shot/default-*
- split: train
path: object_counting_zero_shot/train-*
- split: validation
path: object_counting_zero_shot/validation-*
- config_name: odd_one_out_zero_shot
data_files:
- split: default
path: odd_one_out_zero_shot/default-*
- split: train
path: odd_one_out_zero_shot/train-*
- split: validation
path: odd_one_out_zero_shot/validation-*
- config_name: operators_zero_shot
data_files:
- split: default
path: operators_zero_shot/default-*
- split: train
path: operators_zero_shot/train-*
- split: validation
path: operators_zero_shot/validation-*
- config_name: paragraph_segmentation_zero_shot
data_files:
- split: default
path: paragraph_segmentation_zero_shot/default-*
- split: train
path: paragraph_segmentation_zero_shot/train-*
- split: validation
path: paragraph_segmentation_zero_shot/validation-*
- config_name: parsinlu_qa_zero_shot
data_files:
- split: default
path: parsinlu_qa_zero_shot/default-*
- split: train
path: parsinlu_qa_zero_shot/train-*
- split: validation
path: parsinlu_qa_zero_shot/validation-*
- config_name: parsinlu_reading_comprehension_zero_shot
data_files:
- split: default
path: parsinlu_reading_comprehension_zero_shot/default-*
- split: train
path: parsinlu_reading_comprehension_zero_shot/train-*
- split: validation
path: parsinlu_reading_comprehension_zero_shot/validation-*
- config_name: penguins_in_a_table_zero_shot
data_files:
- split: default
path: penguins_in_a_table_zero_shot/default-*
- split: train
path: penguins_in_a_table_zero_shot/train-*
- split: validation
path: penguins_in_a_table_zero_shot/validation-*
- config_name: periodic_elements_zero_shot
data_files:
- split: default
path: periodic_elements_zero_shot/default-*
- split: train
path: periodic_elements_zero_shot/train-*
- split: validation
path: periodic_elements_zero_shot/validation-*
- config_name: persian_idioms_zero_shot
data_files:
- split: default
path: persian_idioms_zero_shot/default-*
- split: train
path: persian_idioms_zero_shot/train-*
- split: validation
path: persian_idioms_zero_shot/validation-*
- config_name: phrase_relatedness_zero_shot
data_files:
- split: default
path: phrase_relatedness_zero_shot/default-*
- split: train
path: phrase_relatedness_zero_shot/train-*
- split: validation
path: phrase_relatedness_zero_shot/validation-*
- config_name: physical_intuition_zero_shot
data_files:
- split: default
path: physical_intuition_zero_shot/default-*
- split: train
path: physical_intuition_zero_shot/train-*
- split: validation
path: physical_intuition_zero_shot/validation-*
- config_name: physics_questions_zero_shot
data_files:
- split: default
path: physics_questions_zero_shot/default-*
- split: train
path: physics_questions_zero_shot/train-*
- split: validation
path: physics_questions_zero_shot/validation-*
- config_name: physics_zero_shot
data_files:
- split: default
path: physics_zero_shot/default-*
- split: train
path: physics_zero_shot/train-*
- split: validation
path: physics_zero_shot/validation-*
- config_name: play_dialog_same_or_different_zero_shot
data_files:
- split: default
path: play_dialog_same_or_different_zero_shot/default-*
- split: train
path: play_dialog_same_or_different_zero_shot/train-*
- split: validation
path: play_dialog_same_or_different_zero_shot/validation-*
- config_name: polish_sequence_labeling_zero_shot
data_files:
- split: default
path: polish_sequence_labeling_zero_shot/default-*
- split: train
path: polish_sequence_labeling_zero_shot/train-*
- split: validation
path: polish_sequence_labeling_zero_shot/validation-*
- config_name: presuppositions_as_nli_zero_shot
data_files:
- split: default
path: presuppositions_as_nli_zero_shot/default-*
- split: train
path: presuppositions_as_nli_zero_shot/train-*
- split: validation
path: presuppositions_as_nli_zero_shot/validation-*
- config_name: qa_wikidata_zero_shot
data_files:
- split: default
path: qa_wikidata_zero_shot/default-*
- split: train
path: qa_wikidata_zero_shot/train-*
- split: validation
path: qa_wikidata_zero_shot/validation-*
- config_name: question_selection_zero_shot
data_files:
- split: default
path: question_selection_zero_shot/default-*
- split: train
path: question_selection_zero_shot/train-*
- split: validation
path: question_selection_zero_shot/validation-*
- config_name: real_or_fake_text_zero_shot
data_files:
- split: default
path: real_or_fake_text_zero_shot/default-*
- split: train
path: real_or_fake_text_zero_shot/train-*
- split: validation
path: real_or_fake_text_zero_shot/validation-*
- config_name: reasoning_about_colored_objects_zero_shot
data_files:
- split: default
path: reasoning_about_colored_objects_zero_shot/default-*
- split: train
path: reasoning_about_colored_objects_zero_shot/train-*
- split: validation
path: reasoning_about_colored_objects_zero_shot/validation-*
- config_name: repeat_copy_logic_zero_shot
data_files:
- split: default
path: repeat_copy_logic_zero_shot/default-*
- split: train
path: repeat_copy_logic_zero_shot/train-*
- split: validation
path: repeat_copy_logic_zero_shot/validation-*
- config_name: rephrase_zero_shot
data_files:
- split: default
path: rephrase_zero_shot/default-*
- split: train
path: rephrase_zero_shot/train-*
- split: validation
path: rephrase_zero_shot/validation-*
- config_name: riddle_sense_zero_shot
data_files:
- split: default
path: riddle_sense_zero_shot/default-*
- split: train
path: riddle_sense_zero_shot/train-*
- split: validation
path: riddle_sense_zero_shot/validation-*
- config_name: ruin_names_zero_shot
data_files:
- split: default
path: ruin_names_zero_shot/default-*
- split: train
path: ruin_names_zero_shot/train-*
- split: validation
path: ruin_names_zero_shot/validation-*
- config_name: salient_translation_error_detection_zero_shot
data_files:
- split: default
path: salient_translation_error_detection_zero_shot/default-*
- split: train
path: salient_translation_error_detection_zero_shot/train-*
- split: validation
path: salient_translation_error_detection_zero_shot/validation-*
- config_name: scientific_press_release_zero_shot
data_files:
- split: default
path: scientific_press_release_zero_shot/default-*
- split: train
path: scientific_press_release_zero_shot/train-*
- split: validation
path: scientific_press_release_zero_shot/validation-*
- config_name: semantic_parsing_in_context_sparc_zero_shot
data_files:
- split: default
path: semantic_parsing_in_context_sparc_zero_shot/default-*
- split: train
path: semantic_parsing_in_context_sparc_zero_shot/train-*
- split: validation
path: semantic_parsing_in_context_sparc_zero_shot/validation-*
- config_name: semantic_parsing_spider_zero_shot
data_files:
- split: default
path: semantic_parsing_spider_zero_shot/default-*
- split: train
path: semantic_parsing_spider_zero_shot/train-*
- split: validation
path: semantic_parsing_spider_zero_shot/validation-*
- config_name: sentence_ambiguity_zero_shot
data_files:
- split: default
path: sentence_ambiguity_zero_shot/default-*
- split: train
path: sentence_ambiguity_zero_shot/train-*
- split: validation
path: sentence_ambiguity_zero_shot/validation-*
- config_name: similarities_abstraction_zero_shot
data_files:
- split: default
path: similarities_abstraction_zero_shot/default-*
- split: train
path: similarities_abstraction_zero_shot/train-*
- split: validation
path: similarities_abstraction_zero_shot/validation-*
- config_name: simp_turing_concept_zero_shot
data_files:
- split: default
path: simp_turing_concept_zero_shot/default-*
- split: train
path: simp_turing_concept_zero_shot/train-*
- split: validation
path: simp_turing_concept_zero_shot/validation-*
- config_name: simple_arithmetic_json_multiple_choice_zero_shot
data_files:
- split: default
path: simple_arithmetic_json_multiple_choice_zero_shot/default-*
- split: train
path: simple_arithmetic_json_multiple_choice_zero_shot/train-*
- split: validation
path: simple_arithmetic_json_multiple_choice_zero_shot/validation-*
- config_name: simple_arithmetic_json_subtasks_zero_shot
data_files:
- split: default
path: simple_arithmetic_json_subtasks_zero_shot/default-*
- split: train
path: simple_arithmetic_json_subtasks_zero_shot/train-*
- split: validation
path: simple_arithmetic_json_subtasks_zero_shot/validation-*
- config_name: simple_arithmetic_json_zero_shot
data_files:
- split: default
path: simple_arithmetic_json_zero_shot/default-*
- split: train
path: simple_arithmetic_json_zero_shot/train-*
- split: validation
path: simple_arithmetic_json_zero_shot/validation-*
- config_name: simple_arithmetic_multiple_targets_json_zero_shot
data_files:
- split: default
path: simple_arithmetic_multiple_targets_json_zero_shot/default-*
- split: train
path: simple_arithmetic_multiple_targets_json_zero_shot/train-*
- split: validation
path: simple_arithmetic_multiple_targets_json_zero_shot/validation-*
- config_name: simple_ethical_questions_zero_shot
data_files:
- split: default
path: simple_ethical_questions_zero_shot/default-*
- split: train
path: simple_ethical_questions_zero_shot/train-*
- split: validation
path: simple_ethical_questions_zero_shot/validation-*
- config_name: simple_text_editing_zero_shot
data_files:
- split: default
path: simple_text_editing_zero_shot/default-*
- split: train
path: simple_text_editing_zero_shot/train-*
- split: validation
path: simple_text_editing_zero_shot/validation-*
- config_name: snarks_zero_shot
data_files:
- split: default
path: snarks_zero_shot/default-*
- split: train
path: snarks_zero_shot/train-*
- split: validation
path: snarks_zero_shot/validation-*
- config_name: social_iqa_zero_shot
data_files:
- split: default
path: social_iqa_zero_shot/default-*
- split: train
path: social_iqa_zero_shot/train-*
- split: validation
path: social_iqa_zero_shot/validation-*
- config_name: social_support_zero_shot
data_files:
- split: default
path: social_support_zero_shot/default-*
- split: train
path: social_support_zero_shot/train-*
- split: validation
path: social_support_zero_shot/validation-*
- config_name: sports_understanding_zero_shot
data_files:
- split: default
path: sports_understanding_zero_shot/default-*
- split: train
path: sports_understanding_zero_shot/train-*
- split: validation
path: sports_understanding_zero_shot/validation-*
- config_name: strange_stories_zero_shot
data_files:
- split: default
path: strange_stories_zero_shot/default-*
- split: train
path: strange_stories_zero_shot/train-*
- split: validation
path: strange_stories_zero_shot/validation-*
- config_name: strategyqa_zero_shot
data_files:
- split: default
path: strategyqa_zero_shot/default-*
- split: train
path: strategyqa_zero_shot/train-*
- split: validation
path: strategyqa_zero_shot/validation-*
- config_name: sufficient_information_zero_shot
data_files:
- split: default
path: sufficient_information_zero_shot/default-*
- split: train
path: sufficient_information_zero_shot/train-*
- split: validation
path: sufficient_information_zero_shot/validation-*
- config_name: suicide_risk_zero_shot
data_files:
- split: default
path: suicide_risk_zero_shot/default-*
- split: train
path: suicide_risk_zero_shot/train-*
- split: validation
path: suicide_risk_zero_shot/validation-*
- config_name: swahili_english_proverbs_zero_shot
data_files:
- split: default
path: swahili_english_proverbs_zero_shot/default-*
- split: train
path: swahili_english_proverbs_zero_shot/train-*
- split: validation
path: swahili_english_proverbs_zero_shot/validation-*
- config_name: swedish_to_german_proverbs_zero_shot
data_files:
- split: default
path: swedish_to_german_proverbs_zero_shot/default-*
- split: train
path: swedish_to_german_proverbs_zero_shot/train-*
- split: validation
path: swedish_to_german_proverbs_zero_shot/validation-*
- config_name: symbol_interpretation_zero_shot
data_files:
- split: default
path: symbol_interpretation_zero_shot/default-*
- split: train
path: symbol_interpretation_zero_shot/train-*
- split: validation
path: symbol_interpretation_zero_shot/validation-*
- config_name: temporal_sequences_zero_shot
data_files:
- split: default
path: temporal_sequences_zero_shot/default-*
- split: train
path: temporal_sequences_zero_shot/train-*
- split: validation
path: temporal_sequences_zero_shot/validation-*
- config_name: tense_zero_shot
data_files:
- split: default
path: tense_zero_shot/default-*
- split: train
path: tense_zero_shot/train-*
- split: validation
path: tense_zero_shot/validation-*
- config_name: timedial_zero_shot
data_files:
- split: default
path: timedial_zero_shot/default-*
- split: train
path: timedial_zero_shot/train-*
- split: validation
path: timedial_zero_shot/validation-*
- config_name: topical_chat_zero_shot
data_files:
- split: default
path: topical_chat_zero_shot/default-*
- split: train
path: topical_chat_zero_shot/train-*
- split: validation
path: topical_chat_zero_shot/validation-*
- config_name: tracking_shuffled_objects_zero_shot
data_files:
- split: default
path: tracking_shuffled_objects_zero_shot/default-*
- split: train
path: tracking_shuffled_objects_zero_shot/train-*
- split: validation
path: tracking_shuffled_objects_zero_shot/validation-*
- config_name: understanding_fables_zero_shot
data_files:
- split: default
path: understanding_fables_zero_shot/default-*
- split: train
path: understanding_fables_zero_shot/train-*
- split: validation
path: understanding_fables_zero_shot/validation-*
- config_name: undo_permutation_zero_shot
data_files:
- split: default
path: undo_permutation_zero_shot/default-*
- split: train
path: undo_permutation_zero_shot/train-*
- split: validation
path: undo_permutation_zero_shot/validation-*
- config_name: unit_conversion_zero_shot
data_files:
- split: default
path: unit_conversion_zero_shot/default-*
- split: train
path: unit_conversion_zero_shot/train-*
- split: validation
path: unit_conversion_zero_shot/validation-*
- config_name: unit_interpretation_zero_shot
data_files:
- split: default
path: unit_interpretation_zero_shot/default-*
- split: train
path: unit_interpretation_zero_shot/train-*
- split: validation
path: unit_interpretation_zero_shot/validation-*
- config_name: unnatural_in_context_learning_zero_shot
data_files:
- split: default
path: unnatural_in_context_learning_zero_shot/default-*
- split: train
path: unnatural_in_context_learning_zero_shot/train-*
- split: validation
path: unnatural_in_context_learning_zero_shot/validation-*
- config_name: vitaminc_fact_verification_zero_shot
data_files:
- split: default
path: vitaminc_fact_verification_zero_shot/default-*
- split: train
path: vitaminc_fact_verification_zero_shot/train-*
- split: validation
path: vitaminc_fact_verification_zero_shot/validation-*
- config_name: what_is_the_tao_zero_shot
data_files:
- split: default
path: what_is_the_tao_zero_shot/default-*
- split: train
path: what_is_the_tao_zero_shot/train-*
- split: validation
path: what_is_the_tao_zero_shot/validation-*
- config_name: which_wiki_edit_zero_shot
data_files:
- split: default
path: which_wiki_edit_zero_shot/default-*
- split: train
path: which_wiki_edit_zero_shot/train-*
- split: validation
path: which_wiki_edit_zero_shot/validation-*
- config_name: winowhy_zero_shot
data_files:
- split: default
path: winowhy_zero_shot/default-*
- split: train
path: winowhy_zero_shot/train-*
- split: validation
path: winowhy_zero_shot/validation-*
- config_name: word_sorting_zero_shot
data_files:
- split: default
path: word_sorting_zero_shot/default-*
- split: train
path: word_sorting_zero_shot/train-*
- split: validation
path: word_sorting_zero_shot/validation-*
- config_name: word_unscrambling_zero_shot
data_files:
- split: default
path: word_unscrambling_zero_shot/default-*
- split: train
path: word_unscrambling_zero_shot/train-*
- split: validation
path: word_unscrambling_zero_shot/validation-*
---
# Dataset Card for "bigbench"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
open-llm-leaderboard/contents | open-llm-leaderboard | "2025-03-20T12:17:27Z" | 18,423 | 15 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-06-26T08:33:17Z" | ---
dataset_info:
features:
- name: eval_name
dtype: string
- name: Precision
dtype: string
- name: Type
dtype: string
- name: T
dtype: string
- name: Weight type
dtype: string
- name: Architecture
dtype: string
- name: Model
dtype: string
- name: fullname
dtype: string
- name: Model sha
dtype: string
- name: Average ⬆️
dtype: float64
- name: Hub License
dtype: string
- name: Hub ❤️
dtype: int64
- name: '#Params (B)'
dtype: float64
- name: Available on the hub
dtype: bool
- name: MoE
dtype: bool
- name: Flagged
dtype: bool
- name: Chat Template
dtype: bool
- name: CO₂ cost (kg)
dtype: float64
- name: IFEval Raw
dtype: float64
- name: IFEval
dtype: float64
- name: BBH Raw
dtype: float64
- name: BBH
dtype: float64
- name: MATH Lvl 5 Raw
dtype: float64
- name: MATH Lvl 5
dtype: float64
- name: GPQA Raw
dtype: float64
- name: GPQA
dtype: float64
- name: MUSR Raw
dtype: float64
- name: MUSR
dtype: float64
- name: MMLU-PRO Raw
dtype: float64
- name: MMLU-PRO
dtype: float64
- name: Merged
dtype: bool
- name: Official Providers
dtype: bool
- name: Upload To Hub Date
dtype: string
- name: Submission Date
dtype: string
- name: Generation
dtype: int64
- name: Base Model
dtype: string
splits:
- name: train
num_bytes: 4004719
num_examples: 4576
download_size: 1109997
dataset_size: 4004719
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
textmachinelab/quail | textmachinelab | "2024-01-04T16:18:32Z" | 18,305 | 7 | [
"task_categories:multiple-choice",
"task_ids:multiple-choice-qa",
"annotations_creators:crowdsourced",
"language_creators:found",
"multilinguality:monolingual",
"source_datasets:original",
"language:en",
"license:cc-by-nc-sa-4.0",
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"multiple-choice"
] | "2022-03-02T23:29:22Z" | ---
annotations_creators:
- crowdsourced
language_creators:
- found
language:
- en
license:
- cc-by-nc-sa-4.0
multilinguality:
- monolingual
size_categories:
- 10K<n<100K
source_datasets:
- original
task_categories:
- multiple-choice
task_ids:
- multiple-choice-qa
paperswithcode_id: quail
pretty_name: Question Answering for Artificial Intelligence (QuAIL)
dataset_info:
config_name: quail
features:
- name: id
dtype: string
- name: context_id
dtype: string
- name: question_id
dtype: string
- name: domain
dtype: string
- name: metadata
struct:
- name: author
dtype: string
- name: title
dtype: string
- name: url
dtype: string
- name: context
dtype: string
- name: question
dtype: string
- name: question_type
dtype: string
- name: answers
sequence: string
- name: correct_answer_id
dtype: int32
splits:
- name: train
num_bytes: 23432601
num_examples: 10246
- name: validation
num_bytes: 4989531
num_examples: 2164
- name: challenge
num_bytes: 1199792
num_examples: 556
download_size: 2286403
dataset_size: 29621924
configs:
- config_name: quail
data_files:
- split: train
path: quail/train-*
- split: validation
path: quail/validation-*
- split: challenge
path: quail/challenge-*
default: true
---
# Dataset Card for "quail"
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [https://text-machine-lab.github.io/blog/2020/quail/](https://text-machine-lab.github.io/blog/2020/quail/)
- **Repository:** https://github.com/text-machine-lab/quail
- **Paper:** [Getting Closer to AI Complete Question Answering: A Set of Prerequisite Real Tasks](https://doi.org/10.1609/aaai.v34i05.6398 )
- **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Size of downloaded dataset files:** 6.41 MB
- **Size of the generated dataset:** 29.62 MB
- **Total amount of disk used:** 36.03 MB
### Dataset Summary
QuAIL is a reading comprehension dataset. QuAIL contains 15K multi-choice questions in texts 300-350 tokens long across 4 domains (news, user stories, fiction, blogs). QuAIL is balanced and annotated for question types.
### Supported Tasks and Leaderboards
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Languages
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Dataset Structure
### Data Instances
#### quail
- **Size of downloaded dataset files:** 6.41 MB
- **Size of the generated dataset:** 29.62 MB
- **Total amount of disk used:** 36.03 MB
An example of 'train' looks as follows.
```
This example was too long and was cropped:
{
"answers": ["the cousin is not friendly", "the cousin could have been pretier", "not enough information", "the cousin was too nice"],
"context": "\"That fall came and I went back to Michigan and the school year went by and summer came and I never really thought about it. I'm...",
"context_id": "f001",
"correct_answer_id": 0,
"domain": "fiction",
"id": "f001_19",
"metadata": {
"author": "Joseph Devon",
"title": "Black Eyed Susan",
"url": "http://manybooks.net/pages/devonjother08black_eyed_susan/0.html"
},
"question": "After the events in the text what does the author think about the cousin?",
"question_id": "19",
"question_type": "Subsequent_state"
}
```
### Data Fields
The data fields are the same among all splits.
#### quail
- `id`: a `string` feature.
- `context_id`: a `string` feature.
- `question_id`: a `string` feature.
- `domain`: a `string` feature.
- `author`: a `string` feature.
- `title`: a `string` feature.
- `url`: a `string` feature.
- `context`: a `string` feature.
- `question`: a `string` feature.
- `question_type`: a `string` feature.
- `answers`: a `list` of `string` features.
- `correct_answer_id`: a `int32` feature.
### Data Splits
|name |train|challenge|validation|
|-----|----:|--------:|---------:|
|quail|10246| 556| 2164|
## Dataset Creation
### Curation Rationale
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the source language producers?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Annotations
#### Annotation process
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the annotators?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Personal and Sensitive Information
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Discussion of Biases
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Other Known Limitations
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Additional Information
### Dataset Curators
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Licensing Information
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Citation Information
```
@inproceedings{DBLP:conf/aaai/RogersKDR20,
author = {Anna Rogers and
Olga Kovaleva and
Matthew Downey and
Anna Rumshisky},
title = {Getting Closer to {AI} Complete Question Answering: {A} Set of Prerequisite
Real Tasks},
booktitle = {The Thirty-Fourth {AAAI} Conference on Artificial Intelligence, {AAAI}
2020, The Thirty-Second Innovative Applications of Artificial Intelligence
Conference, {IAAI} 2020, The Tenth {AAAI} Symposium on Educational
Advances in Artificial Intelligence, {EAAI} 2020, New York, NY, USA,
February 7-12, 2020},
pages = {8722--8731},
publisher = {{AAAI} Press},
year = {2020},
url = {https://aaai.org/ojs/index.php/AAAI/article/view/6398},
timestamp = {Thu, 04 Jun 2020 13:18:48 +0200},
biburl = {https://dblp.org/rec/conf/aaai/RogersKDR20.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
### Contributions
Thanks to [@sai-prasanna](https://github.com/sai-prasanna), [@ngdodd](https://github.com/ngdodd) for adding this dataset. |
avery00/MomentSeeker | avery00 | "2025-03-26T03:18:37Z" | 18,222 | 1 | [
"task_categories:question-answering",
"language:en",
"license:cc-by-nc-sa-4.0",
"size_categories:1K<n<10K",
"format:imagefolder",
"modality:image",
"modality:video",
"library:datasets",
"library:mlcroissant",
"arxiv:2502.12558",
"region:us"
] | [
"question-answering"
] | "2025-03-07T08:35:58Z" | ---
license: cc-by-nc-sa-4.0
extra_gated_prompt: >-
You acknowledge and understand that: This dataset is provided solely for
academic research purposes. It is not intended for commercial use or any other
non-research activities. All copyrights, trademarks, and other intellectual
property rights related to the videos in the dataset remain the exclusive
property of their respective owners.
You assume full responsibility for any additional use or dissemination of this dataset and for any consequences that may arise from such actions. You are also aware that the copyright holders of the original videos retain the right to request the removal of their videos from the dataset.
Furthermore, it is your responsibility to respect these conditions and to use the dataset ethically and in compliance with all applicable laws and regulations. Any violation of these terms may result in the immediate termination of your access to the dataset.
extra_gated_fields:
Institution: text
Name: text
Country: country
I want to use this dataset for:
type: select
options:
- Research
I agree to use this dataset solely for research purposes: checkbox
I will not use this dataset in any way that infringes upon the rights of the copyright holders of the original videos, and strictly prohibit its use for any commercial purposes: checkbox
task_categories:
- question-answering
language:
- en
---
<h1 align="center">MomentSeeker: A Comprehensive Benchmark and A Strong Baseline For Moment Retrieval Within Long Videos</h1>
<p align="center">
<a href="https://arxiv.org/abs/2502.12558">
    <img alt="Build" src="http://img.shields.io/badge/cs.CV-arXiv%3A2502.12558-B31B1B.svg">
</a>
<a href="https://github.com/yhy-2000/MomentSeeker">
<img alt="Build" src="https://img.shields.io/badge/Github-MomentSeeker: A Comprehensive Benchmark and A Strong Baseline For Moment Retrieval Within Long Videos-blue">
</a>
</p>
This repo contains the annotation data for the paper "[MomentSeeker: A Comprehensive Benchmark and A Strong Baseline For Moment Retrieval Within Long Videos](https://arxiv.org/abs/2502.12558)".
## 🔔 News:
- 🥳 2025/03/07: We have released the MomentSeeker [Benchmark](https://huggingface.co/datasets/avery00/MomentSeeker) and [Paper](https://arxiv.org/abs/2502.12558)! 🔥
## License
Our dataset is under the CC-BY-NC-SA-4.0 license.
⚠️ If you need to access and use our dataset, you must understand and agree: **This dataset is for research purposes only and cannot be used for any commercial or other purposes. The user assumes all effects arising from any other use and dissemination.**
We do not own the copyright of any raw video files. Currently, we provide video access to researchers under the condition of acknowledging the above license. For the video data used, we respect and acknowledge any copyrights of the video authors. Therefore, for the movies, TV series, documentaries, and cartoons used in the dataset, we have reduced the resolution, clipped the length, adjusted dimensions, etc. of the original videos to minimize the impact on the rights of the original works.
If the original authors of the related works still believe that the videos should be removed, please contact [email protected] or directly raise an issue.
## Introduction
We present MomentSeeker, a comprehensive benchmark to evaluate retrieval models' performance in handling general long-video moment retrieval (LVMR) tasks. MomentSeeker offers three key advantages. First, it incorporates long videos of over 500 seconds on average, making it the first benchmark specialized for long-video moment retrieval. Second, it covers a wide range of task categories (including Moment Search, Caption Alignment, Image-conditioned Moment Search, and Video-conditioned Moment Search) and diverse application scenarios (e.g., sports, movies, cartoons, and ego), making it a comprehensive tool for assessing retrieval models' general LVMR performance. Additionally, the evaluation tasks are carefully curated through human annotation, ensuring the reliability of assessment. We further fine-tune an MLLM-based LVMR retriever on synthetic data, which demonstrates strong performance on our benchmark. The checkpoint will release soon.

## 🏆 Mini Leaderboard
| Rank | Method | Backbone | # Params | CA | MS | IMS | VMS | Overall |
|------|------------------------------------------|-----------------|---------|--------|--------|--------|--------|--------|
| 1 | **V-Embedder** | InternVideo2-Chat| 8B | <u>42.2</u> | **20.4** | **15.0** | **15.8** | **23.3** |
| 2 | CoVR | BLIP-Large | 588M | 25.8 | 17.4 | <u>12.3</u> | <u>12.3</u> | <u>17.1</u> |
| 3 | InternVideo2 | ViT | 1B | **44.6** | <u>18.2</u> | 4.8 | 0.0 | 16.9 |
| 4 | MM-Ret | CLIP-Base | 149M | 23.2 | 15.4 | 10.5 | 10.5 | 14.9 |
| 5 | LanguageBind | CLIP-Large | 428M | 39.6 | 16.4 | 3.2 | 0.0 | 14.8 |
| 6 | E5V | LLaVA-1.6 | 8.4B | 25.8 | 16.8 | 6.2 | 5.2 | 13.5 |
| 7 | UniIR | CLIP-Large | 428M | 25.0 | 15.2 | 6.4 | 0.0 | 10.9 |
| 8 | MLLM2VEC | LLaVA-1.6 | 8.4B | 6.4 | 6.2 | 3.0 | 3.0 | 4.7 |
| 9 | MagicLens | CLIP-Large | 428M | 9.0 | 2.4 | 3.2 | 2.8 | 4.4 |
## License
Our dataset is under the CC-BY-NC-SA-4.0 license.
⚠️ If you need to access and use our dataset, you must understand and agree: **This dataset is for research purposes only and cannot be used for any commercial or other purposes. The user assumes all effects arising from any other use and dissemination.**
We do not own the copyright of any raw video files. Currently, we provide video access to researchers under the condition of acknowledging the above license. For the video data used, we respect and acknowledge any copyrights of the video authors. Therefore, for the movies, TV series, documentaries, and cartoons used in the dataset, we have reduced the resolution, clipped the length, adjusted dimensions, etc. of the original videos to minimize the impact on the rights of the original works.
If the original authors of the related works still believe that the videos should be removed, please contact [email protected] or directly raise an issue.
## Evaluation
> Before you access our dataset, we kindly ask you to thoroughly read and understand the license outlined above. If you cannot agree to these terms, we request that you refrain from downloading our video data.
The JSON file provides candidate videos for each question. The candidates can be ranked, and metrics such as Recall@1 and MAP@5 can be computed accordingly.
## Hosting and Maintenance
The annotation files will be permanently retained.
If some videos are requested to be removed, we will replace them with a set of video frames sparsely sampled from the video and adjusted in resolution. Since **all the questions in MomentSeeker are only related to visual content** and do not involve audio, this will not significantly affect the validity of MomentSeeker (most existing MLLMs also understand videos by frame extraction).
If even retaining the frame set is not allowed, we will still keep the relevant annotation files, and replace them with the meta-information of the video, or actively seek more reliable and risk-free video sources.
## Citation
If you find this repository useful, please consider giving a star 🌟 and citation
```
@misc{yuan2025momentseekercomprehensivebenchmarkstrong,
title={MomentSeeker: A Comprehensive Benchmark and A Strong Baseline For Moment Retrieval Within Long Videos},
author={Huaying Yuan and Jian Ni and Yueze Wang and Junjie Zhou and Zhengyang Liang and Zheng Liu and Zhao Cao and Zhicheng Dou and Ji-Rong Wen},
year={2025},
eprint={2502.12558},
archivePrefix={arXiv},
primaryClass={cs.CV},
url={https://arxiv.org/abs/2502.12558},
}
``` |
jkot/parliament_hearings_processed | jkot | "2023-04-25T08:53:38Z" | 18,135 | 1 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2023-04-21T10:06:00Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: transcription
dtype: string
splits:
- name: train
num_bytes: 51234859011.0
num_examples: 191455
- name: test
num_bytes: 762989296.0
num_examples: 2726
download_size: 51507735963
dataset_size: 51997848307.0
---
# Parliament hearings ASR dataset preprocessed into truecased form.
## Original dataset: https://lindat.mff.cuni.cz/repository/xmlui/handle/11234/1-3126
---
dataset_info:
features:
- name: id
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: transcription
sequence: string
splits:
- name: train
num_bytes: 53645064353.18
num_examples: 191455
- name: test
num_bytes: 740331298.0
num_examples: 2726
download_size: 51507379112
dataset_size: 54385395651.18
--- |
AppThreat/vdb | AppThreat | "2025-04-11T20:48:34Z" | 18,132 | 1 | [
"language:en",
"license:mit",
"region:us",
"vulnerabilities",
"vdb",
"sca",
"osv",
"nvd",
"ghsa",
"vers",
"purl"
] | null | "2025-02-17T23:35:01Z" | ---
viewer: false
license: mit
language:
- en
tags:
- vulnerabilities
- vdb
- sca
- osv
- nvd
- ghsa
- vers
- purl
---
This dataset comprises application and OS vulnerabilities aggregated from multiple sources, including OSV, GitHub, NVD, and Linux vendor feeds, in the form of SQLite data files (.vdb6).
## Vulnerability Data sources
- Linux [vuln-list](https://github.com/appthreat/vuln-list)
- OSV (1)
- NVD
- GitHub
## Linux distros
- AlmaLinux
- Debian
- Alpine
- Amazon Linux
- Arch Linux
- RHEL/CentOS
- Rocky Linux
- Ubuntu
- OpenSUSE
- Photon
- Chainguard
- Wolfi OS
## Database files
The vulnerability database comprises two SQLite database files.
- data.index.vdb6 - A smaller index database optimized for quick purl or cpe string searches and vers-based range comparisons.

- data.vdb6 - Full CVE source database containing normalized data in the CVE 5.1 specification format, plus the purl prefix.

## Folders
- app - Application only vulnerabilities from 2018
- app-10y - Application only vulnerabilities from 2014
- app-os - Application and OS vulnerabilities from 2018
- app-os-10y - Application and OS vulnerabilities from 2014
Download data.vdb6 and data.index.vdb6 files from a single folder of your choice.
## Searching for CVEs
Use the smaller index database for all search operations.
### Searching by purl
Given a purl string (`purl_str`), perform the following steps to convert this into a suitable purl prefix (`purl_prefix`) string:
In most cases, a purl prefix is a substring at index 0 after a split by "@". Eg: `purl_prefix = purl_str.split("@")[0]`.
A more robust approach:
- Parse and validate the string using a suitable [library](https://github.com/package-url/). Retain the parsed purl object (`purl_obj`)
- Construct a purl prefix string with the following logic:
- Set the value for `purl_prefix` to `"pkg:" + purl_obj["type"]`
- If there is a namespace, append it to purl_prefix after the slash character. Eg: `purl_prefix = purl_prefix + "/" + purl_obj['namespace']`
- Optional for Linux distros: If there is a qualifier string with the name `distro_name`, append it to the purl_prefix after the slash character. Eg: `purl_prefix = purl_prefix + "/" + purl_obj['qualifiers']['distro_name']`
- Append the name after the slash character. Eg: `purl_prefix = purl_prefix + "/" + purl_obj['name']`
Use the below SQL query to search by purl_prefix:
```
SELECT DISTINCT cve_id, type, namespace, name, vers, purl_prefix FROM cve_index where purl_prefix = ?;
```
### Searching by cpe
Parse the cpe string to extract the vendor, product, and version. The regex for Python is shown below:
```python
import re
CPE_FULL_REGEX = re.compile(
"cpe:?:[^:]+:(?P<cve_type>[^:]+):(?P<vendor>[^:]+):(?P<package>[^:]+):(?P<version>[^:]+):(?P<update>[^:]+):(?P<edition>[^:]+):(?P<lang>[^:]+):(?P<sw_edition>[^:]+):(?P<target_sw>[^:]+):(?P<target_hw>[^:]+):(?P<other>[^:]+)"
)
```
In the `cve_index` table, vendor maps to namespace and package maps to name. The SQL query is below:
```sql
SELECT DISTINCT cve_id, type, namespace, name, vers, purl_prefix FROM cve_index where namespace = ? AND name = ?;
```
### Comparing version ranges using vers
Refer to the vers [documentation](https://github.com/package-url/purl-spec/blob/version-range-spec/VERSION-RANGE-SPEC.rst) for information regarding vers and a logic to parse and check if a version is within a range. To simplify the logic, a value from the vers column in `cve_index` would contain only a maximum of two constraints (one greater-than and one less-than).
## Combining data
Search the `cve_index` table in the index database first to retrieve any matching cve_id and purl_prefix values. Use these two column values to retrieve the full CVE source information from the `cve_data` table. An example query is shown below:
```sql
SELECT DISTINCT cve_id, type, namespace, name, source_data_hash, json(source_data), json(override_data), purl_prefix FROM cve_data
WHERE cve_id = ? AND purl_prefix = ?
GROUP BY purl_prefix
ORDER BY cve_id DESC;
```
Use the `source_data_hash` values to filter out any duplicate results for the same CVE. Duplicate results are possible when multiple vers match the same CVE and purl prefixes.
## Citation
Use the below citation in your research.
```text
@misc{vdb,
author = {Team AppThreat},
    month = feb,
title = {{AppThreat vulnerability-db}},
howpublished = {{https://huggingface.co/datasets/AppThreat/vdb}},
year = {2025}
}
```
|
ptb-text-only/ptb_text_only | ptb-text-only | "2024-01-18T11:13:39Z" | 18,045 | 16 | [
"task_categories:text-generation",
"task_categories:fill-mask",
"task_ids:language-modeling",
"task_ids:masked-language-modeling",
"annotations_creators:expert-generated",
"language_creators:found",
"multilinguality:monolingual",
"source_datasets:original",
"language:en",
"license:other",
"size_categories:10K<n<100K",
"region:us"
] | [
"text-generation",
"fill-mask"
] | "2022-03-02T23:29:22Z" | ---
annotations_creators:
- expert-generated
language_creators:
- found
language:
- en
license:
- other
license_details: LDC User Agreement for Non-Members
multilinguality:
- monolingual
size_categories:
- 10K<n<100K
source_datasets:
- original
task_categories:
- text-generation
- fill-mask
task_ids:
- language-modeling
- masked-language-modeling
paperswithcode_id: null
pretty_name: Penn Treebank
dataset_info:
features:
- name: sentence
dtype: string
config_name: penn_treebank
splits:
- name: train
num_bytes: 5143706
num_examples: 42068
- name: test
num_bytes: 453710
num_examples: 3761
- name: validation
num_bytes: 403156
num_examples: 3370
download_size: 5951345
dataset_size: 6000572
---
# Dataset Card for Penn Treebank
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** https://catalog.ldc.upenn.edu/LDC99T42
- **Repository:** 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.train.txt',
'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.valid.txt',
'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.test.txt'
- **Paper:** https://www.aclweb.org/anthology/J93-2004.pdf
- **Leaderboard:** [Needs More Information]
- **Point of Contact:** [Needs More Information]
### Dataset Summary
This is the Penn Treebank Project: Release 2 CDROM, featuring a million words of 1989 Wall Street Journal material.
The rare words in this version have already been replaced with the <unk> token. The numbers have been replaced with the <N> token.
### Supported Tasks and Leaderboards
Language Modelling
### Languages
The text in the dataset is in American English
## Dataset Structure
### Data Instances
[Needs More Information]
### Data Fields
[Needs More Information]
### Data Splits
[Needs More Information]
## Dataset Creation
### Curation Rationale
[Needs More Information]
### Source Data
#### Initial Data Collection and Normalization
[Needs More Information]
#### Who are the source language producers?
[Needs More Information]
### Annotations
#### Annotation process
[Needs More Information]
#### Who are the annotators?
[Needs More Information]
### Personal and Sensitive Information
[Needs More Information]
## Considerations for Using the Data
### Social Impact of Dataset
[Needs More Information]
### Discussion of Biases
[Needs More Information]
### Other Known Limitations
[Needs More Information]
## Additional Information
### Dataset Curators
[Needs More Information]
### Licensing Information
Dataset provided for research purposes only. Please check dataset license for additional information.
### Citation Information
@article{marcus-etal-1993-building,
title = "Building a Large Annotated Corpus of {E}nglish: The {P}enn {T}reebank",
author = "Marcus, Mitchell P. and
Santorini, Beatrice and
Marcinkiewicz, Mary Ann",
journal = "Computational Linguistics",
volume = "19",
number = "2",
year = "1993",
url = "https://www.aclweb.org/anthology/J93-2004",
pages = "313--330",
}
### Contributions
Thanks to [@harshalmittal4](https://github.com/harshalmittal4) for adding this dataset. |
allenai/sciq | allenai | "2024-01-04T16:23:51Z" | 18,019 | 104 | [
"task_categories:question-answering",
"task_ids:closed-domain-qa",
"annotations_creators:no-annotation",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"source_datasets:original",
"language:en",
"license:cc-by-nc-3.0",
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"question-answering"
] | "2022-03-02T23:29:22Z" | ---
annotations_creators:
- no-annotation
language_creators:
- crowdsourced
language:
- en
license:
- cc-by-nc-3.0
multilinguality:
- monolingual
size_categories:
- 10K<n<100K
source_datasets:
- original
task_categories:
- question-answering
task_ids:
- closed-domain-qa
paperswithcode_id: sciq
pretty_name: SciQ
dataset_info:
features:
- name: question
dtype: string
- name: distractor3
dtype: string
- name: distractor1
dtype: string
- name: distractor2
dtype: string
- name: correct_answer
dtype: string
- name: support
dtype: string
splits:
- name: train
num_bytes: 6546183
num_examples: 11679
- name: validation
num_bytes: 554120
num_examples: 1000
- name: test
num_bytes: 563927
num_examples: 1000
download_size: 4674410
dataset_size: 7664230
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
# Dataset Card for "sciq"
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [https://allenai.org/data/sciq](https://allenai.org/data/sciq)
- **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Size of downloaded dataset files:** 2.82 MB
- **Size of the generated dataset:** 7.68 MB
- **Total amount of disk used:** 10.50 MB
### Dataset Summary
The SciQ dataset contains 13,679 crowdsourced science exam questions about Physics, Chemistry and Biology, among others. The questions are in multiple-choice format with 4 answer options each. For the majority of the questions, an additional paragraph with supporting evidence for the correct answer is provided.
### Supported Tasks and Leaderboards
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Languages
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Dataset Structure
### Data Instances
#### default
- **Size of downloaded dataset files:** 2.82 MB
- **Size of the generated dataset:** 7.68 MB
- **Total amount of disk used:** 10.50 MB
An example of 'train' looks as follows.
```
This example was too long and was cropped:
{
"correct_answer": "coriolis effect",
"distractor1": "muon effect",
"distractor2": "centrifugal effect",
"distractor3": "tropical effect",
"question": "What phenomenon makes global winds blow northeast to southwest or the reverse in the northern hemisphere and northwest to southeast or the reverse in the southern hemisphere?",
"support": "\"Without Coriolis Effect the global winds would blow north to south or south to north. But Coriolis makes them blow northeast to..."
}
```
### Data Fields
The data fields are the same among all splits.
#### default
- `question`: a `string` feature.
- `distractor3`: a `string` feature.
- `distractor1`: a `string` feature.
- `distractor2`: a `string` feature.
- `correct_answer`: a `string` feature.
- `support`: a `string` feature.
### Data Splits
| name |train|validation|test|
|-------|----:|---------:|---:|
|default|11679| 1000|1000|
## Dataset Creation
### Curation Rationale
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the source language producers?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Annotations
#### Annotation process
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the annotators?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Personal and Sensitive Information
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Discussion of Biases
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Other Known Limitations
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Additional Information
### Dataset Curators
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Licensing Information
The dataset is licensed under the [Creative Commons Attribution-NonCommercial 3.0 Unported License](http://creativecommons.org/licenses/by-nc/3.0/).
### Citation Information
```
@inproceedings{SciQ,
title={Crowdsourcing Multiple Choice Science Questions},
    author={Johannes Welbl and Nelson F. Liu and Matt Gardner},
year={2017},
journal={arXiv:1707.06209v1}
}
```
### Contributions
Thanks to [@patrickvonplaten](https://github.com/patrickvonplaten), [@lewtun](https://github.com/lewtun), [@thomwolf](https://github.com/thomwolf) for adding this dataset. |
mlfoundations/MINT-1T-PDF-CC-2024-18 | mlfoundations | "2024-09-19T21:02:55Z" | 17,909 | 19 | [
"task_categories:image-to-text",
"task_categories:text-generation",
"language:en",
"license:cc-by-4.0",
"size_categories:100B<n<1T",
"arxiv:2406.11271",
"region:us",
"multimodal"
] | [
"image-to-text",
"text-generation"
] | "2024-07-15T03:19:33Z" | ---
license: cc-by-4.0
task_categories:
- image-to-text
- text-generation
language:
- en
tags:
- multimodal
pretty_name: MINT-1T
size_categories:
- 100B<n<1T
configs:
- config_name: default
data_files:
- split: train
path: CC-MAIN-*/*
---
<h1 align="center">
🍃 MINT-1T:<br>Scaling Open-Source Multimodal Data by 10x:<br> A Multimodal Dataset with One Trillion Tokens
</h1>
🍃 MINT-1T is an open-source **M**ultimodal **INT**erleaved dataset with 1 trillion text tokens and 3.4 billion images, a 10x scale-up from existing open-source datasets. Additionally, we include previously untapped sources such as PDFs and ArXiv papers. 🍃 MINT-1T is designed to facilitate research in multimodal pretraining. 🍃 MINT-1T is created by a team from the University of Washington in collaboration with Salesforce Research, other academic institutions including Stanford University, University of Texas at Austin, and University of California Berkeley.
You are currently viewing a subset of the PDF portion of 🍃 MINT-1T associated with CommonCrawl dump `CC-2024-18`. For other PDF, HTML, and ArXiv subsets, refer to the [🍃 MINT-1T collection](https://huggingface.co/collections/mlfoundations/mint-1t-6690216ca4d0df7e518dde1c).

## Updates
### 9/19/24
We have removed roughly 10% of the PDF samples as there was a mismatch between the frames in the TIFF images and the document metadata.
### 8/8/24
We have become aware that the image hashes in the PDF subset of MINT-1T do not match the images in the documents. We want to emphasize that the images for each document are correct, and only the image hashes in the documents' metadata are mislabeled.
## Dataset Details
### Dataset Sources
- **Repository**: https://github.com/mlfoundations/MINT-1T
- **Paper:** https://arxiv.org/abs/2406.11271
- **Blog:** https://blog.salesforceairesearch.com/mint-1t/
## Uses
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
🍃 MINT-1T is designed to facilitate research in multimodal pretraining. The dataset can be used for training multimodal models that can reason about interleaved text and image sequences such as [Idefics2](https://huggingface.co/HuggingFaceM4/idefics2-8b), [XGen-MM](https://huggingface.co/Salesforce/xgen-mm-phi3-mini-instruct-r-v1), and [Chameleon](https://huggingface.co/facebook/chameleon-30b).
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
🍃 MINT-1T was built to make research into large multimodal models more accessible. Using
the dataset to train models that ingest or generate personally identifying information (such
as images of people's faces and other sensitive content), as well as using it for military applications, are inappropriate use cases of 🍃 MINT-1T.
## Dataset Creation
### Curation Rationale
🍃 MINT-1T was created to address a significant gap in the open-source domain by providing a large-scale multimodal interleaved dataset for pre-training large multimodal models. This dataset aims to be a valuable resource for the research community, facilitating open science in multimodal pretraining.
### Source Data
The dataset is a comprehensive collection of multimodal documents from various sources:
- HTML documents: Filtered from CommonCrawl WARC dumps spanning from 2017 to 2024
- PDF documents: Extracted from CommonCrawl WAT dumps covering 2023 to 2024
- ArXiv documents: A subset of papers from the ArXiv repository
In total, 🍃 MINT-1T contains 1056.8 million documents, broken down as follows:
- 1029.4 million HTML documents
- 24.0 million PDF documents
- 0.6 million ArXiv documents
#### Data Collection and Processing
The data collection and processing involved several steps:
1. Document Extraction:
- HTML documents were parsed from CommonCrawl WARC files
- PDF documents were extracted from CommonCrawl WAT files
- ArXiv papers were directly sourced from ArXiv S3 buckets
2. Filtering Process:
- Applied text quality filters to ensure content relevance and readability
- Removed duplicate content at both paragraph and document levels
- Filtered out undesirable content based on predefined criteria
- Verified image availability and quality for HTML documents
- Limited PDF size to 50MB and 50 pages to manage dataset size and quality
3. Image Processing:
- Used NSFW image detection to remove pornographic or otherwise undesirable images
- Removed images smaller than 150 pixels or larger than 20,000 pixels
- Adjusted aspect ratio thresholds for HTML (2:1) and PDF (3:1) to preserve scientific figures
4. Text Processing:
- Used fasttext for language identification, focusing on English content
- Masked personally identifiable information such as email addresses and IP addresses
- Applied paragraph and document-level deduplication using Bloom filters
5. PDF Specific Processing:
- Used PyMuPDF for parsing PDFs and extracting reading order
- Clustered text blocks based on columns and ordered from top left to bottom right
6. ArXiv Specific Processing:
- Used TexSoup to parse LaTeX source code and interleave images with text
- Cleaned up LaTeX code by removing imports, bibliography, tables, and citation tags
Various open-source tools were utilized in this process, including fasttext, [PyMuPDF](https://github.com/pymupdf/PyMuPDF), and [DCLM](https://www.datacomp.ai/dclm/) and [bff](https://github.com/revbucket/bff) for deduplication and content filtering.
#### Personal and Sensitive Information
Despite sourcing from public web data, significant efforts were made to minimize the inclusion of personal and sensitive information:
- Email addresses and IP addresses were masked to protect privacy
- An NSFW image classifier was used to remove inappropriate visual content
- URLs containing substrings associated with undesirable or sensitive content were filtered out
However, users should be aware that as the data originates from the public web, it may still contain some sensitive or personal information. The dataset creators acknowledge this limitation and advise users to exercise caution and potentially apply additional filtering based on their specific use cases.
## Bias, Risks, and Limitations
Several potential biases, risks, and limitations have been identified:
1. Data Bias: As the dataset is sourced from web crawls, it may inherit biases present in online content.
2. Content Risks: Despite extensive filtering, there's a possibility that some offensive, insensitive, or inappropriate content may remain in the dataset.
3. Image Availability: The dataset relies on external image URLs, which may become unavailable over time due to link rot, potentially affecting the dataset's long-term usability.
4. PDF Parsing Limitations: The current method for extracting reading order from PDFs may not always accurately capture the intended flow, especially for documents with complex layouts.
5. Potential Legal and Ethical Concerns: While efforts were made to respect robots.txt files and remove sensitive information, there may still be content that individuals did not explicitly consent to include.
### Recommendations
Given these considerations, the following recommendations are provided:
1. Additional Filtering: Users are strongly encouraged to apply additional filtering based on their specific use case and ethical considerations.
2. Inappropriate Use Cases: The dataset is not recommended for applications involving the processing or generation of personally identifying information, nor for military applications.
3. Legal Compliance: Users should independently verify compliance with applicable laws before employing MINT-1T for commercial purposes.
4. Bias Awareness: Researchers and developers should be cognizant of potential biases in the dataset and consider their impact on model training and outputs.
## License
We release 🍃 MINT-1T under a CC-BY-4.0 license, designating it primarily as a research artifact. While the dataset is freely available, users are responsible for ensuring its legal use in commercial settings. Users must independently verify compliance with applicable laws before employing MINT-1T for commercial purposes.
## Citation
```
@article{awadalla2024mint1t,
title={MINT-1T: Scaling Open-Source Multimodal Data by 10x: A Multimodal Dataset with One Trillion Tokens},
author={Anas Awadalla and Le Xue and Oscar Lo and Manli Shu and Hannah Lee and Etash Kumar Guha and Matt Jordan and Sheng Shen and Mohamed Awadalla and Silvio Savarese and Caiming Xiong and Ran Xu and Yejin Choi and Ludwig Schmidt},
year={2024}
}
``` |
gigant/tib_slides | gigant | "2025-01-28T14:40:52Z" | 17,901 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2023-01-27T01:44:02Z" | ---
dataset_info:
features:
- name: Image
dtype: image
- name: file_name
dtype: string
splits:
- name: train
num_bytes: 131956494917.654
num_examples: 484843
download_size: 0
dataset_size: 131956494917.654
---
# Dataset Card for "tib_slides"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
bhatvineet/shrutilipi_mr | bhatvineet | "2023-03-13T18:54:45Z" | 17,888 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2023-03-13T07:12:52Z" | ---
dataset_info:
features:
- name: audio
dtype: audio
- name: transcriptions
dtype: string
splits:
- name: train
num_bytes: 114253169328.11655
num_examples: 474332
- name: test
num_bytes: 39048725811.21545
num_examples: 158111
download_size: 147662822982
dataset_size: 153301895139.332
---
# Dataset Card for "shrutilipi_mr"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
google/IFEval | google | "2024-08-14T08:21:56Z" | 17,851 | 64 | [
"task_categories:text-generation",
"language:en",
"license:apache-2.0",
"size_categories:n<1K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2311.07911",
"region:us"
] | [
"text-generation"
] | "2023-12-22T11:20:03Z" | ---
license: apache-2.0
task_categories:
- text-generation
language:
- en
pretty_name: IFEval
---
# Dataset Card for IFEval
<!-- Provide a quick summary of the dataset. -->
## Dataset Description
- **Repository:** https://github.com/google-research/google-research/tree/master/instruction_following_eval
- **Paper:** https://huggingface.co/papers/2311.07911
- **Leaderboard:** https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard
- **Point of Contact:** [Le Hou]([email protected])
### Dataset Summary
This dataset contains the prompts used in the [Instruction-Following Eval (IFEval) benchmark](https://arxiv.org/abs/2311.07911) for large language models. It contains around 500 "verifiable instructions" such as "write in more than 400 words" and "mention the keyword of AI at least 3 times" which can be verified by heuristics. To load the dataset, run:
```python
from datasets import load_dataset
ifeval = load_dataset("google/IFEval")
```
### Supported Tasks and Leaderboards
The IFEval dataset is designed for evaluating chat or instruction fine-tuned language models and is one of the core benchmarks used in the [Open LLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard).
### Languages
The data in IFEval are in English (BCP-47 en).
## Dataset Structure
### Data Instances
An example of the `train` split looks as follows:
```
{
"key": 1000,
"prompt": 'Write a 300+ word summary of the wikipedia page "https://en.wikipedia.org/wiki/Raymond_III,_Count_of_Tripoli". Do not use any commas and highlight at least 3 sections that has titles in markdown format, for example *highlighted section part 1*, *highlighted section part 2*, *highlighted section part 3*.',
"instruction_id_list": [
"punctuation:no_comma",
"detectable_format:number_highlighted_sections",
"length_constraints:number_words",
],
"kwargs": [
{
"num_highlights": None,
"relation": None,
"num_words": None,
"num_placeholders": None,
"prompt_to_repeat": None,
"num_bullets": None,
"section_spliter": None,
"num_sections": None,
"capital_relation": None,
"capital_frequency": None,
"keywords": None,
"num_paragraphs": None,
"language": None,
"let_relation": None,
"letter": None,
"let_frequency": None,
"end_phrase": None,
"forbidden_words": None,
"keyword": None,
"frequency": None,
"num_sentences": None,
"postscript_marker": None,
"first_word": None,
"nth_paragraph": None,
},
{
"num_highlights": 3,
"relation": None,
"num_words": None,
"num_placeholders": None,
"prompt_to_repeat": None,
"num_bullets": None,
"section_spliter": None,
"num_sections": None,
"capital_relation": None,
"capital_frequency": None,
"keywords": None,
"num_paragraphs": None,
"language": None,
"let_relation": None,
"letter": None,
"let_frequency": None,
"end_phrase": None,
"forbidden_words": None,
"keyword": None,
"frequency": None,
"num_sentences": None,
"postscript_marker": None,
"first_word": None,
"nth_paragraph": None,
},
{
"num_highlights": None,
"relation": "at least",
"num_words": 300,
"num_placeholders": None,
"prompt_to_repeat": None,
"num_bullets": None,
"section_spliter": None,
"num_sections": None,
"capital_relation": None,
"capital_frequency": None,
"keywords": None,
"num_paragraphs": None,
"language": None,
"let_relation": None,
"letter": None,
"let_frequency": None,
"end_phrase": None,
"forbidden_words": None,
"keyword": None,
"frequency": None,
"num_sentences": None,
"postscript_marker": None,
"first_word": None,
"nth_paragraph": None,
},
],
}
```
### Data Fields
The data fields are as follows:
* `key`: A unique ID for the prompt.
* `prompt`: Describes the task the model should perform.
* `instruction_id_list`: An array of verifiable instructions. See Table 1 of the paper for the full set with their descriptions.
* `kwargs`: An array of arguments used to specify each verifiable instruction in `instruction_id_list`.
### Data Splits
| | train |
|---------------|------:|
| IFEval | 541 |
### Licensing Information
The dataset is available under the [Apache 2.0 license](https://www.apache.org/licenses/LICENSE-2.0).
### Citation Information
```
@misc{zhou2023instructionfollowingevaluationlargelanguage,
title={Instruction-Following Evaluation for Large Language Models},
author={Jeffrey Zhou and Tianjian Lu and Swaroop Mishra and Siddhartha Brahma and Sujoy Basu and Yi Luan and Denny Zhou and Le Hou},
year={2023},
eprint={2311.07911},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2311.07911},
}
``` |
mythicinfinity/libritts | mythicinfinity | "2024-02-09T21:19:32Z" | 17,754 | 14 | [
"task_categories:text-to-speech",
"language:en",
"license:cc-by-4.0",
"size_categories:100K<n<1M",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"arxiv:1904.02882",
"region:us"
] | [
"text-to-speech"
] | "2024-02-08T02:07:23Z" | ---
license: cc-by-4.0
task_categories:
- text-to-speech
language:
- en
size_categories:
- 10K<n<100K
configs:
- config_name: dev
data_files:
- split: dev.clean
path: "data/dev.clean/dev.clean*.parquet"
- config_name: clean
data_files:
- split: dev.clean
path: "data/dev.clean/dev.clean*.parquet"
- split: test.clean
path: "data/test.clean/test.clean*.parquet"
- split: train.clean.100
path: "data/train.clean.100/train.clean.100*.parquet"
- split: train.clean.360
path: "data/train.clean.360/train.clean.360*.parquet"
- config_name: other
data_files:
- split: dev.other
path: "data/dev.other/dev.other*.parquet"
- split: test.other
path: "data/test.other/test.other*.parquet"
- split: train.other.500
path: "data/train.other.500/train.other.500*.parquet"
- config_name: all
data_files:
- split: dev.clean
path: "data/dev.clean/dev.clean*.parquet"
- split: dev.other
path: "data/dev.other/dev.other*.parquet"
- split: test.clean
path: "data/test.clean/test.clean*.parquet"
- split: test.other
path: "data/test.other/test.other*.parquet"
- split: train.clean.100
path: "data/train.clean.100/train.clean.100*.parquet"
- split: train.clean.360
path: "data/train.clean.360/train.clean.360*.parquet"
- split: train.other.500
path: "data/train.other.500/train.other.500*.parquet"
---
# Dataset Card for LibriTTS
<!-- Provide a quick summary of the dataset. -->
LibriTTS is a multi-speaker English corpus of approximately 585 hours of read English speech at 24kHz sampling rate,
prepared by Heiga Zen with the assistance of Google Speech and Google Brain team members. The LibriTTS corpus is
designed for TTS research. It is derived from the original materials (mp3 audio files from LibriVox and text files
from Project Gutenberg) of the LibriSpeech corpus.
## Overview
This is the LibriTTS dataset, adapted for the `datasets` library.
## Usage
### Splits
There are 7 splits (dots replace dashes from the original dataset, to comply with hf naming requirements):
- dev.clean
- dev.other
- test.clean
- test.other
- train.clean.100
- train.clean.360
- train.other.500
### Configurations
There are 3 configurations, each which limits the splits the `load_dataset()` function will download.
The default configuration is "all".
- "dev": only the "dev.clean" split (good for testing the dataset quickly)
- "clean": contains only "clean" splits
- "other": contains only "other" splits
- "all": contains all splits
### Example
Loading the `clean` config with only the `train.clean.100` split.
```
load_dataset("blabble-io/libritts", "clean", split="train.clean.100")
```
Streaming is also supported.
```
load_dataset("blabble-io/libritts", streaming=True)
```
### Columns
```
{
"audio": datasets.Audio(sampling_rate=24_000),
"text_normalized": datasets.Value("string"),
"text_original": datasets.Value("string"),
"speaker_id": datasets.Value("string"),
"path": datasets.Value("string"),
"chapter_id": datasets.Value("string"),
"id": datasets.Value("string"),
}
```
### Example Row
```
{
'audio': {
'path': '/home/user/.cache/huggingface/datasets/downloads/extracted/5551a515e85b9e463062524539c2e1cb52ba32affe128dffd866db0205248bdd/LibriTTS/dev-clean/3081/166546/3081_166546_000028_000002.wav',
'array': ...,
'sampling_rate': 24000
},
'text_normalized': 'How quickly he disappeared!"',
'text_original': 'How quickly he disappeared!"',
'speaker_id': '3081',
'path': '/home/user/.cache/huggingface/datasets/downloads/extracted/5551a515e85b9e463062524539c2e1cb52ba32affe128dffd866db0205248bdd/LibriTTS/dev-clean/3081/166546/3081_166546_000028_000002.wav',
'chapter_id': '166546',
'id': '3081_166546_000028_000002'
}
```
## Dataset Details
### Dataset Description
- **License:** CC BY 4.0
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Homepage:** https://www.openslr.org/60/
- **Paper:** https://arxiv.org/abs/1904.02882
## Citation
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
```
@ARTICLE{Zen2019-kz,
title = "{LibriTTS}: A corpus derived from {LibriSpeech} for
text-to-speech",
author = "Zen, Heiga and Dang, Viet and Clark, Rob and Zhang, Yu and
Weiss, Ron J and Jia, Ye and Chen, Zhifeng and Wu, Yonghui",
abstract = "This paper introduces a new speech corpus called
``LibriTTS'' designed for text-to-speech use. It is derived
from the original audio and text materials of the
LibriSpeech corpus, which has been used for training and
evaluating automatic speech recognition systems. The new
corpus inherits desired properties of the LibriSpeech corpus
while addressing a number of issues which make LibriSpeech
less than ideal for text-to-speech work. The released corpus
consists of 585 hours of speech data at 24kHz sampling rate
from 2,456 speakers and the corresponding texts.
Experimental results show that neural end-to-end TTS models
trained from the LibriTTS corpus achieved above 4.0 in mean
opinion scores in naturalness in five out of six evaluation
speakers. The corpus is freely available for download from
http://www.openslr.org/60/.",
month = apr,
year = 2019,
copyright = "http://arxiv.org/licenses/nonexclusive-distrib/1.0/",
archivePrefix = "arXiv",
primaryClass = "cs.SD",
eprint = "1904.02882"
}
``` |
Helsinki-NLP/opus_openoffice | Helsinki-NLP | "2024-02-22T15:14:50Z" | 17,712 | 7 | [
"task_categories:translation",
"annotations_creators:found",
"language_creators:found",
"multilinguality:multilingual",
"source_datasets:original",
"language:de",
"language:en",
"language:es",
"language:fr",
"language:ja",
"language:ru",
"language:sv",
"language:zh",
"license:unknown",
"size_categories:1M<n<10M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"translation"
] | "2022-03-02T23:29:22Z" | ---
annotations_creators:
- found
language_creators:
- found
language:
- de
- en
- es
- fr
- ja
- ru
- sv
- zh
license:
- unknown
multilinguality:
- multilingual
size_categories:
- 10K<n<100K
source_datasets:
- original
task_categories:
- translation
task_ids: []
pretty_name: OpusOpenoffice
config_names:
- de-en_GB
- de-es
- de-fr
- de-ja
- de-ru
- de-sv
- de-zh_CN
- en_GB-es
- en_GB-fr
- en_GB-ja
- en_GB-ru
- en_GB-sv
- en_GB-zh_CN
- es-fr
- es-ja
- es-ru
- es-sv
- es-zh_CN
- fr-ja
- fr-ru
- fr-sv
- fr-zh_CN
- ja-ru
- ja-sv
- ja-zh_CN
- ru-sv
- ru-zh_CN
- sv-zh_CN
language_bcp47:
- en-GB
- zh-CN
dataset_info:
- config_name: de-en_GB
features:
- name: translation
dtype:
translation:
languages:
- de
- en_GB
splits:
- name: train
num_bytes: 6201077
num_examples: 77052
download_size: 2983173
dataset_size: 6201077
- config_name: de-es
features:
- name: translation
dtype:
translation:
languages:
- de
- es
splits:
- name: train
num_bytes: 6571615
num_examples: 77000
download_size: 3145841
dataset_size: 6571615
- config_name: de-fr
features:
- name: translation
dtype:
translation:
languages:
- de
- fr
splits:
- name: train
num_bytes: 6715805
num_examples: 76684
download_size: 3167189
dataset_size: 6715805
- config_name: de-ja
features:
- name: translation
dtype:
translation:
languages:
- de
- ja
splits:
- name: train
num_bytes: 7084951
num_examples: 69396
download_size: 3137719
dataset_size: 7084951
- config_name: de-ru
features:
- name: translation
dtype:
translation:
languages:
- de
- ru
splits:
- name: train
num_bytes: 8333241
num_examples: 75511
download_size: 3585304
dataset_size: 8333241
- config_name: de-sv
features:
- name: translation
dtype:
translation:
languages:
- de
- sv
splits:
- name: train
num_bytes: 6288962
num_examples: 77366
download_size: 3053987
dataset_size: 6288962
- config_name: de-zh_CN
features:
- name: translation
dtype:
translation:
languages:
- de
- zh_CN
splits:
- name: train
num_bytes: 5836628
num_examples: 68712
download_size: 2862703
dataset_size: 5836628
- config_name: en_GB-es
features:
- name: translation
dtype:
translation:
languages:
- en_GB
- es
splits:
- name: train
num_bytes: 6147581
num_examples: 77646
download_size: 2933203
dataset_size: 6147581
- config_name: en_GB-fr
features:
- name: translation
dtype:
translation:
languages:
- en_GB
- fr
splits:
- name: train
num_bytes: 6297779
num_examples: 77696
download_size: 2952170
dataset_size: 6297779
- config_name: en_GB-ja
features:
- name: translation
dtype:
translation:
languages:
- en_GB
- ja
splits:
- name: train
num_bytes: 6636722
num_examples: 69149
download_size: 2920159
dataset_size: 6636722
- config_name: en_GB-ru
features:
- name: translation
dtype:
translation:
languages:
- en_GB
- ru
splits:
- name: train
num_bytes: 7877970
num_examples: 75401
download_size: 3356420
dataset_size: 7877970
- config_name: en_GB-sv
features:
- name: translation
dtype:
translation:
languages:
- en_GB
- sv
splits:
- name: train
num_bytes: 5861461
num_examples: 77815
download_size: 2839624
dataset_size: 5861461
- config_name: en_GB-zh_CN
features:
- name: translation
dtype:
translation:
languages:
- en_GB
- zh_CN
splits:
- name: train
num_bytes: 5424865
num_examples: 69400
download_size: 2663377
dataset_size: 5424865
- config_name: es-fr
features:
- name: translation
dtype:
translation:
languages:
- es
- fr
splits:
- name: train
num_bytes: 6663092
num_examples: 77417
download_size: 3115129
dataset_size: 6663092
- config_name: es-ja
features:
- name: translation
dtype:
translation:
languages:
- es
- ja
splits:
- name: train
num_bytes: 7005123
num_examples: 68944
download_size: 3075174
dataset_size: 7005123
- config_name: es-ru
features:
- name: translation
dtype:
translation:
languages:
- es
- ru
splits:
- name: train
num_bytes: 8283703
num_examples: 76461
download_size: 3533017
dataset_size: 8283703
- config_name: es-sv
features:
- name: translation
dtype:
translation:
languages:
- es
- sv
splits:
- name: train
num_bytes: 6232466
num_examples: 77825
download_size: 2999454
dataset_size: 6232466
- config_name: es-zh_CN
features:
- name: translation
dtype:
translation:
languages:
- es
- zh_CN
splits:
- name: train
num_bytes: 5776827
num_examples: 68583
download_size: 2815094
dataset_size: 5776827
- config_name: fr-ja
features:
- name: translation
dtype:
translation:
languages:
- fr
- ja
splits:
- name: train
num_bytes: 7160332
num_examples: 69026
download_size: 3104825
dataset_size: 7160332
- config_name: fr-ru
features:
- name: translation
dtype:
translation:
languages:
- fr
- ru
splits:
- name: train
num_bytes: 8432061
num_examples: 76464
download_size: 3553215
dataset_size: 8432061
- config_name: fr-sv
features:
- name: translation
dtype:
translation:
languages:
- fr
- sv
splits:
- name: train
num_bytes: 6373350
num_examples: 77398
download_size: 3020247
dataset_size: 6373350
- config_name: fr-zh_CN
features:
- name: translation
dtype:
translation:
languages:
- fr
- zh_CN
splits:
- name: train
num_bytes: 5918482
num_examples: 68723
download_size: 2834942
dataset_size: 5918482
- config_name: ja-ru
features:
- name: translation
dtype:
translation:
languages:
- ja
- ru
splits:
- name: train
num_bytes: 8781230
num_examples: 68589
download_size: 3534714
dataset_size: 8781230
- config_name: ja-sv
features:
- name: translation
dtype:
translation:
languages:
- ja
- sv
splits:
- name: train
num_bytes: 6709627
num_examples: 69154
download_size: 2983777
dataset_size: 6709627
- config_name: ja-zh_CN
features:
- name: translation
dtype:
translation:
languages:
- ja
- zh_CN
splits:
- name: train
num_bytes: 6397676
num_examples: 68953
download_size: 2877818
dataset_size: 6397676
- config_name: ru-sv
features:
- name: translation
dtype:
translation:
languages:
- ru
- sv
splits:
- name: train
num_bytes: 7966150
num_examples: 75560
download_size: 3425447
dataset_size: 7966150
- config_name: ru-zh_CN
features:
- name: translation
dtype:
translation:
languages:
- ru
- zh_CN
splits:
- name: train
num_bytes: 7393659
num_examples: 66259
download_size: 3224677
dataset_size: 7393659
- config_name: sv-zh_CN
features:
- name: translation
dtype:
translation:
languages:
- sv
- zh_CN
splits:
- name: train
num_bytes: 5492902
num_examples: 68846
download_size: 2722340
dataset_size: 5492902
configs:
- config_name: de-en_GB
data_files:
- split: train
path: de-en_GB/train-*
- config_name: de-es
data_files:
- split: train
path: de-es/train-*
- config_name: de-fr
data_files:
- split: train
path: de-fr/train-*
- config_name: de-ja
data_files:
- split: train
path: de-ja/train-*
- config_name: de-ru
data_files:
- split: train
path: de-ru/train-*
- config_name: de-sv
data_files:
- split: train
path: de-sv/train-*
- config_name: de-zh_CN
data_files:
- split: train
path: de-zh_CN/train-*
- config_name: en_GB-es
data_files:
- split: train
path: en_GB-es/train-*
- config_name: en_GB-fr
data_files:
- split: train
path: en_GB-fr/train-*
- config_name: en_GB-ja
data_files:
- split: train
path: en_GB-ja/train-*
- config_name: en_GB-ru
data_files:
- split: train
path: en_GB-ru/train-*
- config_name: en_GB-sv
data_files:
- split: train
path: en_GB-sv/train-*
- config_name: en_GB-zh_CN
data_files:
- split: train
path: en_GB-zh_CN/train-*
- config_name: es-fr
data_files:
- split: train
path: es-fr/train-*
- config_name: es-ja
data_files:
- split: train
path: es-ja/train-*
- config_name: es-ru
data_files:
- split: train
path: es-ru/train-*
- config_name: es-sv
data_files:
- split: train
path: es-sv/train-*
- config_name: es-zh_CN
data_files:
- split: train
path: es-zh_CN/train-*
- config_name: fr-ja
data_files:
- split: train
path: fr-ja/train-*
- config_name: fr-ru
data_files:
- split: train
path: fr-ru/train-*
- config_name: fr-sv
data_files:
- split: train
path: fr-sv/train-*
- config_name: fr-zh_CN
data_files:
- split: train
path: fr-zh_CN/train-*
- config_name: ja-ru
data_files:
- split: train
path: ja-ru/train-*
- config_name: ja-sv
data_files:
- split: train
path: ja-sv/train-*
- config_name: ja-zh_CN
data_files:
- split: train
path: ja-zh_CN/train-*
- config_name: ru-sv
data_files:
- split: train
path: ru-sv/train-*
- config_name: ru-zh_CN
data_files:
- split: train
path: ru-zh_CN/train-*
- config_name: sv-zh_CN
data_files:
- split: train
path: sv-zh_CN/train-*
---
# Dataset Card for OpusOpenoffice
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** https://opus.nlpl.eu/OpenOffice/corpus/version/OpenOffice
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
A collection of documents from http://www.openoffice.org/.
8 languages, 28 bitexts
### Supported Tasks and Leaderboards
The underlying task is machine translation.
### Languages
[More Information Needed]
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
```
@InProceedings{TIEDEMANN12.463,
  author = {Jörg Tiedemann},
title = {Parallel Data, Tools and Interfaces in OPUS},
  booktitle = {Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC'12)},
year = {2012},
month = {may},
date = {23-25},
address = {Istanbul, Turkey},
editor = {Nicoletta Calzolari (Conference Chair) and Khalid Choukri and Thierry Declerck and Mehmet Ugur Dogan and Bente Maegaard and Joseph Mariani and Jan Odijk and Stelios Piperidis},
publisher = {European Language Resources Association (ELRA)},
isbn = {978-2-9517408-7-7},
language = {english}
}
```
### Contributions
Thanks to [@patil-suraj](https://github.com/patil-suraj) for adding this dataset.
|
MMMU/MMMU | MMMU | "2024-09-19T17:11:03Z" | 17,684 | 242 | [
"task_categories:question-answering",
"task_categories:visual-question-answering",
"task_categories:multiple-choice",
"language:en",
"license:apache-2.0",
"size_categories:10K<n<100K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"arxiv:2311.16502",
"region:us",
"biology",
"medical",
"finance",
"chemistry",
"music",
"art",
"art_theory",
"design",
"business",
"accounting",
"economics",
"manage",
"marketing",
"health",
"medicine",
"basic_medical_science",
"clinical",
"pharmacy",
"public_health",
"humanities",
"social_science",
"history",
"literature",
"sociology",
"psychology",
"science",
"geography",
"math",
"physics",
"engineering",
"agriculture",
"architecture",
"computer_science",
"electronics",
"energy_and_power",
"materials",
"mechanical_engineering"
] | [
"question-answering",
"visual-question-answering",
"multiple-choice"
] | "2023-11-27T17:52:01Z" | ---
language:
- en
license: apache-2.0
size_categories:
- 10K<n<100K
task_categories:
- question-answering
- visual-question-answering
- multiple-choice
pretty_name: mmmu
dataset_info:
- config_name: Accounting
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 262599.0
num_examples: 5
- name: validation
num_bytes: 1598285.0
num_examples: 30
- name: test
num_bytes: 22135625.0
num_examples: 380
download_size: 37363379
dataset_size: 23996509.0
- config_name: Agriculture
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 22082656.0
num_examples: 5
- name: validation
num_bytes: 119217558.0
num_examples: 30
- name: test
num_bytes: 993664077.0
num_examples: 287
download_size: 1158036990
dataset_size: 1134964291.0
- config_name: Architecture_and_Engineering
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 137750.0
num_examples: 5
- name: validation
num_bytes: 721378.0
num_examples: 30
- name: test
num_bytes: 16054607.0
num_examples: 551
download_size: 48763955
dataset_size: 16913735.0
- config_name: Art
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 6241184.0
num_examples: 5
- name: validation
num_bytes: 29934534.0
num_examples: 30
- name: test
num_bytes: 237801390.0
num_examples: 231
download_size: 585798641
dataset_size: 273977108.0
- config_name: Art_Theory
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 7435106.0
num_examples: 5
- name: validation
num_bytes: 33481558.0
num_examples: 30
- name: test
num_bytes: 553174647.0
num_examples: 429
download_size: 930525695
dataset_size: 594091311.0
- config_name: Basic_Medical_Science
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 814310.0
num_examples: 5
- name: validation
num_bytes: 4125930.0
num_examples: 30
- name: test
num_bytes: 48125891.0
num_examples: 326
download_size: 84666454
dataset_size: 53066131.0
- config_name: Biology
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 574342.0
num_examples: 5
- name: validation
num_bytes: 8491863.0
num_examples: 30
- name: test
num_bytes: 132966151.0
num_examples: 345
download_size: 410242502
dataset_size: 142032356.0
- config_name: Chemistry
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 262397.0
num_examples: 5
- name: validation
num_bytes: 1518573.0
num_examples: 30
- name: test
num_bytes: 37219529.0
num_examples: 603
download_size: 108345562
dataset_size: 39000499.0
- config_name: Clinical_Medicine
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 1467945.0
num_examples: 5
- name: validation
num_bytes: 10882484.0
num_examples: 30
- name: test
num_bytes: 98201863.0
num_examples: 325
download_size: 160611488
dataset_size: 110552292.0
- config_name: Computer_Science
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 440523.0
num_examples: 5
- name: validation
num_bytes: 2072018.0
num_examples: 30
- name: test
num_bytes: 32047381.0
num_examples: 371
download_size: 55640991
dataset_size: 34559922.0
- config_name: Design
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 2259873.0
num_examples: 5
- name: validation
num_bytes: 17923120.0
num_examples: 30
- name: test
num_bytes: 77676331.0
num_examples: 169
download_size: 142866617
dataset_size: 97859324.0
- config_name: Diagnostics_and_Laboratory_Medicine
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 2056117.0
num_examples: 5
- name: validation
num_bytes: 37106233.0
num_examples: 30
- name: test
num_bytes: 157003069.0
num_examples: 162
download_size: 603957093
dataset_size: 196165419.0
- config_name: Economics
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 171434.0
num_examples: 5
- name: validation
num_bytes: 1487048.0
num_examples: 30
- name: test
num_bytes: 11852300.0
num_examples: 267
download_size: 20777635
dataset_size: 13510782.0
- config_name: Electronics
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 123632.0
num_examples: 5
- name: validation
num_bytes: 641377.0
num_examples: 30
- name: test
num_bytes: 5717686.0
num_examples: 256
download_size: 11602832
dataset_size: 6482695.0
- config_name: Energy_and_Power
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 105006.0
num_examples: 5
- name: validation
num_bytes: 1641935.0
num_examples: 30
- name: test
num_bytes: 14748428.0
num_examples: 432
download_size: 35246567
dataset_size: 16495369.0
- config_name: Finance
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 296124.0
num_examples: 5
- name: validation
num_bytes: 1071060.0
num_examples: 30
- name: test
num_bytes: 12065803.0
num_examples: 355
download_size: 29551521
dataset_size: 13432987.0
- config_name: Geography
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 1494060.0
num_examples: 5
- name: validation
num_bytes: 6671316.0
num_examples: 30
- name: test
num_bytes: 137218400.0
num_examples: 565
download_size: 374766631
dataset_size: 145383776.0
- config_name: History
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 1444231.0
num_examples: 5
- name: validation
num_bytes: 8819857.0
num_examples: 30
- name: test
num_bytes: 115228815.0
num_examples: 278
download_size: 232549641
dataset_size: 125492903.0
- config_name: Literature
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 2451201.0
num_examples: 5
- name: validation
num_bytes: 14241046.0
num_examples: 30
- name: test
num_bytes: 50301541.0
num_examples: 112
download_size: 132145895
dataset_size: 66993788.0
- config_name: Manage
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 449514.0
num_examples: 5
- name: validation
num_bytes: 3277436.0
num_examples: 30
- name: test
num_bytes: 29963963.0
num_examples: 245
download_size: 51186888
dataset_size: 33690913.0
- config_name: Marketing
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 116960.0
num_examples: 5
- name: validation
num_bytes: 1472981.0
num_examples: 30
- name: test
num_bytes: 7732976.0
num_examples: 181
download_size: 13146078
dataset_size: 9322917.0
- config_name: Materials
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 239632.0
num_examples: 5
- name: validation
num_bytes: 2305223.0
num_examples: 30
- name: test
num_bytes: 25256854.0
num_examples: 458
download_size: 105773156
dataset_size: 27801709.0
- config_name: Math
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 175839.0
num_examples: 5
- name: validation
num_bytes: 1444496.0
num_examples: 30
- name: test
num_bytes: 27701845.0
num_examples: 505
download_size: 174098418
dataset_size: 29322180.0
- config_name: Mechanical_Engineering
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 152542.0
num_examples: 5
- name: validation
num_bytes: 874988.0
num_examples: 30
- name: test
num_bytes: 15093746.0
num_examples: 429
download_size: 30450114
dataset_size: 16121276.0
- config_name: Music
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 1417615.0
num_examples: 5
- name: validation
num_bytes: 9359372.0
num_examples: 30
- name: test
num_bytes: 134096770.0
num_examples: 334
download_size: 174725052
dataset_size: 144873757.0
- config_name: Pharmacy
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 207924.0
num_examples: 5
- name: validation
num_bytes: 1656342.0
num_examples: 30
- name: test
num_bytes: 31866248.0
num_examples: 430
download_size: 62721263
dataset_size: 33730514.0
- config_name: Physics
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 233734.0
num_examples: 5
- name: validation
num_bytes: 1114130.0
num_examples: 30
- name: test
num_bytes: 15905705.0
num_examples: 408
download_size: 35238571
dataset_size: 17253569.0
- config_name: Psychology
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 600864.0
num_examples: 5
- name: validation
num_bytes: 4403886.0
num_examples: 30
- name: test
num_bytes: 53813915.0
num_examples: 305
download_size: 102466671
dataset_size: 58818665.0
- config_name: Public_Health
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 234781.0
num_examples: 5
- name: validation
num_bytes: 1508761.0
num_examples: 30
- name: test
num_bytes: 32150088.0
num_examples: 509
download_size: 48231609
dataset_size: 33893630.0
- config_name: Sociology
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 3769220.0
num_examples: 5
- name: validation
num_bytes: 18455336.0
num_examples: 30
- name: test
num_bytes: 144301123.0
num_examples: 252
download_size: 310313826
dataset_size: 166525679.0
configs:
- config_name: Accounting
data_files:
- split: dev
path: Accounting/dev-*
- split: validation
path: Accounting/validation-*
- split: test
path: Accounting/test-*
- config_name: Agriculture
data_files:
- split: dev
path: Agriculture/dev-*
- split: validation
path: Agriculture/validation-*
- split: test
path: Agriculture/test-*
- config_name: Architecture_and_Engineering
data_files:
- split: dev
path: Architecture_and_Engineering/dev-*
- split: validation
path: Architecture_and_Engineering/validation-*
- split: test
path: Architecture_and_Engineering/test-*
- config_name: Art
data_files:
- split: dev
path: Art/dev-*
- split: validation
path: Art/validation-*
- split: test
path: Art/test-*
- config_name: Art_Theory
data_files:
- split: dev
path: Art_Theory/dev-*
- split: validation
path: Art_Theory/validation-*
- split: test
path: Art_Theory/test-*
- config_name: Basic_Medical_Science
data_files:
- split: dev
path: Basic_Medical_Science/dev-*
- split: validation
path: Basic_Medical_Science/validation-*
- split: test
path: Basic_Medical_Science/test-*
- config_name: Biology
data_files:
- split: dev
path: Biology/dev-*
- split: validation
path: Biology/validation-*
- split: test
path: Biology/test-*
- config_name: Chemistry
data_files:
- split: dev
path: Chemistry/dev-*
- split: validation
path: Chemistry/validation-*
- split: test
path: Chemistry/test-*
- config_name: Clinical_Medicine
data_files:
- split: dev
path: Clinical_Medicine/dev-*
- split: validation
path: Clinical_Medicine/validation-*
- split: test
path: Clinical_Medicine/test-*
- config_name: Computer_Science
data_files:
- split: dev
path: Computer_Science/dev-*
- split: validation
path: Computer_Science/validation-*
- split: test
path: Computer_Science/test-*
- config_name: Design
data_files:
- split: dev
path: Design/dev-*
- split: validation
path: Design/validation-*
- split: test
path: Design/test-*
- config_name: Diagnostics_and_Laboratory_Medicine
data_files:
- split: dev
path: Diagnostics_and_Laboratory_Medicine/dev-*
- split: validation
path: Diagnostics_and_Laboratory_Medicine/validation-*
- split: test
path: Diagnostics_and_Laboratory_Medicine/test-*
- config_name: Economics
data_files:
- split: dev
path: Economics/dev-*
- split: validation
path: Economics/validation-*
- split: test
path: Economics/test-*
- config_name: Electronics
data_files:
- split: dev
path: Electronics/dev-*
- split: validation
path: Electronics/validation-*
- split: test
path: Electronics/test-*
- config_name: Energy_and_Power
data_files:
- split: dev
path: Energy_and_Power/dev-*
- split: validation
path: Energy_and_Power/validation-*
- split: test
path: Energy_and_Power/test-*
- config_name: Finance
data_files:
- split: dev
path: Finance/dev-*
- split: validation
path: Finance/validation-*
- split: test
path: Finance/test-*
- config_name: Geography
data_files:
- split: dev
path: Geography/dev-*
- split: validation
path: Geography/validation-*
- split: test
path: Geography/test-*
- config_name: History
data_files:
- split: dev
path: History/dev-*
- split: validation
path: History/validation-*
- split: test
path: History/test-*
- config_name: Literature
data_files:
- split: dev
path: Literature/dev-*
- split: validation
path: Literature/validation-*
- split: test
path: Literature/test-*
- config_name: Manage
data_files:
- split: dev
path: Manage/dev-*
- split: validation
path: Manage/validation-*
- split: test
path: Manage/test-*
- config_name: Marketing
data_files:
- split: dev
path: Marketing/dev-*
- split: validation
path: Marketing/validation-*
- split: test
path: Marketing/test-*
- config_name: Materials
data_files:
- split: dev
path: Materials/dev-*
- split: validation
path: Materials/validation-*
- split: test
path: Materials/test-*
- config_name: Math
data_files:
- split: dev
path: Math/dev-*
- split: validation
path: Math/validation-*
- split: test
path: Math/test-*
- config_name: Mechanical_Engineering
data_files:
- split: dev
path: Mechanical_Engineering/dev-*
- split: validation
path: Mechanical_Engineering/validation-*
- split: test
path: Mechanical_Engineering/test-*
- config_name: Music
data_files:
- split: dev
path: Music/dev-*
- split: validation
path: Music/validation-*
- split: test
path: Music/test-*
- config_name: Pharmacy
data_files:
- split: dev
path: Pharmacy/dev-*
- split: validation
path: Pharmacy/validation-*
- split: test
path: Pharmacy/test-*
- config_name: Physics
data_files:
- split: dev
path: Physics/dev-*
- split: validation
path: Physics/validation-*
- split: test
path: Physics/test-*
- config_name: Psychology
data_files:
- split: dev
path: Psychology/dev-*
- split: validation
path: Psychology/validation-*
- split: test
path: Psychology/test-*
- config_name: Public_Health
data_files:
- split: dev
path: Public_Health/dev-*
- split: validation
path: Public_Health/validation-*
- split: test
path: Public_Health/test-*
- config_name: Sociology
data_files:
- split: dev
path: Sociology/dev-*
- split: validation
path: Sociology/validation-*
- split: test
path: Sociology/test-*
tags:
- biology
- medical
- finance
- chemistry
- music
- art
- art_theory
- design
- music
- business
- accounting
- economics
- finance
- manage
- marketing
- health
- medicine
- basic_medical_science
- clinical
- pharmacy
- public_health
- humanities
- social_science
- history
- literature
- sociology
- psychology
- science
- biology
- chemistry
- geography
- math
- physics
- engineering
- agriculture
- architecture
- computer_science
- electronics
- energy_and_power
- materials
- mechanical_engineering
---
# MMMU (A Massive Multi-discipline Multimodal Understanding and Reasoning Benchmark for Expert AGI)
[**🌐 Homepage**](https://mmmu-benchmark.github.io/) | [**🏆 Leaderboard**](https://mmmu-benchmark.github.io/#leaderboard) | [**🤗 Dataset**](https://huggingface.co/datasets/MMMU/MMMU/) | [**🤗 Paper**](https://huggingface.co/papers/2311.16502) | [**📖 arXiv**](https://arxiv.org/abs/2311.16502) | [**GitHub**](https://github.com/MMMU-Benchmark/MMMU)
## 🔔News
- **🛠️[2024-05-30]: Fixed duplicate option issues in Materials dataset items (validation_Materials_25; test_Materials_17, 242) and content error in validation_Materials_25.**
- **🛠️[2024-04-30]: Fixed missing "-" or "^" signs in Math dataset items (dev_Math_2, validation_Math_11, 12, 16; test_Math_8, 23, 43, 113, 164, 223, 236, 287, 329, 402, 498) and corrected option errors in validation_Math_2. If you encounter any issues with the dataset, please contact us promptly!**
- **🚀[2024-01-31]: We added Human Expert performance on the [Leaderboard](https://mmmu-benchmark.github.io/#leaderboard)!🌟**
- **🔥[2023-12-04]: Our evaluation server for the test set is now available on [EvalAI](https://eval.ai/web/challenges/challenge-page/2179/overview). We welcome all submissions and look forward to your participation! 😆**
## Dataset Details
### Dataset Description
We introduce MMMU: a new benchmark designed to evaluate multimodal models on massive multi-discipline tasks demanding college-level subject knowledge and deliberate reasoning. MMMU includes **11.5K meticulously collected multimodal questions** from college exams, quizzes, and textbooks, covering six core disciplines: Art & Design, Business, Science, Health & Medicine, Humanities & Social Science, and Tech & Engineering. These questions span **30 subjects** and **183 subfields**, comprising **30 highly heterogeneous image types**, such as charts, diagrams, maps, tables, music sheets, and chemical structures. We believe MMMU will stimulate the community to build next-generation multimodal foundation models towards expert artificial general intelligence (AGI).
🎯 **We have released a full set comprising 150 development samples and 900 validation samples. We have released 10,500 test questions without their answers.**
The development set is used for few-shot/in-context learning, and the validation set is used for debugging models, selecting hyperparameters, or quick evaluations. The answers and explanations for the test set questions are withheld. You can submit your model's predictions for the **test set** on **[EvalAI](https://eval.ai/web/challenges/challenge-page/2179/overview)**.

### Dataset Creation
MMMU was created to challenge multimodal models with tasks that demand college-level subject knowledge and deliberate reasoning, pushing the boundaries of what these models can achieve in terms of expert-level perception and reasoning.
The data for the MMMU dataset was manually collected by a team of college students from various disciplines, using online sources, textbooks, and lecture materials.
- **Content:** The dataset contains 11.5K college-level problems across six broad disciplines (Art & Design, Business, Science, Health & Medicine, Humanities & Social Science, Tech & Engineering) and 30 college subjects.
- **Image Types:** The dataset includes 30 highly heterogeneous image types, such as charts, diagrams, maps, tables, music sheets, and chemical structures, interleaved with text.

## 🏆 Mini-Leaderboard
We show a mini-leaderboard here and please find more information in our paper or [**homepage**](https://mmmu-benchmark.github.io/).
| Model | Val (900) | Test (10.5K) |
|--------------------------------|:---------:|:------------:|
| Expert (Best) | 88.6 | - |
| Expert (Medium) | 82.6 | - |
| Expert (Worst) | 76.2 | - |
| GPT-4o* | **69.1** | - |
| Gemini 1.5 Pro* | 62.2 | - |
| InternVL2-Pro* | 62.0 | **55.7** |
| Gemini 1.0 Ultra* | 59.4 | - |
| Claude 3 Opus* | 59.4 | - |
| GPT-4V(ision) (Playground) | 56.8 | **55.7** |
| Reka Core* | 56.3 | - |
| Gemini 1.5 Flash* | 56.1 | - |
| SenseChat-Vision-0423-Preview* | 54.6 | 50.3 |
| Reka Flash* | 53.3 | - |
| Claude 3 Sonnet* | 53.1 | - |
| HPT Pro* | 52.0 | - |
| VILA1.5* | 51.9 | 46.9 |
| Qwen-VL-MAX* | 51.4 | 46.8 |
| InternVL-Chat-V1.2* | 51.6 | 46.2 |
| Skywork-VL* | 51.4 | 46.2 |
| LLaVA-1.6-34B* | 51.1 | 44.7 |
| Claude 3 Haiku* | 50.2 | - |
| Adept Fuyu-Heavy* | 48.3 | - |
| Gemini 1.0 Pro* | 47.9 | - |
| Marco-VL-Plus* | 46.2 | 44.3 |
| Yi-VL-34B* | 45.9 | 41.6 |
| Qwen-VL-PLUS* | 45.2 | 40.8 |
| HPT Air* | 44.0 | - |
| Reka Edge* | 42.8 | - |
| Marco-VL* | 41.2 | 40.4 |
| OmniLMM-12B* | 41.1 | 40.4 |
| Bunny-8B* | 43.3 | 39.0 |
| Bunny-4B* | 41.4 | 38.4 |
| Weitu-VL-1.0-15B* | - | 38.4 |
| InternLM-XComposer2-VL* | 43.0 | 38.2 |
| Yi-VL-6B* | 39.1 | 37.8 |
| InfiMM-Zephyr-7B* | 39.4 | 35.5 |
| InternVL-Chat-V1.1* | 39.1 | 35.3 |
| Math-LLaVA-13B* | 38.3 | 34.6 |
| SVIT* | 38.0 | 34.1 |
| MiniCPM-V* | 37.2 | 34.1 |
| MiniCPM-V-2* | 37.1 | - |
| Emu2-Chat* | 36.3 | 34.1 |
| BLIP-2 FLAN-T5-XXL | 35.4 | 34.0 |
| InstructBLIP-T5-XXL | 35.7 | 33.8 |
| LLaVA-1.5-13B | 36.4 | 33.6 |
| Bunny-3B* | 38.2 | 33.0 |
| Qwen-VL-7B-Chat | 35.9 | 32.9 |
| SPHINX* | 32.9 | 32.9 |
| mPLUG-OWL2* | 32.7 | 32.1 |
| BLIP-2 FLAN-T5-XL | 34.4 | 31.0 |
| InstructBLIP-T5-XL | 32.9 | 30.6 |
| Gemini Nano2* | 32.6 | - |
| CogVLM | 32.1 | 30.1 |
| Otter | 32.2 | 29.1 |
| LLaMA-Adapter2-7B | 29.8 | 27.7 |
| MiniGPT4-Vicuna-13B | 26.8 | 27.6 |
| Adept Fuyu-8B | 27.9 | 27.4 |
| Kosmos2 | 24.4 | 26.6 |
| OpenFlamingo2-9B | 28.7 | 26.3 |
| Frequent Choice | 22.1 | 23.9 |
| Random Choice | 26.8 | 25.8 |
*: results provided by the authors.
## Limitations
Despite its comprehensive nature, MMMU, like any benchmark, is not without limitations. The manual curation process, albeit thorough, may carry biases.
Also, the focus on college-level subjects might not be a fully sufficient test for Expert AGI.
However, we believe it should be necessary for an Expert AGI to achieve strong performance on MMMU to demonstrate its broad and deep subject knowledge as well as expert-level understanding and reasoning capabilities.
In future work, we plan to incorporate human evaluations into MMMU. This will provide a more grounded comparison between model capabilities and expert performance, shedding light on the proximity of current AI systems to achieving Expert AGI.
## Disclaimers
The guidelines for the annotators emphasized strict compliance with copyright and licensing rules from the initial data source, specifically avoiding materials from websites that forbid copying and redistribution.
Should you encounter any data samples potentially breaching the copyright or licensing regulations of any site, we encourage you to notify us. Upon verification, such samples will be promptly removed.
## Contact
- Xiang Yue: [email protected]
- Yu Su: [email protected]
- Wenhu Chen: [email protected]
## Citation
**BibTeX:**
```bibtex
@inproceedings{yue2023mmmu,
title={MMMU: A Massive Multi-discipline Multimodal Understanding and Reasoning Benchmark for Expert AGI},
author={Xiang Yue and Yuansheng Ni and Kai Zhang and Tianyu Zheng and Ruoqi Liu and Ge Zhang and Samuel Stevens and Dongfu Jiang and Weiming Ren and Yuxuan Sun and Cong Wei and Botao Yu and Ruibin Yuan and Renliang Sun and Ming Yin and Boyuan Zheng and Zhenzhu Yang and Yibo Liu and Wenhao Huang and Huan Sun and Yu Su and Wenhu Chen},
booktitle={Proceedings of CVPR},
year={2024},
}
``` |
sggetao/PwC | sggetao | "2024-03-30T20:03:44Z" | 17,618 | 3 | [
"license:apache-2.0",
"size_categories:100K<n<1M",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-03-30T20:01:08Z" | ---
license: apache-2.0
---
|
alvations/c4p0-x1-en-es | alvations | "2024-03-24T03:55:08Z" | 17,528 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-03-23T10:03:20Z" | ---
dataset_info:
features:
- name: source
dtype: string
- name: target
dtype: string
- name: target_backto_source
dtype: string
- name: raw_target
list:
- name: generated_text
dtype: string
- name: raw_target_backto_source
list:
- name: generated_text
dtype: string
- name: prompt
dtype: string
- name: reverse_prompt
dtype: string
- name: source_langid
dtype: string
- name: target_langid
dtype: string
- name: target_backto_source_langid
dtype: string
- name: doc_id
dtype: int64
- name: sent_id
dtype: int64
- name: timestamp
dtype: string
- name: url
dtype: string
- name: doc_hash
dtype: string
splits:
- name: train
num_bytes: 2308
num_examples: 2
download_size: 15489
dataset_size: 2308
configs:
- config_name: default
data_files:
- split: train
path: 1f24f16745a166b0/train-*
---
|
weizhiwang/Open-Qwen2VL-Data | weizhiwang | "2025-04-03T02:23:45Z" | 17,528 | 9 | [
"task_categories:image-text-to-text",
"size_categories:10M<n<100M",
"format:parquet",
"modality:image",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"arxiv:2504.00595",
"region:us"
] | [
"image-text-to-text"
] | "2024-09-27T06:48:06Z" | ---
task_categories:
- image-text-to-text
---
This repository contains the data for [Open-Qwen2VL: Compute-Efficient Pre-Training of Fully-Open Multimodal LLMs on Academic Resources](https://huggingface.co/papers/2504.00595).
Project page: https://victorwz.github.io/Open-Qwen2VL
Code: https://github.com/Victorwz/Open-Qwen2VL |
lmms-lab/GQA | lmms-lab | "2024-03-08T05:02:22Z" | 17,310 | 14 | [
"license:mit",
"size_categories:10M<n<100M",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2023-12-26T13:11:16Z" | ---
license: mit
dataset_info:
- config_name: challenge_all_images
features:
- name: id
dtype: string
- name: image
dtype: image
splits:
- name: challenge
num_bytes: 261636425.25
num_examples: 1590
download_size: 261271928
dataset_size: 261636425.25
- config_name: challenge_all_instructions
features:
- name: id
dtype: string
- name: imageId
dtype: string
- name: question
dtype: string
- name: isBalanced
dtype: bool
splits:
- name: challenge
num_bytes: 50797705
num_examples: 713449
download_size: 19869828
dataset_size: 50797705
- config_name: challenge_balanced_images
features:
- name: id
dtype: string
- name: image
dtype: image
splits:
- name: challenge
num_bytes: 261636425.25
num_examples: 1590
download_size: 261333538
dataset_size: 261636425.25
- config_name: challenge_balanced_instructions
features:
- name: id
dtype: string
- name: imageId
dtype: string
- name: question
dtype: string
- name: isBalanced
dtype: bool
splits:
- name: challenge
num_bytes: 3523973
num_examples: 50726
download_size: 1787024
dataset_size: 3523973
- config_name: submission_all_images
features:
- name: id
dtype: string
- name: image
dtype: image
splits:
- name: submission
num_bytes: 2314978438.875
num_examples: 15545
download_size: 2309217874
dataset_size: 2314978438.875
- config_name: submission_all_instructions
features:
- name: id
dtype: string
- name: imageId
dtype: string
- name: question
dtype: string
- name: isBalanced
dtype: bool
splits:
- name: submission
num_bytes: 298875520
num_examples: 4237524
download_size: 121458425
dataset_size: 298875520
- config_name: test_all_images
features:
- name: id
dtype: string
- name: image
dtype: image
splits:
- name: test
num_bytes: 492571840.875
num_examples: 2993
download_size: 491611526
dataset_size: 492571840.875
- config_name: test_all_instructions
features:
- name: id
dtype: string
- name: imageId
dtype: string
- name: question
dtype: string
- name: isBalanced
dtype: bool
splits:
- name: test
num_bytes: 95588974
num_examples: 1340048
download_size: 39561711
dataset_size: 95588974
- config_name: test_balanced_images
features:
- name: id
dtype: string
- name: image
dtype: image
splits:
- name: test
num_bytes: 491210370.625
num_examples: 2987
download_size: 490293506
dataset_size: 491210370.625
- config_name: test_balanced_instructions
features:
- name: id
dtype: string
- name: imageId
dtype: string
- name: question
dtype: string
- name: isBalanced
dtype: bool
splits:
- name: test
num_bytes: 6622775
num_examples: 95336
download_size: 3401070
dataset_size: 6622775
- config_name: testdev_all_images
features:
- name: id
dtype: string
- name: image
dtype: image
splits:
- name: testdev
num_bytes: 65779269.0
num_examples: 398
download_size: 65670255
dataset_size: 65779269.0
- config_name: testdev_all_instructions
features:
- name: id
dtype: string
- name: imageId
dtype: string
- name: question
dtype: string
- name: answer
dtype: string
- name: fullAnswer
dtype: string
- name: isBalanced
dtype: bool
- name: groups
struct:
- name: global
dtype: string
- name: local
dtype: string
- name: entailed
dtype: string
- name: equivalent
dtype: string
- name: types
struct:
- name: structural
dtype: string
- name: semantic
dtype: string
- name: detailed
dtype: string
- name: annotations
sequence:
- name: question
struct:
- name: objectId
dtype: string
- name: value
dtype: string
- name: answer
struct:
- name: objectId
dtype: string
- name: value
dtype: string
- name: fullAnswer
struct:
- name: objectId
dtype: string
- name: value
dtype: string
- name: semantic
list:
- name: operation
dtype: string
- name: argument
dtype: string
- name: dependencies
sequence: int32
- name: semanticStr
dtype: string
splits:
- name: testdev
num_bytes: 86970760
num_examples: 172174
download_size: 23385535
dataset_size: 86970760
- config_name: testdev_balanced_images
features:
- name: id
dtype: string
- name: image
dtype: image
splits:
- name: testdev
num_bytes: 65779269.0
num_examples: 398
download_size: 65647745
dataset_size: 65779269.0
- config_name: testdev_balanced_instructions
features:
- name: id
dtype: string
- name: imageId
dtype: string
- name: question
dtype: string
- name: answer
dtype: string
- name: fullAnswer
dtype: string
- name: isBalanced
dtype: bool
- name: groups
struct:
- name: global
dtype: string
- name: local
dtype: string
- name: entailed
dtype: string
- name: equivalent
dtype: string
- name: types
struct:
- name: structural
dtype: string
- name: semantic
dtype: string
- name: detailed
dtype: string
- name: annotations
sequence:
- name: question
struct:
- name: objectId
dtype: string
- name: value
dtype: string
- name: answer
struct:
- name: objectId
dtype: string
- name: value
dtype: string
- name: fullAnswer
struct:
- name: objectId
dtype: string
- name: value
dtype: string
- name: semantic
list:
- name: operation
dtype: string
- name: argument
dtype: string
- name: dependencies
sequence: int32
- name: semanticStr
dtype: string
splits:
- name: testdev
num_bytes: 6113469
num_examples: 12578
download_size: 2090335
dataset_size: 6113469
- config_name: train_all_images
features:
- name: id
dtype: string
- name: image
dtype: image
splits:
- name: train
num_bytes: 10509758457.0
num_examples: 74256
download_size: 10480239090
dataset_size: 10509758457.0
- config_name: train_all_instructions
features:
- name: id
dtype: string
- name: imageId
dtype: string
- name: question
dtype: string
- name: answer
dtype: string
- name: fullAnswer
dtype: string
- name: isBalanced
dtype: bool
- name: groups
struct:
- name: global
dtype: string
- name: local
dtype: string
- name: entailed
dtype: string
- name: equivalent
dtype: string
- name: types
struct:
- name: structural
dtype: string
- name: semantic
dtype: string
- name: detailed
dtype: string
- name: annotations
sequence:
- name: question
struct:
- name: objectId
dtype: string
- name: value
dtype: string
- name: answer
struct:
- name: objectId
dtype: string
- name: value
dtype: string
- name: fullAnswer
struct:
- name: objectId
dtype: string
- name: value
dtype: string
- name: semantic
list:
- name: operation
dtype: string
- name: argument
dtype: string
- name: dependencies
sequence: int32
- name: semanticStr
dtype: string
splits:
- name: train
num_bytes: 6891129609
num_examples: 14305356
download_size: 1874173198
dataset_size: 6891129609
- config_name: train_balanced_images
features:
- name: id
dtype: string
- name: image
dtype: image
splits:
- name: train
num_bytes: 10200292415.5
num_examples: 72140
download_size: 10171627271
dataset_size: 10200292415.5
- config_name: train_balanced_instructions
features:
- name: id
dtype: string
- name: imageId
dtype: string
- name: question
dtype: string
- name: answer
dtype: string
- name: fullAnswer
dtype: string
- name: isBalanced
dtype: bool
- name: groups
struct:
- name: global
dtype: string
- name: local
dtype: string
- name: entailed
dtype: string
- name: equivalent
dtype: string
- name: types
struct:
- name: structural
dtype: string
- name: semantic
dtype: string
- name: detailed
dtype: string
- name: annotations
sequence:
- name: question
struct:
- name: objectId
dtype: string
- name: value
dtype: string
- name: answer
struct:
- name: objectId
dtype: string
- name: value
dtype: string
- name: fullAnswer
struct:
- name: objectId
dtype: string
- name: value
dtype: string
- name: semantic
list:
- name: operation
dtype: string
- name: argument
dtype: string
- name: dependencies
sequence: int32
- name: semanticStr
dtype: string
splits:
- name: train
num_bytes: 460429581
num_examples: 943000
download_size: 183979778
dataset_size: 460429581
- config_name: val_all_images
features:
- name: id
dtype: string
- name: image
dtype: image
splits:
- name: val
num_bytes: 1494990904.5
num_examples: 10564
download_size: 1490744689
dataset_size: 1494990904.5
- config_name: val_all_instructions
features:
- name: id
dtype: string
- name: imageId
dtype: string
- name: question
dtype: string
- name: answer
dtype: string
- name: fullAnswer
dtype: string
- name: isBalanced
dtype: bool
- name: groups
struct:
- name: global
dtype: string
- name: local
dtype: string
- name: entailed
dtype: string
- name: equivalent
dtype: string
- name: types
struct:
- name: structural
dtype: string
- name: semantic
dtype: string
- name: detailed
dtype: string
- name: annotations
sequence:
- name: question
struct:
- name: objectId
dtype: string
- name: value
dtype: string
- name: answer
struct:
- name: objectId
dtype: string
- name: value
dtype: string
- name: fullAnswer
struct:
- name: objectId
dtype: string
- name: value
dtype: string
- name: semantic
list:
- name: operation
dtype: string
- name: argument
dtype: string
- name: dependencies
sequence: int32
- name: semanticStr
dtype: string
splits:
- name: val
num_bytes: 967338322
num_examples: 2011853
download_size: 266476025
dataset_size: 967338322
- config_name: val_balanced_images
features:
- name: id
dtype: string
- name: image
dtype: image
splits:
- name: val
num_bytes: 1447074448.75
num_examples: 10234
download_size: 1443033919
dataset_size: 1447074448.75
- config_name: val_balanced_instructions
features:
- name: id
dtype: string
- name: imageId
dtype: string
- name: question
dtype: string
- name: answer
dtype: string
- name: fullAnswer
dtype: string
- name: isBalanced
dtype: bool
- name: groups
struct:
- name: global
dtype: string
- name: local
dtype: string
- name: entailed
dtype: string
- name: equivalent
dtype: string
- name: types
struct:
- name: structural
dtype: string
- name: semantic
dtype: string
- name: detailed
dtype: string
- name: annotations
sequence:
- name: question
struct:
- name: objectId
dtype: string
- name: value
dtype: string
- name: answer
struct:
- name: objectId
dtype: string
- name: value
dtype: string
- name: fullAnswer
struct:
- name: objectId
dtype: string
- name: value
dtype: string
- name: semantic
list:
- name: operation
dtype: string
- name: argument
dtype: string
- name: dependencies
sequence: int32
- name: semanticStr
dtype: string
splits:
- name: val
num_bytes: 64498952
num_examples: 132062
download_size: 25794272
dataset_size: 64498952
configs:
- config_name: challenge_all_images
data_files:
- split: challenge
path: challenge_all_images/challenge-*
- config_name: challenge_all_instructions
data_files:
- split: challenge
path: challenge_all_instructions/challenge-*
- config_name: challenge_balanced_images
data_files:
- split: challenge
path: challenge_balanced_images/challenge-*
- config_name: challenge_balanced_instructions
data_files:
- split: challenge
path: challenge_balanced_instructions/challenge-*
- config_name: submission_all_images
data_files:
- split: submission
path: submission_all_images/submission-*
- config_name: submission_all_instructions
data_files:
- split: submission
path: submission_all_instructions/submission-*
- config_name: test_all_images
data_files:
- split: test
path: test_all_images/test-*
- config_name: test_all_instructions
data_files:
- split: test
path: test_all_instructions/test-*
- config_name: test_balanced_images
data_files:
- split: test
path: test_balanced_images/test-*
- config_name: test_balanced_instructions
data_files:
- split: test
path: test_balanced_instructions/test-*
- config_name: testdev_all_images
data_files:
- split: testdev
path: testdev_all_images/testdev-*
- config_name: testdev_all_instructions
data_files:
- split: testdev
path: testdev_all_instructions/testdev-*
- config_name: testdev_balanced_images
data_files:
- split: testdev
path: testdev_balanced_images/testdev-*
- config_name: testdev_balanced_instructions
data_files:
- split: testdev
path: testdev_balanced_instructions/testdev-*
- config_name: train_all_images
data_files:
- split: train
path: train_all_images/train-*
- config_name: train_all_instructions
data_files:
- split: train
path: train_all_instructions/train-*
- config_name: train_balanced_images
data_files:
- split: train
path: train_balanced_images/train-*
- config_name: train_balanced_instructions
data_files:
- split: train
path: train_balanced_instructions/train-*
- config_name: val_all_images
data_files:
- split: val
path: val_all_images/val-*
- config_name: val_all_instructions
data_files:
- split: val
path: val_all_instructions/val-*
- config_name: val_balanced_images
data_files:
- split: val
path: val_balanced_images/val-*
- config_name: val_balanced_instructions
data_files:
- split: val
path: val_balanced_instructions/val-*
---
<p align="center" width="100%">
<img src="https://i.postimg.cc/g0QRgMVv/WX20240228-113337-2x.png" width="100%" height="80%">
</p>
# Large-scale Multi-modality Models Evaluation Suite
> Accelerating the development of large-scale multi-modality models (LMMs) with `lmms-eval`
🏠 [Homepage](https://lmms-lab.github.io/) | 📚 [Documentation](docs/README.md) | 🤗 [Huggingface Datasets](https://huggingface.co/lmms-lab)
# This Dataset
This is a formatted version of [GQA](hhttps://cs.stanford.edu/people/dorarad/gqa/about.html). It is used in our `lmms-eval` pipeline to allow for one-click evaluations of large multi-modality models.
```
@inproceedings{hudson2019gqa,
title={Gqa: A new dataset for real-world visual reasoning and compositional question answering},
author={Hudson, Drew A and Manning, Christopher D},
booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition},
pages={6700--6709},
year={2019}
}
``` |
facebook/xnli | facebook | "2024-01-05T08:30:52Z" | 17,244 | 56 | [
"language:ar",
"language:bg",
"language:de",
"language:el",
"language:en",
"language:es",
"language:fr",
"language:hi",
"language:ru",
"language:sw",
"language:th",
"language:tr",
"language:ur",
"language:vi",
"language:zh",
"size_categories:1M<n<10M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2022-03-02T23:29:22Z" | ---
language:
- ar
- bg
- de
- el
- en
- es
- fr
- hi
- ru
- sw
- th
- tr
- ur
- vi
- zh
paperswithcode_id: xnli
pretty_name: Cross-lingual Natural Language Inference
dataset_info:
- config_name: all_languages
features:
- name: premise
dtype:
translation:
languages:
- ar
- bg
- de
- el
- en
- es
- fr
- hi
- ru
- sw
- th
- tr
- ur
- vi
- zh
- name: hypothesis
dtype:
translation_variable_languages:
languages:
- ar
- bg
- de
- el
- en
- es
- fr
- hi
- ru
- sw
- th
- tr
- ur
- vi
- zh
num_languages: 15
- name: label
dtype:
class_label:
names:
'0': entailment
'1': neutral
'2': contradiction
splits:
- name: train
num_bytes: 1581471691
num_examples: 392702
- name: test
num_bytes: 19387432
num_examples: 5010
- name: validation
num_bytes: 9566179
num_examples: 2490
download_size: 963942271
dataset_size: 1610425302
- config_name: ar
features:
- name: premise
dtype: string
- name: hypothesis
dtype: string
- name: label
dtype:
class_label:
names:
'0': entailment
'1': neutral
'2': contradiction
splits:
- name: train
num_bytes: 107399614
num_examples: 392702
- name: test
num_bytes: 1294553
num_examples: 5010
- name: validation
num_bytes: 633001
num_examples: 2490
download_size: 59215902
dataset_size: 109327168
- config_name: bg
features:
- name: premise
dtype: string
- name: hypothesis
dtype: string
- name: label
dtype:
class_label:
names:
'0': entailment
'1': neutral
'2': contradiction
splits:
- name: train
num_bytes: 125973225
num_examples: 392702
- name: test
num_bytes: 1573034
num_examples: 5010
- name: validation
num_bytes: 774061
num_examples: 2490
download_size: 66117878
dataset_size: 128320320
- config_name: de
features:
- name: premise
dtype: string
- name: hypothesis
dtype: string
- name: label
dtype:
class_label:
names:
'0': entailment
'1': neutral
'2': contradiction
splits:
- name: train
num_bytes: 84684140
num_examples: 392702
- name: test
num_bytes: 996488
num_examples: 5010
- name: validation
num_bytes: 494604
num_examples: 2490
download_size: 55973883
dataset_size: 86175232
- config_name: el
features:
- name: premise
dtype: string
- name: hypothesis
dtype: string
- name: label
dtype:
class_label:
names:
'0': entailment
'1': neutral
'2': contradiction
splits:
- name: train
num_bytes: 139753358
num_examples: 392702
- name: test
num_bytes: 1704785
num_examples: 5010
- name: validation
num_bytes: 841226
num_examples: 2490
download_size: 74551247
dataset_size: 142299369
- config_name: en
features:
- name: premise
dtype: string
- name: hypothesis
dtype: string
- name: label
dtype:
class_label:
names:
'0': entailment
'1': neutral
'2': contradiction
splits:
- name: train
num_bytes: 74444026
num_examples: 392702
- name: test
num_bytes: 875134
num_examples: 5010
- name: validation
num_bytes: 433463
num_examples: 2490
download_size: 50627367
dataset_size: 75752623
- config_name: es
features:
- name: premise
dtype: string
- name: hypothesis
dtype: string
- name: label
dtype:
class_label:
names:
'0': entailment
'1': neutral
'2': contradiction
splits:
- name: train
num_bytes: 81383284
num_examples: 392702
- name: test
num_bytes: 969813
num_examples: 5010
- name: validation
num_bytes: 478422
num_examples: 2490
download_size: 53677157
dataset_size: 82831519
- config_name: fr
features:
- name: premise
dtype: string
- name: hypothesis
dtype: string
- name: label
dtype:
class_label:
names:
'0': entailment
'1': neutral
'2': contradiction
splits:
- name: train
num_bytes: 85808779
num_examples: 392702
- name: test
num_bytes: 1029239
num_examples: 5010
- name: validation
num_bytes: 510104
num_examples: 2490
download_size: 55968680
dataset_size: 87348122
- config_name: hi
features:
- name: premise
dtype: string
- name: hypothesis
dtype: string
- name: label
dtype:
class_label:
names:
'0': entailment
'1': neutral
'2': contradiction
splits:
- name: train
num_bytes: 170593964
num_examples: 392702
- name: test
num_bytes: 2073073
num_examples: 5010
- name: validation
num_bytes: 1023915
num_examples: 2490
download_size: 70908548
dataset_size: 173690952
- config_name: ru
features:
- name: premise
dtype: string
- name: hypothesis
dtype: string
- name: label
dtype:
class_label:
names:
'0': entailment
'1': neutral
'2': contradiction
splits:
- name: train
num_bytes: 129859615
num_examples: 392702
- name: test
num_bytes: 1603466
num_examples: 5010
- name: validation
num_bytes: 786442
num_examples: 2490
download_size: 70702606
dataset_size: 132249523
- config_name: sw
features:
- name: premise
dtype: string
- name: hypothesis
dtype: string
- name: label
dtype:
class_label:
names:
'0': entailment
'1': neutral
'2': contradiction
splits:
- name: train
num_bytes: 69285725
num_examples: 392702
- name: test
num_bytes: 871651
num_examples: 5010
- name: validation
num_bytes: 429850
num_examples: 2490
download_size: 45564152
dataset_size: 70587226
- config_name: th
features:
- name: premise
dtype: string
- name: hypothesis
dtype: string
- name: label
dtype:
class_label:
names:
'0': entailment
'1': neutral
'2': contradiction
splits:
- name: train
num_bytes: 176062892
num_examples: 392702
- name: test
num_bytes: 2147015
num_examples: 5010
- name: validation
num_bytes: 1061160
num_examples: 2490
download_size: 77222045
dataset_size: 179271067
- config_name: tr
features:
- name: premise
dtype: string
- name: hypothesis
dtype: string
- name: label
dtype:
class_label:
names:
'0': entailment
'1': neutral
'2': contradiction
splits:
- name: train
num_bytes: 71637140
num_examples: 392702
- name: test
num_bytes: 934934
num_examples: 5010
- name: validation
num_bytes: 459308
num_examples: 2490
download_size: 48509680
dataset_size: 73031382
- config_name: ur
features:
- name: premise
dtype: string
- name: hypothesis
dtype: string
- name: label
dtype:
class_label:
names:
'0': entailment
'1': neutral
'2': contradiction
splits:
- name: train
num_bytes: 96441486
num_examples: 392702
- name: test
num_bytes: 1416241
num_examples: 5010
- name: validation
num_bytes: 699952
num_examples: 2490
download_size: 46682785
dataset_size: 98557679
- config_name: vi
features:
- name: premise
dtype: string
- name: hypothesis
dtype: string
- name: label
dtype:
class_label:
names:
'0': entailment
'1': neutral
'2': contradiction
splits:
- name: train
num_bytes: 101417430
num_examples: 392702
- name: test
num_bytes: 1190217
num_examples: 5010
- name: validation
num_bytes: 590680
num_examples: 2490
download_size: 57690058
dataset_size: 103198327
- config_name: zh
features:
- name: premise
dtype: string
- name: hypothesis
dtype: string
- name: label
dtype:
class_label:
names:
'0': entailment
'1': neutral
'2': contradiction
splits:
- name: train
num_bytes: 72224841
num_examples: 392702
- name: test
num_bytes: 777929
num_examples: 5010
- name: validation
num_bytes: 384851
num_examples: 2490
download_size: 48269855
dataset_size: 73387621
configs:
- config_name: all_languages
data_files:
- split: train
path: all_languages/train-*
- split: test
path: all_languages/test-*
- split: validation
path: all_languages/validation-*
- config_name: ar
data_files:
- split: train
path: ar/train-*
- split: test
path: ar/test-*
- split: validation
path: ar/validation-*
- config_name: bg
data_files:
- split: train
path: bg/train-*
- split: test
path: bg/test-*
- split: validation
path: bg/validation-*
- config_name: de
data_files:
- split: train
path: de/train-*
- split: test
path: de/test-*
- split: validation
path: de/validation-*
- config_name: el
data_files:
- split: train
path: el/train-*
- split: test
path: el/test-*
- split: validation
path: el/validation-*
- config_name: en
data_files:
- split: train
path: en/train-*
- split: test
path: en/test-*
- split: validation
path: en/validation-*
- config_name: es
data_files:
- split: train
path: es/train-*
- split: test
path: es/test-*
- split: validation
path: es/validation-*
- config_name: fr
data_files:
- split: train
path: fr/train-*
- split: test
path: fr/test-*
- split: validation
path: fr/validation-*
- config_name: hi
data_files:
- split: train
path: hi/train-*
- split: test
path: hi/test-*
- split: validation
path: hi/validation-*
- config_name: ru
data_files:
- split: train
path: ru/train-*
- split: test
path: ru/test-*
- split: validation
path: ru/validation-*
- config_name: sw
data_files:
- split: train
path: sw/train-*
- split: test
path: sw/test-*
- split: validation
path: sw/validation-*
- config_name: th
data_files:
- split: train
path: th/train-*
- split: test
path: th/test-*
- split: validation
path: th/validation-*
- config_name: tr
data_files:
- split: train
path: tr/train-*
- split: test
path: tr/test-*
- split: validation
path: tr/validation-*
- config_name: ur
data_files:
- split: train
path: ur/train-*
- split: test
path: ur/test-*
- split: validation
path: ur/validation-*
- config_name: vi
data_files:
- split: train
path: vi/train-*
- split: test
path: vi/test-*
- split: validation
path: vi/validation-*
- config_name: zh
data_files:
- split: train
path: zh/train-*
- split: test
path: zh/test-*
- split: validation
path: zh/validation-*
---
# Dataset Card for "xnli"
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [https://www.nyu.edu/projects/bowman/xnli/](https://www.nyu.edu/projects/bowman/xnli/)
- **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Size of downloaded dataset files:** 7.74 GB
- **Size of the generated dataset:** 3.23 GB
- **Total amount of disk used:** 10.97 GB
### Dataset Summary
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
### Supported Tasks and Leaderboards
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Languages
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Dataset Structure
### Data Instances
#### all_languages
- **Size of downloaded dataset files:** 483.96 MB
- **Size of the generated dataset:** 1.61 GB
- **Total amount of disk used:** 2.09 GB
An example of 'train' looks as follows.
```
This example was too long and was cropped:
{
"hypothesis": "{\"language\": [\"ar\", \"bg\", \"de\", \"el\", \"en\", \"es\", \"fr\", \"hi\", \"ru\", \"sw\", \"th\", \"tr\", \"ur\", \"vi\", \"zh\"], \"translation\": [\"احد اع...",
"label": 0,
"premise": "{\"ar\": \"واحدة من رقابنا ستقوم بتنفيذ تعليماتك كلها بكل دقة\", \"bg\": \"един от нашите номера ще ви даде инструкции .\", \"de\": \"Eine ..."
}
```
#### ar
- **Size of downloaded dataset files:** 483.96 MB
- **Size of the generated dataset:** 109.32 MB
- **Total amount of disk used:** 593.29 MB
An example of 'validation' looks as follows.
```
{
"hypothesis": "اتصل بأمه حالما أوصلته حافلة المدرسية.",
"label": 1,
"premise": "وقال، ماما، لقد عدت للمنزل."
}
```
#### bg
- **Size of downloaded dataset files:** 483.96 MB
- **Size of the generated dataset:** 128.32 MB
- **Total amount of disk used:** 612.28 MB
An example of 'train' looks as follows.
```
This example was too long and was cropped:
{
"hypothesis": "\"губиш нещата на следното ниво , ако хората си припомнят .\"...",
"label": 0,
"premise": "\"по време на сезона и предполагам , че на твоето ниво ще ги загубиш на следващото ниво , ако те решат да си припомнят отбора на ..."
}
```
#### de
- **Size of downloaded dataset files:** 483.96 MB
- **Size of the generated dataset:** 86.17 MB
- **Total amount of disk used:** 570.14 MB
An example of 'train' looks as follows.
```
This example was too long and was cropped:
{
"hypothesis": "Man verliert die Dinge auf die folgende Ebene , wenn sich die Leute erinnern .",
"label": 0,
"premise": "\"Du weißt , während der Saison und ich schätze , auf deiner Ebene verlierst du sie auf die nächste Ebene , wenn sie sich entschl..."
}
```
#### el
- **Size of downloaded dataset files:** 483.96 MB
- **Size of the generated dataset:** 142.30 MB
- **Total amount of disk used:** 626.26 MB
An example of 'validation' looks as follows.
```
This example was too long and was cropped:
{
"hypothesis": "\"Τηλεφώνησε στη μαμά του μόλις το σχολικό λεωφορείο τον άφησε.\"...",
"label": 1,
"premise": "Και είπε, Μαμά, έφτασα στο σπίτι."
}
```
### Data Fields
The data fields are the same among all splits.
#### all_languages
- `premise`: a multilingual `string` variable, with possible languages including `ar`, `bg`, `de`, `el`, `en`.
- `hypothesis`: a multilingual `string` variable, with possible languages including `ar`, `bg`, `de`, `el`, `en`.
- `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
#### ar
- `premise`: a `string` feature.
- `hypothesis`: a `string` feature.
- `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
#### bg
- `premise`: a `string` feature.
- `hypothesis`: a `string` feature.
- `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
#### de
- `premise`: a `string` feature.
- `hypothesis`: a `string` feature.
- `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
#### el
- `premise`: a `string` feature.
- `hypothesis`: a `string` feature.
- `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
### Data Splits
| name |train |validation|test|
|-------------|-----:|---------:|---:|
|all_languages|392702| 2490|5010|
|ar |392702| 2490|5010|
|bg |392702| 2490|5010|
|de |392702| 2490|5010|
|el |392702| 2490|5010|
## Dataset Creation
### Curation Rationale
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the source language producers?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Annotations
#### Annotation process
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the annotators?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Personal and Sensitive Information
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Discussion of Biases
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Other Known Limitations
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Additional Information
### Dataset Curators
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Licensing Information
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Citation Information
```
@InProceedings{conneau2018xnli,
author = {Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin},
title = {XNLI: Evaluating Cross-lingual Sentence Representations},
booktitle = {Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing},
year = {2018},
publisher = {Association for Computational Linguistics},
location = {Brussels, Belgium},
}
```
### Contributions
Thanks to [@lewtun](https://github.com/lewtun), [@mariamabarham](https://github.com/mariamabarham), [@thomwolf](https://github.com/thomwolf), [@lhoestq](https://github.com/lhoestq), [@patrickvonplaten](https://github.com/patrickvonplaten) for adding this dataset. |
jkot/merged_preprocessed_parliament_commonvoice | jkot | "2023-05-01T13:35:28Z" | 17,240 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2023-05-01T10:37:03Z" | ---
dataset_info:
features:
- name: input_features
sequence:
sequence: float32
- name: labels
sequence: int64
splits:
- name: train
num_bytes: 210499135424
num_examples: 219101
- name: test
num_bytes: 11099630080
num_examples: 11555
download_size: 65027813279
dataset_size: 221598765504
---
# Dataset Card for "merged_preprocessed_parliament_commonvoice"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
misikoff/zillow | misikoff | "2024-03-20T22:24:59Z" | 17,195 | 24 | [
"task_categories:tabular-regression",
"task_categories:time-series-forecasting",
"language:en",
"license:other",
"region:us"
] | [
"tabular-regression",
"time-series-forecasting"
] | "2024-02-16T16:36:27Z" | ---
language:
- en
license: other
task_categories:
- tabular-regression
- time-series-forecasting
pretty_name: Zillow
description: 'This dataset is comprised of seven different configurations of data
covering different aspects of the housing market in the United States. All data
is provided by Zillow. The seven configurations are: home_values_forecasts, new_construction,
for_sale_listings, rentals, sales, home_values, and days_on_market. Each configuration
has a different set of features and target variables. The data is provided in JSONL
format.'
homepage: https://www.zillow.com/research/data/
dataset_info:
- config_name: days_on_market
features:
- name: Region ID
dtype: string
id: Region ID
- name: Size Rank
dtype: int32
id: Size Rank
- name: Region
dtype: string
id: Region
- name: Region Type
dtype:
class_label:
names:
'0': zip
'1': city
'2': county
'3': msa
'4': state
'5': country
- name: State
dtype: string
id: State
- name: Home Type
dtype:
class_label:
names:
'0': multifamily
'1': condo/co-op
'2': SFR
'3': all homes
'4': all homes plus multifamily
- name: Date
dtype: timestamp[ms]
id: Date
- name: Mean Listings Price Cut Amount (Smoothed)
dtype: float32
id: Mean Listings Price Cut Amount (Smoothed)
- name: Percent Listings Price Cut
dtype: float32
id: Percent Listings Price Cut
- name: Mean Listings Price Cut Amount
dtype: float32
id: Mean Listings Price Cut Amount
- name: Percent Listings Price Cut (Smoothed)
dtype: float32
id: Percent Listings Price Cut (Smoothed)
- name: Median Days on Pending (Smoothed)
dtype: float32
id: Median Days on Pending (Smoothed)
- name: Median Days on Pending
dtype: float32
id: Median Days on Pending
splits:
- name: train
num_bytes: 50107320
num_examples: 586714
download_size: 229118598
dataset_size: 50107320
- config_name: for_sale_listings
features:
- name: Region ID
dtype: string
id: Region ID
- name: Size Rank
dtype: int32
id: Size Rank
- name: Region
dtype: string
id: Region
- name: Region Type
dtype:
class_label:
names:
'0': county
'1': city
'2': zip
'3': country
'4': msa
- name: State
dtype: string
id: State
- name: Home Type
dtype:
class_label:
names:
'0': all homes
'1': all homes plus multifamily
'2': SFR
'3': condo/co-op
'4': multifamily
- name: Date
dtype: timestamp[ms]
id: Date
- name: Median Listing Price
dtype: float32
id: Median Listing Price
- name: Median Listing Price (Smoothed)
dtype: float32
id: Median Listing Price (Smoothed)
- name: New Listings
dtype: int32
id: New Listings
- name: New Listings (Smoothed)
dtype: int32
id: New Listings (Smoothed)
- name: New Pending (Smoothed)
dtype: int32
id: New Pending (Smoothed)
- name: New Pending
dtype: int32
id: New Pending
splits:
- name: train
num_bytes: 49412198
num_examples: 578653
download_size: 180206592
dataset_size: 49412198
- config_name: home_values
features:
- name: Region ID
dtype: string
id: Region ID
- name: Size Rank
dtype: int32
id: Size Rank
- name: Region
dtype: string
id: Region
- name: Region Type
dtype:
class_label:
names:
'0': zip
'1': city
'2': county
'3': msa
'4': state
'5': country
- name: State
dtype: string
id: State
- name: Home Type
dtype:
class_label:
names:
'0': multifamily
'1': condo/co-op
'2': SFR
'3': all homes
'4': all homes plus multifamily
- name: Bedroom Count
dtype:
class_label:
names:
'0': 1-Bedroom
'1': 2-Bedrooms
'2': 3-Bedrooms
'3': 4-Bedrooms
'4': 5+-Bedrooms
'5': All Bedrooms
- name: Date
dtype: timestamp[ms]
id: Date
- name: Bottom Tier ZHVI (Smoothed) (Seasonally Adjusted)
dtype: float32
id: Bottom Tier ZHVI (Smoothed) (Seasonally Adjusted)
- name: Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)
dtype: float32
id: Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)
- name: Top Tier ZHVI (Smoothed) (Seasonally Adjusted)
dtype: float32
id: Top Tier ZHVI (Smoothed) (Seasonally Adjusted)
splits:
- name: train
num_bytes: 9377759
num_examples: 117912
download_size: 41145800
dataset_size: 9377759
- config_name: home_values_forecasts
features:
- name: Region ID
dtype: string
id: Region ID
- name: Size Rank
dtype: int32
id: Size Rank
- name: Region
dtype: string
id: Region
- name: Region Type
dtype:
class_label:
names:
'0': county
'1': city
'2': zip
'3': country
'4': msa
- name: State
dtype: string
id: State
- name: City
dtype: string
id: City
- name: Metro
dtype: string
id: Metro
- name: County
dtype: string
id: County
- name: Date
dtype: timestamp[ms]
id: Date
- name: Month Over Month % (Smoothed) (Seasonally Adjusted)
dtype: float32
id: Month Over Month % (Smoothed) (Seasonally Adjusted)
- name: Quarter Over Quarter % (Smoothed) (Seasonally Adjusted)
dtype: float32
id: Quarter Over Quarter % (Smoothed) (Seasonally Adjusted)
- name: Year Over Year % (Smoothed) (Seasonally Adjusted)
dtype: float32
id: Year Over Year % (Smoothed) (Seasonally Adjusted)
- name: Month Over Month %
dtype: float32
id: Month Over Month %
- name: Quarter Over Quarter %
dtype: float32
id: Quarter Over Quarter %
- name: Year Over Year %
dtype: float32
id: Year Over Year %
splits:
- name: train
num_bytes: 3976869
num_examples: 31854
download_size: 14081979
dataset_size: 3976869
- config_name: new_construction
features:
- name: Region ID
dtype: string
id: Region ID
- name: Size Rank
dtype: int32
id: Size Rank
- name: Region
dtype: string
id: Region
- name: Region Type
dtype:
class_label:
names:
'0': county
'1': city
'2': zip
'3': country
'4': msa
- name: State
dtype: string
id: State
- name: Home Type
dtype:
class_label:
names:
'0': all homes
'1': all homes plus multifamily
'2': SFR
'3': condo/co-op
'4': multifamily
- name: Date
dtype: timestamp[ms]
id: Date
- name: Median Sale Price
dtype: float32
id: Median Sale Price
- name: Median Sale Price per Sqft
dtype: float32
id: Sale Price per Sqft
- name: Sales Count
dtype: int32
id: Sales Count
splits:
- name: train
num_bytes: 3624631
num_examples: 49487
download_size: 10937317
dataset_size: 3624631
- config_name: rentals
features:
- name: Region ID
dtype: string
id: Region ID
- name: Size Rank
dtype: int32
id: Size Rank
- name: Region
dtype: string
id: Region
- name: Region Type
dtype:
class_label:
names:
'0': county
'1': city
'2': zip
'3': country
'4': msa
- name: State
dtype: string
id: State
- name: Home Type
dtype:
class_label:
names:
'0': all homes
'1': all homes plus multifamily
'2': SFR
'3': condo/co-op
'4': multifamily
- name: Date
dtype: timestamp[ms]
id: Date
- name: Rent (Smoothed)
dtype: float32
id: Rent (Smoothed)
- name: Rent (Smoothed) (Seasonally Adjusted)
dtype: float32
id: Rent (Smoothed) (Seasonally Adjusted)
splits:
- name: train
num_bytes: 92914681
num_examples: 1258740
download_size: 447425069
dataset_size: 92914681
- config_name: sales
features:
- name: Region ID
dtype: string
id: Region ID
- name: Size Rank
dtype: int32
id: Size Rank
- name: Region
dtype: string
id: Region
- name: Region Type
dtype:
class_label:
names:
'0': county
'1': city
'2': zip
'3': country
'4': msa
- name: State
dtype: string
id: State
- name: Home Type
dtype:
class_label:
names:
'0': all homes
'1': all homes plus multifamily
'2': SFR
'3': condo/co-op
'4': multifamily
- name: Date
dtype: timestamp[ms]
id: Date
- name: Mean Sale to List Ratio (Smoothed)
dtype: float32
id: Mean Sale to List Ratio (Smoothed)
- name: Median Sale to List Ratio
dtype: float32
id: Median Sale to List Ratio
- name: Median Sale Price
dtype: float32
id: Median Sale Price
- name: Median Sale Price (Smoothed) (Seasonally Adjusted)
dtype: float32
id: Median Sale Price (Smoothed) (Seasonally Adjusted)
- name: Median Sale Price (Smoothed)
dtype: float32
id: Median Sale Price (Smoothed)
- name: Median Sale to List Ratio (Smoothed)
dtype: float32
id: Median Sale to List Ratio (Smoothed)
- name: '% Sold Below List'
dtype: float32
id: '% Sold Below List'
- name: '% Sold Below List (Smoothed)'
dtype: float32
id: '% Sold Below List (Smoothed)'
- name: '% Sold Above List'
dtype: float32
id: '% Sold Above List'
- name: '% Sold Above List (Smoothed)'
dtype: float32
id: '% Sold Above List (Smoothed)'
- name: Mean Sale to List Ratio
dtype: float32
id: Mean Sale to List Ratio
splits:
- name: train
num_bytes: 27088039
num_examples: 255024
download_size: 139297577
dataset_size: 27088039
---
# Housing Data Provided by Zillow
Updated: 2023-02-01
This dataset contains several configs produced based on files available at https://www.zillow.com/research/data/.
# Viewer Notes
Because this repository includes the code used to process the raw Zillow data, the dataset viewer is not enabled. This is because the dataset viewer is not enabled for datasets that run arbitrary python code. For a normal viewer experience, please see the simplified version of this dataset at https://huggingface.co/datasets/misikoff/zillow-viewer. The viewer should be enabled there and the config files should match what is found here.
Supported configs:
- [`days_on_market`](#days-on-market): Days to pending, days to close, share of listings with a price cut, and price cuts.
- [`for_sale_listings`](#for-sale-listings): Median listing price, new listings, and new pending listings.
- [`home_values`](#home-values): Zillow Home Value Index (ZHVI) for all homes, mid-tier, bottom-tier, and top-tier homes.
- [`home_values_forecasts`](#home-values-forecasts): Zillow Home Value Forecast (ZHVF) for all homes, mid-tier, bottom-tier, and top-tier homes.
- [`new_construction`](#new-construction): Median sale price, median sale price per square foot, and sales count.
- [`rentals`](#rentals): Zillow Observed Rent Index (ZORI) for all homes, mid-tier, bottom-tier, and top-tier homes.
- [`sales`](#sales): Median sale price, median sale price per square foot, and sales count.
## DAYS ON MARKET AND PRICE CUTS
Days to Pending: How long it takes homes in a region to change to pending status on Zillow.com after first being shown as for sale. The reported figure indicates the number of days (mean or median) that it took for homes that went pending during the week being reported, to go pending. This differs from the old “Days on Zillow” metric in that it excludes the in-contract period before a home sells.
Days to Close (mean/median): Number of days between the listing going pending and the sale date.
Share of Listings With a Price Cut: The number of unique properties with a list price at the end of the month that’s less than the list price at the beginning of the month, divided by the number of unique properties with an active listing at some point during the month.
Price Cuts: The mean and median price cut for listings in a given region during a given time period, expressed as both dollars ($) and as a percentage (%) of list price.
Base Columns
- `Region ID`: dtype="string", a unique identifier for the region
- `Size Rank`: dtype="int32", a rank of the region's size
- `Region`: dtype="string", the name of the region
- `Region Type`: dtype="class_label", the type of region
- '0': zip
- '1': city
- '2': county
- '3': msa
- '4': state
- '5': country
- `State`: dtype="string", the US state abbreviation for the state containing the region
- `Home Type`: dtype="string", the type of home
- '0': multifamily
- '1': condo/co-op
- '2': SFR
- '3': all homes
- '4': all homes plus multifamily
- `Date`: dtype="timestamp[ms]", the date of the last day of the week for this data
Value Columns
- `Mean Listings Price Cut Amount (Smoothed)`: dtype="float32"
- `Percent Listings Price Cut`: dtype="float32", The number of unique properties with a list price at the end of the month that’s less than the list price at the beginning of the month, divided by the number of unique properties with an active listing at some point during the month.
- `Mean Listings Price Cut Amount`: dtype="float32"
- `Percent Listings Price Cut (Smoothed)`: dtype="float32"
- `Median Days on Pending (Smoothed)`: dtype="float32", median number of days it takes for homes in a region to change to pending status on Zillow.com after first being shown as for sale. (smoothed)
- `Median Days on Pending`: dtype="float32", median number of days it takes for homes in a region to change to pending status on Zillow.com after first being shown as for sale.
## FOR-SALE LISTINGS
Base Columns
- `Region ID`: dtype="string", a unique identifier for the region
- `Size Rank`: dtype="int32", a rank of the region's size
- `Region`: dtype="string", the name of the region
- `Region Type`: dtype="class_label", the type of region
- '0': zip
- '1': city
- '2': county
- '3': msa
- '4': state
- `State`: dtype="string", the US state abbreviation for the state containing the region
- `Home Type`: dtype="string", the type of home
- '0': all homes
- '1': all homes plus multifamily
- '2': SFR
- '3': condo/co-op
- '4': multifamily
- `Date`: dtype="timestamp[ms]", the date of the last day of the month for this data
Value Columns
- `Median Listing Price`: dtype="float32", The median price at which homes across various geographies were listed.
- `Median Listing Price (Smoothed)`: dtype="float32", The median price at which homes across various geographies were listed. (smoothed)
- `New Listings`: dtype="int32", how many new listings have come on the market in a given month
- `New Listings (Smoothed)`: dtype="int32", how many new listings have come on the market in a given month. (smoothed)
- `New Pending (Smoothed)`: dtype="int32", The count of listings that changed from for-sale to pending status on Zillow.com in a given time period. (smoothed)
- `New Pending`: dtype="int32", The count of listings that changed from for-sale to pending status on Zillow.com in a given time period.
## HOME VALUES
<!-- Zillow Home Value Index (ZHVI): A measure of the typical home value and market changes across a given region and housing type. It reflects the typical value for homes in the 35th to 65th percentile range. Available as a smoothed, seasonally adjusted measure and as a raw measure. -->
<!-- Zillow publishes top-tier ZHVI (\$, typical value for homes within the 65th to 95th percentile range for a given region) and bottom-tier ZHVI (\$, typical value for homes within the 5th to 35th percentile range for a given region). -->
<!-- Zillow also publishes ZHVI for all single-family residences (\$, typical value for all single-family homes in a given region), for condo/coops (\$), for all homes with 1, 2, 3, 4 and 5+ bedrooms (\$), and the ZHVI per square foot (\$, typical value of all homes per square foot calculated by taking the estimated home value for each home in a given region and dividing it by the home’s square footage). -->
<!-- Note: Starting with the January 2023 data release, and for all subsequent releases, the full ZHVI time series has been upgraded to harness the power of the neural Zestimate. -->
<!-- More information about what ZHVI is and how it’s calculated is available on this overview page. Here’s a handy ZHVI User Guide for information about properly citing and making calculations with this metric. -->
Base Columns
- `Region ID`: dtype="string", a unique identifier for the region
- `Size Rank`: dtype="int32", a rank of the region's size
- `Region`: dtype="string", the name of the region
- `Region Type`: dtype="class_label", the type of region
- '0': zip
- '1': city
- '2': county
- '3': msa
- '4': state
  - '5': country
- `State`: dtype="string", the US state abbreviation for the state containing the region
- `Home Type`: dtype="class_label", the type of home
- '0': multifamily
- '1': condo/co-op
- '2': SFR
- '3': all homes
- '4': all homes plus multifamily
- `Bedroom Count`: dtype="class_label", the number of bedrooms
- '0': 1-Bedroom
- '1': 2-Bedrooms
- '2': 3-Bedrooms
- '3': 4-Bedrooms
- '4': 5+-Bedrooms
- '5': All Bedrooms
- `Date`: dtype="timestamp[ms]", the date of the last day of the month for this data
Value Columns
- `Mid Tier ZHVI (Smoothed) (Seasonally Adjusted)`: dtype="float32",
- `Bottom Tier ZHVI (Smoothed) (Seasonally Adjusted)`: dtype="float32",
- `Top Tier ZHVI (Smoothed) (Seasonally Adjusted)`: dtype="float32",
- `ZHVI`: dtype="float32",
- `Mid Tier ZHVI`: dtype="float32"
## HOME VALUES FORECASTS
<!-- Zillow Home Value Forecast (ZHVF): A month-ahead, quarter-ahead and year-ahead forecast of the Zillow Home Value Index (ZHVI). ZHVF is created using the all homes, mid-tier cut of ZHVI and is available both raw and smoothed, seasonally adjusted. -->
<!-- Note: Starting with the January 2023 forecast (made available in February 2023), Zillow’s Home Value Forecast is based on the upgraded ZHVI that harnesses the power of the neural Zestimate. More information about what ZHVI is and how it’s calculated is available on this overview page. -->
Base Columns
- `Region ID`: dtype="string", a unique identifier for the region
- `Size Rank`: dtype="int32", a rank of the region's size
- `Region`: dtype="string", the name of the region
- `Region Type`: dtype="class_label", the type of region
- '0': county
- '1': city
- '2': zip
- '3': country
- '4': msa
- `State`: dtype="string", the US state abbreviation for the state containing the region
- `City`: dtype="string",
- `Metro`: dtype="string",
- `County`: dtype="string",
- `Home Type`: dtype="string", the type of home
- `Date`: dtype="timestamp[ms]", the date of these forecasts
Value Columns
- `Month Over Month % (Smoothed)`: dtype="float32",
- `Quarter Over Quarter % (Smoothed)`: dtype="float32",
- `Year Over Year % (Smoothed)`: dtype="float32"
- `Month Over Month % (Raw)`: dtype="float32"
- `Quarter Over Quarter % (Raw)`: dtype="float32"
- `Year Over Year % (Raw)`: dtype="float32"
## NEW CONSTRUCTION
Base Columns
- `Region ID`: dtype="string", a unique identifier for the region
- `Size Rank`: dtype="int32", a rank of the region's size
- `Region`: dtype="string", the name of the region
- `Region Type`: dtype="class_label", the type of region
- '0': county
- '1': city
- '2': zip
- '3': country
- '4': msa
- `State`: dtype="string", the US state abbreviation for the state containing the region
- `Home Type`: dtype="class_label", the type of home
- '0': all homes
- '1': all homes plus multifamily
- '2': SFR
- '3': condo/co-op
- '4': multifamily
- `Date`: dtype="timestamp[ms]", the date of the last day of the month for this data
Value Columns
- `Median Sale Price`: dtype="float32", the median sale price of new construction homes that sold during the month in the specified region
- `Median Sale Price per Sqft`: dtype="float32", the median sale price per square foot of new construction homes that sold during the month in the specified region
- `Sales Count`: dtype="int32", the number of new construction homes that sold during the month in the specified region
## RENTALS
Base Columns
- `Region ID`: dtype="string", a unique identifier for the region
- `Size Rank`: dtype="int32", a rank of the region's size
- `Region`: dtype="string", the name of the region
- `Region Type`: dtype="class_label", the type of region
- '0': county
- '1': city
- '2': zip
- '3': country
- '4': msa
- `State`: dtype="string", the US state abbreviation for the state containing the region
- `Home Type`: dtype="string", the type of home
- '0': all homes
- '1': all homes plus multifamily
- '2': SFR
- '3': condo/co-op
- '4': multifamily
- `Date`: dtype="timestamp[ms]", the date of the last day of the month for this data
Value Columns
- `Rent (Smoothed)`: dtype="float32", Zillow Observed Rent Index (ZORI): A smoothed measure of the typical observed market rate rent across a given region. ZORI is a repeat-rent index that is weighted to the rental housing stock to ensure representativeness across the entire market, not just those homes currently listed for-rent. The index is dollar-denominated by computing the mean of listed rents that fall into the 40th to 60th percentile range for all homes and apartments in a given region, which is weighted to reflect the rental housing stock.
- `Rent (Smoothed) (Seasonally Adjusted)`: dtype="float32", Zillow Observed Rent Index (ZORI): A smoothed measure of the typical observed market rate rent across a given region. ZORI is a repeat-rent index that is weighted to the rental housing stock to ensure representativeness across the entire market, not just those homes currently listed for-rent. The index is dollar-denominated by computing the mean of listed rents that fall into the 40th to 60th percentile range for all homes and apartments in a given region, which is weighted to reflect the rental housing stock.
## SALES
<!-- Sale-to-List Ratio (mean/median): Ratio of sale vs. final list price. -->
<!-- Percent of Sales Below/Above List: Share of sales where sale price below/above the final list price; excludes homes sold for exactly the list price. -->
Base Columns
- `Region ID`: dtype="string", a unique identifier for the region
- `Size Rank`: dtype="int32", a rank of the region's size
- `Region`: dtype="string", the name of the region
- `Region Type`: dtype="class_label", the type of region
- '0': county
- '1': city
- '2': zip
- '3': country
- '4': msa
- `State`: dtype="string", the US state abbreviation for the state containing the region
- `Home Type`: dtype="class_label", the type of home
- '0': all homes
- '1': all homes plus multifamily
- '2': SFR
- '3': condo/co-op
- '4': multifamily
- `Date`: dtype="timestamp[ms]", the date of the last day of the month for this data
Value Columns
- `Median Sale Price`: dtype="float32", The median price at which homes across various geographies were sold.
- `Median Sale Price per Sqft`: dtype="float32", The median price per square foot at which homes across various geographies were sold.
- `Sales Count`: dtype="int32", The "Sales Count Nowcast" is the estimated number of unique properties that sold during the month after accounting for the latency between when sales occur and when they are reported.
## DEFINITIONS OF HOME TYPES
- All homes: Zillow defines all homes as single-family, condominium and co-operative homes with a county record. Unless specified, all series cover this segment of the housing stock.
- SFR: single family residence
- condo/co-op: Condominium and co-operative homes.
- Multifamily 5+ units: Units in buildings with 5 or more housing units, that are not condominiums or co-ops.
<!-- - Duplex/Triplex/Quadplex: Housing units in buildings with 2, 3, or 4 housing units. -->
# Example Usage
```python
from datasets import load_dataset
dataset = load_dataset("misikoff/zillow", 'home_values', trust_remote_code=True)
```
## Example Notebook
Linked below is an example notebook that demonstrates usage and analysis of one of the configs of this dataset.
[](https://colab.research.google.com/drive/1lEPLbWsOKmNBzkPWT30RCBVxhG1LWkGg?usp=sharing) |
taesiri/arxiv_db | taesiri | "2024-05-07T22:36:44Z" | 17,167 | 4 | [
"license:cc-by-nc-4.0",
"region:us"
] | null | "2023-10-26T23:53:25Z" | ---
license: cc-by-nc-4.0
---
|
IGNF/FLAIR_1_osm_clip | IGNF | "2024-09-18T09:05:07Z" | 17,158 | 5 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"IGN"
] | null | "2023-04-27T12:28:28Z" | ---
size_categories:
- 10K<n<100K
dataset_info:
features:
- name: image
dtype: image
- name: segmentation
dtype: image
- name: metadata
struct:
- name: address
struct:
- name: ISO3166-2-lvl4
dtype: string
- name: ISO3166-2-lvl6
dtype: string
- name: ISO3166-2-lvl7
dtype: string
- name: aerialway
dtype: string
- name: aeroway
dtype: string
- name: allotments
dtype: string
- name: amenity
dtype: string
- name: bridge
dtype: string
- name: building
dtype: string
- name: city
dtype: string
- name: city_district
dtype: string
- name: club
dtype: string
- name: commercial
dtype: string
- name: country
dtype: string
- name: country_code
dtype: string
- name: county
dtype: string
- name: craft
dtype: string
- name: emergency
dtype: string
- name: farm
dtype: string
- name: farmyard
dtype: string
- name: hamlet
dtype: string
- name: healthcare
dtype: string
- name: highway
dtype: string
- name: historic
dtype: string
- name: house_number
dtype: string
- name: industrial
dtype: string
- name: isolated_dwelling
dtype: string
- name: junction
dtype: string
- name: landuse
dtype: string
- name: leisure
dtype: string
- name: locality
dtype: string
- name: man_made
dtype: string
- name: military
dtype: string
- name: municipality
dtype: string
- name: natural
dtype: string
- name: neighbourhood
dtype: string
- name: office
dtype: string
- name: place
dtype: string
- name: postcode
dtype: string
- name: province
dtype: string
- name: quarter
dtype: string
- name: railway
dtype: string
- name: region
dtype: string
- name: residential
dtype: string
- name: retail
dtype: string
- name: road
dtype: string
- name: shop
dtype: string
- name: square
dtype: string
- name: state
dtype: string
- name: state_district
dtype: string
- name: suburb
dtype: string
- name: tourism
dtype: string
- name: town
dtype: string
- name: village
dtype: string
- name: boundingbox
sequence: string
- name: camera
dtype: string
- name: code
dtype: string
- name: date
dtype: string
- name: dimensions
sequence: int64
- name: display_name
dtype: string
- name: domain
dtype: string
- name: lat
dtype: string
- name: latlong
sequence: float64
- name: licence
dtype: string
- name: lon
dtype: string
- name: origin
sequence: float64
- name: osm_id
dtype: int64
- name: osm_type
dtype: string
- name: patch_centroid_x
dtype: float64
- name: patch_centroid_y
dtype: float64
- name: patch_centroid_z
dtype: float64
- name: place_id
dtype: int64
- name: time
dtype: string
- name: unit_system
dtype: string
- name: zone
dtype: string
- name: class_rep
struct:
- name: class_repartition
struct:
- name: agricultural land
dtype: float64
- name: bare soil
dtype: float64
- name: brushwood
dtype: float64
- name: building
dtype: float64
- name: clear cut
dtype: float64
- name: coniferous
dtype: float64
- name: deciduous
dtype: float64
- name: greenhouse
dtype: float64
- name: herbaceous vegetation
dtype: float64
- name: impervious surface
dtype: float64
- name: ligneous
dtype: float64
- name: mixed
dtype: float64
- name: other
dtype: float64
- name: pervious surface
dtype: float64
- name: plowed land
dtype: float64
- name: snow
dtype: float64
- name: swimming pool
dtype: float64
- name: vineyard
dtype: float64
- name: water
dtype: float64
- name: prompt
dtype: string
- name: clip_label
dtype: string
- name: clip_embeddings
sequence: float32
splits:
- name: train
num_bytes: 26969627407.0
num_examples: 61712
download_size: 26975935912
dataset_size: 26969627407.0
tags:
- IGN
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "FLAIR_OSM_CLIP"
Dataset for the Seg2Sat model: https://github.com/RubenGres/Seg2Sat
Derived from [FLAIR#1](https://huggingface.co/datasets/IGNF/FLAIR) train split.
This dataset incudes the following features:
- **image**: FLAIR#1 .tif files RBG bands converted into a more managable jpg format
- **segmentation**: FLAIR#1 segmentation converted to JPG using the [LUT from the documentation](https://ignf.github.io/FLAIR/index_fr.html)
- **metadata**: OSM metadata for the centroid of the image
- **clip_label**: [CLIP ViT-H](https://github.com/openai/CLIP) description
- **class_rep**: ratio of appearance of each class in the segmentation
- **prompt**: Prompt used for the Seg2Sat model
- **clip_embeddings**: Embedding using openai/clip-vit-base-patch32 model
<!---
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
---> |
fsicoli/common_voice_15_0 | fsicoli | "2023-12-20T18:55:52Z" | 16,944 | 5 | [
"task_categories:automatic-speech-recognition",
"language:ab",
"language:af",
"language:am",
"language:ar",
"language:as",
"language:ast",
"language:az",
"language:ba",
"language:bas",
"language:be",
"language:bg",
"language:bn",
"language:br",
"language:ca",
"language:ckb",
"language:cnh",
"language:cs",
"language:cv",
"language:cy",
"language:da",
"language:de",
"language:dv",
"language:dyu",
"language:el",
"language:en",
"language:eo",
"language:es",
"language:et",
"language:eu",
"language:fa",
"language:fi",
"language:fr",
"language:gl",
"language:gn",
"language:ha",
"language:he",
"language:hi",
"language:hsb",
"language:hu",
"language:ia",
"language:id",
"language:ig",
"language:is",
"language:it",
"language:ja",
"language:ka",
"language:kab",
"language:kk",
"language:kmr",
"language:ko",
"language:ky",
"language:lg",
"language:lo",
"language:lt",
"language:lv",
"language:mdf",
"language:mhr",
"language:mk",
"language:ml",
"language:mn",
"language:mr",
"language:mrj",
"language:mt",
"language:myv",
"language:nl",
"language:oc",
"language:or",
"language:pl",
"language:ps",
"language:pt",
"language:quy",
"language:ro",
"language:ru",
"language:rw",
"language:sah",
"language:sat",
"language:sc",
"language:sk",
"language:skr",
"language:sl",
"language:sq",
"language:sr",
"language:sw",
"language:ta",
"language:th",
"language:ti",
"language:tig",
"language:tk",
"language:tok",
"language:tr",
"language:tt",
"language:tw",
"language:ug",
"language:uk",
"language:ur",
"language:uz",
"language:vi",
"language:vot",
"language:yue",
"language:zgh",
"language:zh",
"language:yo",
"license:cc",
"size_categories:100B<n<1T",
"region:us",
"mozilla",
"foundation"
] | [
"automatic-speech-recognition"
] | "2023-11-13T13:27:04Z" | ---
license: cc
language:
- ab
- af
- am
- ar
- as
- ast
- az
- ba
- bas
- be
- bg
- bn
- br
- ca
- ckb
- cnh
- cs
- cv
- cy
- da
- de
- dv
- dyu
- el
- en
- eo
- es
- et
- eu
- fa
- fi
- fr
- gl
- gn
- ha
- he
- hi
- hsb
- hu
- ia
- id
- ig
- is
- it
- ja
- ka
- kab
- kk
- kmr
- ko
- ky
- lg
- lo
- lt
- lv
- mdf
- mhr
- mk
- ml
- mn
- mr
- mrj
- mt
- myv
- nl
- oc
- or
- pl
- ps
- pt
- quy
- ro
- ru
- rw
- sah
- sat
- sc
- sk
- skr
- sl
- sq
- sr
- sw
- ta
- th
- ti
- tig
- tk
- tok
- tr
- tt
- tw
- ug
- uk
- ur
- uz
- vi
- vot
- yue
- zgh
- zh
- yo
task_categories:
- automatic-speech-recognition
pretty_name: Common Voice Corpus 15.0
size_categories:
- 100B<n<1T
tags:
- mozilla
- foundation
---
# Dataset Card for Common Voice Corpus 15.0
<!-- Provide a quick summary of the dataset. -->
This dataset is an unofficial version of the Mozilla Common Voice Corpus 15. It was downloaded and converted from the project's website https://commonvoice.mozilla.org/.
## Languages
```
Abkhaz, Albanian, Amharic, Arabic, Armenian, Assamese, Asturian, Azerbaijani, Basaa, Bashkir, Basque, Belarusian, Bengali, Breton, Bulgarian, Cantonese, Catalan, Central Kurdish, Chinese (China), Chinese (Hong Kong), Chinese (Taiwan), Chuvash, Czech, Danish, Dhivehi, Dioula, Dutch, English, Erzya, Esperanto, Estonian, Finnish, French, Frisian, Galician, Georgian, German, Greek, Guarani, Hakha Chin, Hausa, Hill Mari, Hindi, Hungarian, Icelandic, Igbo, Indonesian, Interlingua, Irish, Italian, Japanese, Kabyle, Kazakh, Kinyarwanda, Korean, Kurmanji Kurdish, Kyrgyz, Lao, Latvian, Lithuanian, Luganda, Macedonian, Malayalam, Maltese, Marathi, Meadow Mari, Moksha, Mongolian, Nepali, Norwegian Nynorsk, Occitan, Odia, Pashto, Persian, Polish, Portuguese, Punjabi, Quechua Chanka, Romanian, Romansh Sursilvan, Romansh Vallader, Russian, Sakha, Santali (Ol Chiki), Saraiki, Sardinian, Serbian, Slovak, Slovenian, Sorbian, Upper, Spanish, Swahili, Swedish, Taiwanese (Minnan), Tamazight, Tamil, Tatar, Thai, Tigre, Tigrinya, Toki Pona, Turkish, Turkmen, Twi, Ukrainian, Urdu, Uyghur, Uzbek, Vietnamese, Votic, Welsh, Yoruba
```
## How to use
The datasets library allows you to load and pre-process your dataset in pure Python, at scale. The dataset can be downloaded and prepared in one call to your local drive by using the load_dataset function.
For example, to download the Portuguese config, simply specify the corresponding language config name (i.e., "pt" for Portuguese):
```
from datasets import load_dataset
cv_15 = load_dataset("fsicoli/common_voice_15_0", "pt", split="train")
```
Using the datasets library, you can also stream the dataset on-the-fly by adding a streaming=True argument to the load_dataset function call. Loading a dataset in streaming mode loads individual samples of the dataset at a time, rather than downloading the entire dataset to disk.
```
from datasets import load_dataset
cv_15 = load_dataset("fsicoli/common_voice_15_0", "pt", split="train", streaming=True)
print(next(iter(cv_15)))
```
Bonus: create a PyTorch dataloader directly with your own datasets (local/streamed).
### Local
```
from datasets import load_dataset
from torch.utils.data import DataLoader
from torch.utils.data.sampler import BatchSampler, RandomSampler
cv_15 = load_dataset("fsicoli/common_voice_15_0", "pt", split="train")
batch_sampler = BatchSampler(RandomSampler(cv_15), batch_size=32, drop_last=False)
dataloader = DataLoader(cv_15, batch_sampler=batch_sampler)
```
### Streaming
```
from datasets import load_dataset
from torch.utils.data import DataLoader
cv_15 = load_dataset("fsicoli/common_voice_15_0", "pt", split="train", streaming=True)
dataloader = DataLoader(cv_15, batch_size=32)
```
To find out more about loading and preparing audio datasets, head over to hf.co/blog/audio-datasets.
### Dataset Structure
Data Instances
A typical data point comprises the path to the audio file and its sentence. Additional fields include accent, age, client_id, up_votes, down_votes, gender, locale and segment.
### Licensing Information
Public Domain, CC-0
### Citation Information
```
@inproceedings{commonvoice:2020,
author = {Ardila, R. and Branson, M. and Davis, K. and Henretty, M. and Kohler, M. and Meyer, J. and Morais, R. and Saunders, L. and Tyers, F. M. and Weber, G.},
title = {Common Voice: A Massively-Multilingual Speech Corpus},
booktitle = {Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)},
pages = {4211--4215},
year = 2020
}
``` |
RogerQi/PH2D | RogerQi | "2025-03-31T09:27:25Z" | 16,719 | 1 | [
"task_categories:robotics",
"license:mit",
"arxiv:2503.13441",
"region:us",
"Human",
"Humanoid",
"Robot"
] | [
"robotics"
] | "2025-03-14T02:03:14Z" | ---
license: mit
task_categories:
- robotics
tags:
- Human
- Humanoid
- Robot
---
## Dataset Description
This dataset contains egocentric human-humanoid data that can be used to co-train manipulation policy for humanoid robot.
For paper, visualization, and example code using this dataset, please refer to https://human-as-robot.github.io/
arXiv link: https://arxiv.org/abs/2503.13441
## Task Descriptions
We provide some example dataset config files that we used for our robots [here](https://github.com/RogerQi/human-policy/tree/main/hdt/configs/datasets).
Data organizations
- Each folder represents one task. Most HDF5 files contain ONE execution of the task specified by the language, but certain tasks, such as cup passing, contain multiple demonstrations in each HDF5.
- Taskids start with `1` mean the data is a human demonstrations. |
hssd/hssd-hab | hssd | "2025-02-14T02:19:58Z" | 16,714 | 35 | [
"language:en",
"license:cc-by-nc-4.0",
"region:us",
"3D scenes",
"Embodied AI"
] | null | "2023-06-04T18:59:50Z" | ---
language:
- en
pretty_name: HSSD
tags:
- 3D scenes
- Embodied AI
license: cc-by-nc-4.0
extra_gated_heading: "Acknowledge license to accept the repository"
extra_gated_prompt: "You agree to use this dataset under the [CC BY-NC 4.0 license](https://creativecommons.org/licenses/by-nc/4.0/) terms"
viewer: false
---
HSSD: Habitat Synthetic Scenes Dataset
==================================
The [Habitat Synthetic Scenes Dataset (HSSD)](https://3dlg-hcvc.github.io/hssd/) is a human-authored 3D scene dataset that more closely mirrors real scenes than prior datasets.
Our dataset represents real interiors and contains a diverse set of 211 scenes and more than 18000 models of real-world objects.
<img src="https://i.imgur.com/XEkLxNs.png" width=50%>
This repository provides a Habitat consumption-ready compressed version of HSSD.
See [this repository](https://huggingface.co/datasets/hssd/hssd-models) for corresponding uncompressed assets.
## Dataset Structure
```
├── objects
│ ├── */*.glb
│ ├── */*.collider.glb
│ ├── */*.filteredSupportSurface(.ply|.glb)
│ ├── */*.object_config.json
├── stages
│ ├── *.glb
│ ├── *.stage_config.json
├── scenes
│ ├── *.scene_instance.json
├── scenes_uncluttered
│ ├── *.scene_instance.json
├── scenes_articulated
│ ├── *.scene_instance.json
├── scene_filter_files
│ ├── *.rec_filter.json
├── metadata
│ ├── *.csv
│ ├── *.json
├── semantics
│ ├── hssd-hab_semantic_lexicon.json
│ ├── scenes
| ├── *.semantic_config.json
├── urdf
│ ├── <model_name>
| ├── *.glb
| ├── *.urdf
| ├── *.ao_config.json
└── hssd-hab.scene_dataset_config.json
└── hssd-hab-uncluttered.scene_dataset_config.json
└── hssd-hab-articulated.scene_dataset_config.json
```
- `hssd-hab.scene_dataset_config.json`: This SceneDataset config file aggregates the assets and metadata necessary to fully describe the set of stages, objects, and scenes constituting the dataset.
- `objects`: 3D models representing distinct objects that are used to compose scenes. Contains configuration files, render assets, collider assets, and Receptacle mesh assets.
- `stages`: A stage in Habitat is the set of static mesh components which make up the backdrop of a scene (e.g. floor, walls, stairs, etc.).
- `scenes`: A scene is a single 3D world composed of a static stage and a variable number of objects.
- `scene_filter_files`: These .rec_filter.json files contain mappings of Receptacle instance unique_names to active or filtered sets based on their locations and accessibility within the scene. They also contain a "within_set" defining Receptacles which can only be accessed when the parent Furniture object's "default_link" is in the "open" state.
- `metadata`: The metadata directory contains several csv and json files which provide semantic mappings for objects in the dataset as well as rational mappings from regions to the types of clutter objects typically found in them to support procedural generation.
- `semantics`: Primarily defines instance semantics for the scenes. *.semantic_config.json files contain the region annotations for each scene.
- `urdf`: The urdf directory contains the articulated furniture assets, each contained in its own sub-directory named after the source asset. The .urdf files define the articulation properties. Each .glb file is either a render asset or Receptacle mesh connected to a rigid link. The .ao_config.json file contains habitat-specific metadata such as markersets and Receptacle definitions.
### Rearrange-ready assets:
Supporting Habitat 3.0 embodied rearrangement tasks with updated colliders, adjusted and de-cluttered scene contents, receptacle meshes, and receptacle filter files. See [aihabitat.org/habitat3/](aihabitat.org/habitat3/) for more details.
- `hssd-hab-uncluttered.scene_dataset_config.json`: This SceneDataset config file aggregates adds the adjusted and uncluttered scenes for rearrangement tasks.
- `scenes_uncluttered`: Contains the adjusted scene instance configuration files.
- `scene_filter_files`: A scene filter file organizes available Receptacle instances in a scene into active and inactive groups based on simualtion heuristics and manual edits. It is consumed by the RearrangeEpisodeGenerator to construct valid RearrangeEpisodeDatasets.
### Articulated scenes and assets:
Introduced in `v0.3.0`, the `hssd-hab-articulated.scene_dataset_config.json` SceneDataset provides 202 fully articulated HSSD scenes ready for use within the AI Habitat simulation ecosystem. Note that only 161 are publicly available on this repo. The remainder and their unique assets are reserved as an internal test set.
To enable more realistic indoor object manipulation, articulated 3D furniture models such as drawers, cabinets, and appliances were added to replace rigid assets. These models were converted from rigid source assets in HSSD and swapped into the scenes.
Furniture is annotated with a set of Receptacles (surfaces which support small object placement such as shelves and drawers) and can be opened and closed by the agents. Receptacles are further filtered contextually in each scene to ensure that the active set is accessible to the agents.
Additional annotations include point or marker sets for each furniture, region annotations, and semantic classification of objects.
## Getting Started
To load HSSD scenes into the Habitat simulator, you can start by installing [habitat-sim](https://github.com/facebookresearch/habitat-sim) using instructions specified [here](https://github.com/facebookresearch/habitat-sim#installation).
Once installed, you can run the interactive Habitat viewer to load a scene:
```
habitat-viewer --dataset /path/to/hssd-hab/hssd-hab.scene_dataset_config.json -- 102344280
# or ./build/viewer if compiling from source
```
You can find more information about using the interactive viewer [here](https://github.com/facebookresearch/habitat-sim#testing:~:text=path/to/data/-,Interactive%20testing,-%3A%20Use%20the%20interactive).
Habitat-Sim is typically used with [Habitat-Lab](https://github.com/facebookresearch/habitat-lab), a modular high-level library for end-to-end experiments in embodied AI.
To define embodied AI tasks (e.g. navigation, instruction following, question answering), train agents, and benchmark their performance using standard metrics, you can download habitat-lab using the instructions provided [here](https://github.com/facebookresearch/habitat-lab#installation).
## Changelog
- `v0.3.0`: **Articulated Scenes and PARTNR support**
- This major version update adds a large set of changes to support the introduction of 202 articulated HSSD scenes and the [PARTNR benchmark](https://github.com/facebookresearch/partnr-planner).
- Includes improvements to stage texture/geometry and object collision shapes and receptacles.
- Adds:
- 2000+ articulated assets in the urdf/ directory representing and replacing rigid furniture objects. Annotated with Receptacles and semantics.
- 202 new articulated scenes with rigid objects replaced by AOs. These are uncluttered and often significantly altered from originals to accommodate the new assets.
- Note that test scenes and assets are removed before migration to this repo.
- Receptacle filter files for new scenes annotating accessible Receptacles and "within" Receptacles (those which require opening an articulated link for access).
- Note that only one link per AO is configured with an active Receptacle. This is based on logic in PARTNR and habitat-lab (default_link).
- Region volume semantic annotations to all scenes
- Semantic lexicon file with updated classes
- Metadata files mapping object semantics and common-sense object->region sets for PARTNR
- `v0.2.5`: **Rearrange-ready HSSD**
- Note: this is a checkpoint. Known issues exist and continued polish is ongoing.
- Adds Receptacle meshes describing support surfaces for small objects (e.g. table or shelf surfaces).
- Adds collider meshes (.collider.glb) for assets with Receptacle meshes to support simulation.
- Adds new scenes 'scenes_uncluttered' and new SceneDataset 'hssd-hab-uncluttered' containing adjusted and de-cluttered versions of the scenes for use in embodied rearrangement tasks.
- Adds 'scene_filter_files' which sort Receptacles in each scene into active and inactive groups for RearrangeEpisode generation.
- `v0.2.4`:
- Recompresses several object GLBs to preserve PBR material status.
- Adds CSV with object metadata and semantic lexicon files for Habitat.
- Adds train/val scene splits file.
- `v0.2.3`: First release.
|
open-social-world/EgoNormia | open-social-world | "2025-03-04T19:44:49Z" | 16,711 | 3 | [
"task_categories:visual-question-answering",
"task_categories:video-text-to-text",
"task_categories:question-answering",
"language:en",
"license:cc-by-sa-4.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:image",
"modality:text",
"modality:video",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2502.20490",
"region:us",
"visual-reasoning",
"perception",
"predictive",
"spatial",
"temporal",
"causal"
] | [
"visual-question-answering",
"video-text-to-text",
"question-answering"
] | "2025-02-01T01:25:44Z" | ---
language:
- en
license: cc-by-sa-4.0
size_categories:
- 1K<n<10K
task_categories:
- visual-question-answering
- video-text-to-text
- question-answering
pretty_name: EgoNormia
tags:
- visual-reasoning
- perception
- predictive
- spatial
- temporal
- causal
---
<h1 align='center' style="text-align:center; font-weight:bold; font-size:2.0em;letter-spacing:2.0px;">
EgoNormia: Benchmarking Physical Social Norm Understanding</h1>
<p align='center' style="text-align:center;font-size:1.25em;">
<a href="https://mhrezaei.com" target="_blank" style="text-decoration: none;">MohammadHossein Rezaei*</a>,
<a href="https://sofyc.github.io" target="_blank" style="text-decoration: none;">Yicheng Fu*</a>,
<a href="https://scholar.google.com/citations?user=bDIUeu4AAAAJ&hl=en" target="_blank" style="text-decoration: none;">Phil Cuvin*</a>,
<a href="https://calebziems.com" target="_blank" style="text-decoration: none;">Caleb Ziems</a>,
<a href="https://stevenyzzhang.github.io/website/" target="_blank" style="text-decoration: none;">Yanzhe Zhang</a>, <br/>
<a href="https://zhuhao.me" target="_blank" style="text-decoration: none;">Hao Zhu</a>,
<a href="https://cs.stanford.edu/~diyiy/" target="_blank" style="text-decoration: none;">Diyi Yang</a>,
</p>
<p align='center' style="text-align:center;font-size:1.25em;">
<a href="https://egonormia.org" title="Website" target="_blank" rel="nofollow" style="text-decoration: none;">🌎Website</a> |
<a href="https://huggingface.co/datasets/open-social-world/EgoNormia" title="Dataset" target="_blank" rel="nofollow" style="text-decoration: none;">🤗 Dataset</a> |
<a href="https://arxiv.org/abs/2502.20490" title="aXiv" target="_blank" rel="nofollow" style="text-decoration: none;">📄 arXiv</a> |
<a href="https://hf.co/papers/2502.20490" title="Paper" target="_blank" rel="nofollow" style="text-decoration: none;">📄 HF Paper</a>
</p>
## EgoNormia
EgoNormia is a challenging QA benchmark that tests VLMs' ability to reason over norms in context.
The dataset consists of **1,853** physically grounded egocentric
interaction clips from [Ego4D](https://ego4d-data.org/) and a corresponding **five-way multiple-choice question** task for each.
<br><br>
EgoNormia spans 100 distinct settings across a wide range of activities, cultures, and interactions. Unlike
other visually-grounded spatiotemporal, predictive, or causal reasoning benchmarks,
EgoNormia evaluates models’ ability to reason about what should be done under social norms.
EgoNormia highlights cases where these norm-related objectives conflict—the richest arena for
evaluating normative decision-making.
## Example Data Point
<img src="image/egonormia_teaser.jpg" alt="Teaser" width="800"/>
## EgoNormia Statistics
Statistics | Data Points | Video Sources | Scenarios | Types of Actions
--- | --- | --- | --- | --- |
Number | 1853 | 1077 | 97 | 93
<img src="image/activity_distribution.png" alt="Activity Distribution" width="400"/>
## Benchmarking Guide
The supported method for benchmarking on EgoNormia is to download the github repo `git clone https://github.com/Open-Social-World/EgoNormia.git`,
and then to follow the included guide for running model evaluation and benchmarking (which automatically downloads and uses this dataset).
Alternatively, the HuggingFace dataset can be directly downloaded and evaluated with custom code; however, this is not officially supported.
## Dataset Organization
Below is a detailed explanation of the dataset's structure and its contents:
```
EgoNormia/
├── images/ # Images for dataset card
├── videos/
│ └── {video_uid}/
│ ├── frame_all_during.jpg # Concatenated frames sampled at 1 FPS from video_during.mp4
│ ├── frame_all_prev.jpg # Concatenated frames sampled at 1 FPS from video_prev.mp4
│ ├── video_during.mp4 # Video clip capturing an action being performed
│ └── video_prev.mp4 # Video clip capturing the five seconds before an action is performed
├── final_data.json # JSON file with MCQs and descriptions for videos in EgoNormia
├── train-norm-updated.parquet # Parquet file for dataset visualization
├── .gitattributes # Git configuration
└── README.md # README file
```
### Annotation Format
Each video includes metadata and an MCQ in the following format:
```
{
"video_uid": {
"id": "video_uid",
"behaviors": list of str,
"justifications": list of str,
"correct": int,
"sensibles": list of int,
"taxonomy": {
"0": list of string,
"1": list of string,
"2": list of string,
"3": list of string,
"4": list of string,
},
"desc": str
},
// Next video uid
}
```
## Limitations
While multiple rounds of filtering are applied to ensure diversity in EgoNormia video clips, all
video clips in EgoNormia are sourced exclusively from Ego4D, which may reflect inherent distribution biases
within Ego4D.
## Contact
- Yicheng Fu: [email protected]
- Phil Cuvin: [email protected]
- Hao Zhu: [email protected]
## Acknowledgement
This research was supported in part by Other Transaction award HR00112490375 from the U.S.
Defense Advanced Research Projects Agency (DARPA) Friction for Accountability in Conversational
Transactions (FACT) program. We thank Google Cloud Platform and Modal Platform for their credits.
We thank feedback from Yonatan Bisk and members of the SALT lab at Stanford University.
The authors thank Leena Mathur and Su Li for their help in collecting out-of-domain robotics videos.
## Citation
```bibtex
@misc{rezaei2025egonormiabenchmarkingphysicalsocial,
title={EgoNormia: Benchmarking Physical Social Norm Understanding},
author={MohammadHossein Rezaei and Yicheng Fu and Phil Cuvin and Caleb Ziems and Yanzhe Zhang and Hao Zhu and Diyi Yang},
year={2025},
eprint={2502.20490},
archivePrefix={arXiv},
primaryClass={cs.CV},
url={https://arxiv.org/abs/2502.20490},
}
``` |
allenai/math_qa | allenai | "2024-01-18T11:08:38Z" | 16,674 | 101 | [
"task_categories:question-answering",
"task_ids:multiple-choice-qa",
"annotations_creators:crowdsourced",
"language_creators:crowdsourced",
"language_creators:expert-generated",
"multilinguality:monolingual",
"source_datasets:extended|aqua_rat",
"language:en",
"license:apache-2.0",
"size_categories:10K<n<100K",
"region:us"
] | [
"question-answering"
] | "2022-03-02T23:29:22Z" | ---
annotations_creators:
- crowdsourced
language:
- en
language_creators:
- crowdsourced
- expert-generated
license:
- apache-2.0
multilinguality:
- monolingual
pretty_name: MathQA
size_categories:
- 10K<n<100K
source_datasets:
- extended|aqua_rat
task_categories:
- question-answering
task_ids:
- multiple-choice-qa
paperswithcode_id: mathqa
dataset_info:
features:
- name: Problem
dtype: string
- name: Rationale
dtype: string
- name: options
dtype: string
- name: correct
dtype: string
- name: annotated_formula
dtype: string
- name: linear_formula
dtype: string
- name: category
dtype: string
splits:
- name: test
num_bytes: 1844184
num_examples: 2985
- name: train
num_bytes: 18368826
num_examples: 29837
- name: validation
num_bytes: 2752969
num_examples: 4475
download_size: 7302821
dataset_size: 22965979
---
# Dataset Card for MathQA
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [https://math-qa.github.io/math-QA/](https://math-qa.github.io/math-QA/)
- **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Paper:** [MathQA: Towards Interpretable Math Word Problem Solving with Operation-Based Formalisms](https://aclanthology.org/N19-1245/)
- **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Size of downloaded dataset files:** 7.30 MB
- **Size of the generated dataset:** 22.96 MB
- **Total amount of disk used:** 30.27 MB
### Dataset Summary
We introduce a large-scale dataset of math word problems.
Our dataset is gathered by using a new representation language to annotate over the AQuA-RAT dataset with fully-specified operational programs.
AQuA-RAT has provided the questions, options, rationale, and the correct options.
### Supported Tasks and Leaderboards
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Languages
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Dataset Structure
### Data Instances
#### default
- **Size of downloaded dataset files:** 7.30 MB
- **Size of the generated dataset:** 22.96 MB
- **Total amount of disk used:** 30.27 MB
An example of 'train' looks as follows.
```
{
"Problem": "a multiple choice test consists of 4 questions , and each question has 5 answer choices . in how many r ways can the test be completed if every question is unanswered ?",
"Rationale": "\"5 choices for each of the 4 questions , thus total r of 5 * 5 * 5 * 5 = 5 ^ 4 = 625 ways to answer all of them . answer : c .\"",
"annotated_formula": "power(5, 4)",
"category": "general",
"correct": "c",
"linear_formula": "power(n1,n0)|",
"options": "a ) 24 , b ) 120 , c ) 625 , d ) 720 , e ) 1024"
}
```
### Data Fields
The data fields are the same among all splits.
#### default
- `Problem`: a `string` feature.
- `Rationale`: a `string` feature.
- `options`: a `string` feature.
- `correct`: a `string` feature.
- `annotated_formula`: a `string` feature.
- `linear_formula`: a `string` feature.
- `category`: a `string` feature.
### Data Splits
| name |train|validation|test|
|-------|----:|---------:|---:|
|default|29837| 4475|2985|
## Dataset Creation
### Curation Rationale
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the source language producers?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Annotations
#### Annotation process
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the annotators?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Personal and Sensitive Information
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Discussion of Biases
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Other Known Limitations
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Additional Information
### Dataset Curators
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Licensing Information
The dataset is licensed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0).
### Citation Information
```
@inproceedings{amini-etal-2019-mathqa,
title = "{M}ath{QA}: Towards Interpretable Math Word Problem Solving with Operation-Based Formalisms",
author = "Amini, Aida and
Gabriel, Saadia and
Lin, Shanchuan and
Koncel-Kedziorski, Rik and
Choi, Yejin and
Hajishirzi, Hannaneh",
booktitle = "Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)",
month = jun,
year = "2019",
address = "Minneapolis, Minnesota",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/N19-1245",
doi = "10.18653/v1/N19-1245",
pages = "2357--2367",
}
```
### Contributions
Thanks to [@thomwolf](https://github.com/thomwolf), [@lewtun](https://github.com/lewtun), [@patrickvonplaten](https://github.com/patrickvonplaten) for adding this dataset. |
awettig/RedPajama-combined-15B-8k-llama | awettig | "2023-11-30T07:16:59Z" | 16,408 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2023-11-29T05:26:03Z" | ---
dataset_info:
features:
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
splits:
- name: test
num_bytes: 3181819992
num_examples: 29874
- name: train
num_bytes: 188533325564
num_examples: 1770133
download_size: 0
dataset_size: 191715145556
---
# Dataset Card for "RedPajama-combined-15B-8K-llama"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
gigant/tib_slides_wip | gigant | "2023-03-26T16:22:49Z" | 16,368 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2023-03-26T00:20:40Z" | ---
dataset_info:
features:
- name: Image
dtype: image
- name: file_name
dtype: string
splits:
- name: train
num_bytes: 161850916866.84
num_examples: 595458
download_size: 29396407498
dataset_size: 161850916866.84
---
# Dataset Card for "tib_slides_wip"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
JonasLange/DBD-research-group | JonasLange | "2023-06-27T11:26:14Z" | 16,218 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2023-06-27T08:44:41Z" | ---
dataset_info:
features:
- name: ID
dtype: string
- name: Audio
dtype: audio
- name: Start Time (s)
dtype: string
- name: End Time (s)
dtype: string
- name: Low Freq (Hz)
dtype: string
- name: High Freq (Hz)
dtype: string
- name: Species eBird Code
dtype: string
- name: Call Type
dtype: 'null'
- name: Sex
dtype: 'null'
- name: Latitude
dtype: float64
- name: Longitude
dtype: float64
- name: Uncertainty
dtype: 'null'
- name: Microphone
dtype: string
- name: License
dtype: string
- name: Source
dtype: string
- name: BirdNet Training Data
dtype: bool
splits:
- name: train
num_bytes: 156231869250.216
num_examples: 10976
download_size: 538872707
dataset_size: 156231869250.216
---
# Dataset Card for "DBD-research-group"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
lmqg/qg_koquad | lmqg | "2022-12-02T18:53:42Z" | 16,168 | 8 | [
"task_categories:text-generation",
"task_ids:language-modeling",
"multilinguality:monolingual",
"source_datasets:squad_es",
"language:ko",
"license:cc-by-4.0",
"size_categories:10K<n<100K",
"modality:text",
"library:datasets",
"library:mlcroissant",
"arxiv:2210.03992",
"region:us",
"question-generation"
] | [
"text-generation"
] | "2022-06-02T23:42:21Z" | ---
license: cc-by-4.0
pretty_name: KorQuAD for question generation
language: ko
multilinguality: monolingual
size_categories: 10K<n<100K
source_datasets: squad_kor_v1
task_categories:
- text-generation
task_ids:
- language-modeling
tags:
- question-generation
---
# Dataset Card for "lmqg/qg_korquad"
## Dataset Description
- **Repository:** [https://github.com/asahi417/lm-question-generation](https://github.com/asahi417/lm-question-generation)
- **Paper:** [https://arxiv.org/abs/2210.03992](https://arxiv.org/abs/2210.03992)
- **Point of Contact:** [Asahi Ushio](http://asahiushio.com/)
### Dataset Summary
This is a subset of [QG-Bench](https://github.com/asahi417/lm-question-generation/blob/master/QG_BENCH.md#datasets), a unified question generation benchmark proposed in
["Generative Language Models for Paragraph-Level Question Generation: A Unified Benchmark and Evaluation, EMNLP 2022 main conference"](https://arxiv.org/abs/2210.03992).
This is a modified version of [KorQuAD](https://huggingface.co/datasets/squad_kor_v1) for question generation (QG) task.
Since the original dataset only contains training/validation sets, we manually sampled a test set from the training set, which
has no overlap with the training set in terms of paragraphs.
### Supported Tasks and Leaderboards
* `question-generation`: The dataset is assumed to be used to train a model for question generation.
Success on this task is typically measured by achieving a high BLEU4/METEOR/ROUGE-L/BERTScore/MoverScore (see our paper for more in detail).
### Languages
Korean (ko)
## Dataset Structure
An example of 'train' looks as follows.
```
{
"question": "함수해석학이 주목하는 탐구는?",
"paragraph": "변화에 대한 이해와 묘사는 자연과학에 있어서 일반적인 주제이며, 미적분학은 변화를 탐구하는 강력한 도구로서 발전되었다. 함수는 변화하는 양을 묘사함에 있어서 중추적인 개념으로써 떠오르게 된다. 실수와 실변수로 구성된 함수의 엄밀한 탐구가 실해석학이라는 분야로 알려지게 되었고, 복소수에 대한 이와 같은 탐구분야는 복소해석학이라고 한다. 함수해석학은 함수의 공간(특히 무한차원)의 탐구에 주목한다. 함수해석학의 많은 응용분야 중 하나가 양자역학이다. 많은 문제들이 자연스럽게 양과 그 양의 변화율의 관계로 귀착되고, 이러한 문제들이 미분방정식으로 다루어진다. 자연의 많은 현상들이 동역학계로 기술될 수 있다. 혼돈 이론은 이러한 예측 불가능한 현상을 탐구하는 데 상당한 기여를 한다.",
"answer": "함수의 공간(특히 무한차원)의 탐구",
"sentence": "함수해석학은 함수의 공간(특히 무한차원)의 탐구 에 주목한다.",
"paragraph_sentence": '변화에 대한 이해와 묘사는 자연과학에 있어서 일반적인 주제이며, 미적분학은 변화를 탐구하는 강력한 도구로서 발전되었다. 함수는 변화하는 양을 묘사함에 있어서 중추적인 개념으로써 떠오르게 된다. 실수와 실변수로 구성된 함수의 엄밀한 탐구가 실해석학이라는 분야로 알려지게 되었고, 복소수에 대한 이와 같은 탐구 분야는 복소해석학이라고 한다. <hl> 함수해석학은 함수의 공간(특히 무한차원)의 탐구 에 주목한다. <hl> 함수해석학의 많은 응용분야 중 하나가 양자역학이다. 많은 문제들이 자연스럽게 양과 그 양의 변화율의 관계로 귀착되고, 이러한 문제들이 미분방정식으로 다루어진다. 자연의 많은 현상들이 동역학계로 기술될 수 있다. 혼돈 이론은 이러한 예측 불가능한 현상을 탐구하는 데 상당한 기여를 한다.',
"paragraph_answer": '변화에 대한 이해와 묘사는 자연과학에 있어서 일반적인 주제이며, 미적분학은 변화를 탐구하는 강력한 도구로서 발전되었다. 함수는 변화하는 양을 묘사함에 있어서 중추적인 개념으로써 떠오르게 된다. 실수와 실변수로 구성된 함수의 엄밀한 탐구가 실해석학이라는 분야로 알려지게 되었고, 복소수에 대한 이와 같은 탐구 분야는 복소해석학이라고 한다. 함수해석학은 <hl> 함수의 공간(특히 무한차원)의 탐구 <hl>에 주목한다. 함수해석학의 많은 응용분야 중 하나가 양자역학이다. 많은 문제들이 자연스럽게 양과 그 양의 변화율의 관계로 귀착되고, 이러한 문제들이 미분방정식으로 다루어진다. 자연의 많은 현상들이 동역학계로 기술될 수 있다. 혼돈 이론은 이러한 예측 불가능한 현상을 탐구하는 데 상당한 기여를 한다.',
"sentence_answer": "함수해석학은 <hl> 함수의 공간(특히 무한차원)의 탐구 <hl> 에 주목한다."
}
```
The data fields are the same among all splits.
- `question`: a `string` feature.
- `paragraph`: a `string` feature.
- `answer`: a `string` feature.
- `sentence`: a `string` feature.
- `paragraph_answer`: a `string` feature, which is same as the paragraph but the answer is highlighted by a special token `<hl>`.
- `paragraph_sentence`: a `string` feature, which is same as the paragraph but a sentence containing the answer is highlighted by a special token `<hl>`.
- `sentence_answer`: a `string` feature, which is same as the sentence but the answer is highlighted by a special token `<hl>`.
Each of `paragraph_answer`, `paragraph_sentence`, and `sentence_answer` feature is assumed to be used to train a question generation model,
but with different information. The `paragraph_answer` and `sentence_answer` features are for answer-aware question generation and
`paragraph_sentence` feature is for sentence-aware question generation.
## Data Splits
|train|validation|test |
|----:|---------:|----:|
|54556| 5766 |5766 |
## Citation Information
```
@inproceedings{ushio-etal-2022-generative,
title = "{G}enerative {L}anguage {M}odels for {P}aragraph-{L}evel {Q}uestion {G}eneration: {A} {U}nified {B}enchmark and {E}valuation",
author = "Ushio, Asahi and
Alva-Manchego, Fernando and
Camacho-Collados, Jose",
booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
month = dec,
year = "2022",
address = "Abu Dhabi, U.A.E.",
publisher = "Association for Computational Linguistics",
}
``` |
fxmarty/transformers-regressions | fxmarty | "2023-09-27T13:33:28Z" | 16,131 | 0 | [
"license:mit",
"region:us"
] | null | "2023-08-08T14:18:24Z" | ---
license: mit
---
|
nyu-mll/blimp | nyu-mll | "2024-01-23T09:58:08Z" | 16,130 | 37 | [
"task_categories:text-classification",
"task_ids:acceptability-classification",
"annotations_creators:crowdsourced",
"language_creators:machine-generated",
"multilinguality:monolingual",
"source_datasets:original",
"language:en",
"license:cc-by-4.0",
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:1912.00582",
"region:us"
] | [
"text-classification"
] | "2022-03-02T23:29:22Z" | ---
annotations_creators:
- crowdsourced
language_creators:
- machine-generated
language:
- en
license:
- cc-by-4.0
multilinguality:
- monolingual
size_categories:
- 10K<n<100K
source_datasets:
- original
task_categories:
- text-classification
task_ids:
- acceptability-classification
paperswithcode_id: blimp
pretty_name: BLiMP
dataset_info:
- config_name: adjunct_island
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 165894
num_examples: 1000
download_size: 62231
dataset_size: 165894
- config_name: anaphor_gender_agreement
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 130918
num_examples: 1000
download_size: 39201
dataset_size: 130918
- config_name: anaphor_number_agreement
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 139879
num_examples: 1000
download_size: 41547
dataset_size: 139879
- config_name: animate_subject_passive
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 144423
num_examples: 1000
download_size: 47282
dataset_size: 144423
- config_name: animate_subject_trans
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 127798
num_examples: 1000
download_size: 49651
dataset_size: 127798
- config_name: causative
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 122772
num_examples: 1000
download_size: 48963
dataset_size: 122772
- config_name: complex_NP_island
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 198972
num_examples: 1000
download_size: 78211
dataset_size: 198972
- config_name: coordinate_structure_constraint_complex_left_branch
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 210912
num_examples: 1000
download_size: 67908
dataset_size: 210912
- config_name: coordinate_structure_constraint_object_extraction
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 171655
num_examples: 1000
download_size: 51584
dataset_size: 171655
- config_name: determiner_noun_agreement_1
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 156120
num_examples: 1000
download_size: 49893
dataset_size: 156120
- config_name: determiner_noun_agreement_2
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 156204
num_examples: 1000
download_size: 49527
dataset_size: 156204
- config_name: determiner_noun_agreement_irregular_1
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 164473
num_examples: 1000
download_size: 47274
dataset_size: 164473
- config_name: determiner_noun_agreement_irregular_2
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 161074
num_examples: 1000
download_size: 47422
dataset_size: 161074
- config_name: determiner_noun_agreement_with_adj_2
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 179666
num_examples: 1000
download_size: 56346
dataset_size: 179666
- config_name: determiner_noun_agreement_with_adj_irregular_1
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 184529
num_examples: 1000
download_size: 54405
dataset_size: 184529
- config_name: determiner_noun_agreement_with_adj_irregular_2
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 184396
num_examples: 1000
download_size: 54064
dataset_size: 184396
- config_name: determiner_noun_agreement_with_adjective_1
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 185126
num_examples: 1000
download_size: 55682
dataset_size: 185126
- config_name: distractor_agreement_relational_noun
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 191473
num_examples: 1000
download_size: 59641
dataset_size: 191473
- config_name: distractor_agreement_relative_clause
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 216756
num_examples: 1000
download_size: 77897
dataset_size: 216756
- config_name: drop_argument
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 109806
num_examples: 1000
download_size: 39961
dataset_size: 109806
- config_name: ellipsis_n_bar_1
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 217590
num_examples: 1000
download_size: 92776
dataset_size: 217590
- config_name: ellipsis_n_bar_2
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 233161
num_examples: 1000
download_size: 98882
dataset_size: 233161
- config_name: existential_there_object_raising
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 223741
num_examples: 1000
download_size: 76641
dataset_size: 223741
- config_name: existential_there_quantifiers_1
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 162931
num_examples: 1000
download_size: 51576
dataset_size: 162931
- config_name: existential_there_quantifiers_2
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 164826
num_examples: 1000
download_size: 52092
dataset_size: 164826
- config_name: existential_there_subject_raising
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 200063
num_examples: 1000
download_size: 59519
dataset_size: 200063
- config_name: expletive_it_object_raising
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 238615
num_examples: 1000
download_size: 88607
dataset_size: 238615
- config_name: inchoative
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 104319
num_examples: 1000
download_size: 39842
dataset_size: 104319
- config_name: intransitive
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 111097
num_examples: 1000
download_size: 42387
dataset_size: 111097
- config_name: irregular_past_participle_adjectives
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 144661
num_examples: 1000
download_size: 36654
dataset_size: 144661
- config_name: irregular_past_participle_verbs
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 125692
num_examples: 1000
download_size: 37297
dataset_size: 125692
- config_name: irregular_plural_subject_verb_agreement_1
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 165584
num_examples: 1000
download_size: 50725
dataset_size: 165584
- config_name: irregular_plural_subject_verb_agreement_2
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 153843
num_examples: 1000
download_size: 42707
dataset_size: 153843
- config_name: left_branch_island_echo_question
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 147840
num_examples: 1000
download_size: 50481
dataset_size: 147840
- config_name: left_branch_island_simple_question
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 150060
num_examples: 1000
download_size: 50293
dataset_size: 150060
- config_name: matrix_question_npi_licensor_present
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 153262
num_examples: 1000
download_size: 51899
dataset_size: 153262
- config_name: npi_present_1
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 138465
num_examples: 1000
download_size: 51981
dataset_size: 138465
- config_name: npi_present_2
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 127636
num_examples: 1000
download_size: 51661
dataset_size: 127636
- config_name: only_npi_licensor_present
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 148516
num_examples: 1000
download_size: 51361
dataset_size: 148516
- config_name: only_npi_scope
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 208902
num_examples: 1000
download_size: 84970
dataset_size: 208902
- config_name: passive_1
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 145882
num_examples: 1000
download_size: 53931
dataset_size: 145882
- config_name: passive_2
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 113960
num_examples: 1000
download_size: 40499
dataset_size: 113960
- config_name: principle_A_c_command
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 188490
num_examples: 1000
download_size: 67867
dataset_size: 188490
- config_name: principle_A_case_1
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 170398
num_examples: 1000
download_size: 61092
dataset_size: 170398
- config_name: principle_A_case_2
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 170412
num_examples: 1000
download_size: 56430
dataset_size: 170412
- config_name: principle_A_domain_1
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 171170
num_examples: 1000
download_size: 59120
dataset_size: 171170
- config_name: principle_A_domain_2
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 165333
num_examples: 1000
download_size: 58464
dataset_size: 165333
- config_name: principle_A_domain_3
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 158998
num_examples: 1000
download_size: 52859
dataset_size: 158998
- config_name: principle_A_reconstruction
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 152104
num_examples: 1000
download_size: 44480
dataset_size: 152104
- config_name: regular_plural_subject_verb_agreement_1
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 158819
num_examples: 1000
download_size: 49466
dataset_size: 158819
- config_name: regular_plural_subject_verb_agreement_2
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 153609
num_examples: 1000
download_size: 43365
dataset_size: 153609
- config_name: sentential_negation_npi_licensor_present
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 171864
num_examples: 1000
download_size: 54830
dataset_size: 171864
- config_name: sentential_negation_npi_scope
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 232098
num_examples: 1000
download_size: 90157
dataset_size: 232098
- config_name: sentential_subject_island
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 172432
num_examples: 1000
download_size: 56666
dataset_size: 172432
- config_name: superlative_quantifiers_1
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 159290
num_examples: 1000
download_size: 48453
dataset_size: 159290
- config_name: superlative_quantifiers_2
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 159340
num_examples: 1000
download_size: 50480
dataset_size: 159340
- config_name: tough_vs_raising_1
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 148636
num_examples: 1000
download_size: 44779
dataset_size: 148636
- config_name: tough_vs_raising_2
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 169684
num_examples: 1000
download_size: 61465
dataset_size: 169684
- config_name: transitive
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 133104
num_examples: 1000
download_size: 55090
dataset_size: 133104
- config_name: wh_island
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 142340
num_examples: 1000
download_size: 52808
dataset_size: 142340
- config_name: wh_questions_object_gap
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 193045
num_examples: 1000
download_size: 70049
dataset_size: 193045
- config_name: wh_questions_subject_gap
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 195593
num_examples: 1000
download_size: 71632
dataset_size: 195593
- config_name: wh_questions_subject_gap_long_distance
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 268270
num_examples: 1000
download_size: 98913
dataset_size: 268270
- config_name: wh_vs_that_no_gap
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 188872
num_examples: 1000
download_size: 71710
dataset_size: 188872
- config_name: wh_vs_that_no_gap_long_distance
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 247039
num_examples: 1000
download_size: 95504
dataset_size: 247039
- config_name: wh_vs_that_with_gap
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 173386
num_examples: 1000
download_size: 60291
dataset_size: 173386
- config_name: wh_vs_that_with_gap_long_distance
features:
- name: sentence_good
dtype: string
- name: sentence_bad
dtype: string
- name: field
dtype: string
- name: linguistics_term
dtype: string
- name: UID
dtype: string
- name: simple_LM_method
dtype: bool
- name: one_prefix_method
dtype: bool
- name: two_prefix_method
dtype: bool
- name: lexically_identical
dtype: bool
- name: pair_id
dtype: int32
splits:
- name: train
num_bytes: 231595
num_examples: 1000
download_size: 84147
dataset_size: 231595
configs:
- config_name: adjunct_island
data_files:
- split: train
path: adjunct_island/train-*
- config_name: anaphor_gender_agreement
data_files:
- split: train
path: anaphor_gender_agreement/train-*
- config_name: anaphor_number_agreement
data_files:
- split: train
path: anaphor_number_agreement/train-*
- config_name: animate_subject_passive
data_files:
- split: train
path: animate_subject_passive/train-*
- config_name: animate_subject_trans
data_files:
- split: train
path: animate_subject_trans/train-*
- config_name: causative
data_files:
- split: train
path: causative/train-*
- config_name: complex_NP_island
data_files:
- split: train
path: complex_NP_island/train-*
- config_name: coordinate_structure_constraint_complex_left_branch
data_files:
- split: train
path: coordinate_structure_constraint_complex_left_branch/train-*
- config_name: coordinate_structure_constraint_object_extraction
data_files:
- split: train
path: coordinate_structure_constraint_object_extraction/train-*
- config_name: determiner_noun_agreement_1
data_files:
- split: train
path: determiner_noun_agreement_1/train-*
- config_name: determiner_noun_agreement_2
data_files:
- split: train
path: determiner_noun_agreement_2/train-*
- config_name: determiner_noun_agreement_irregular_1
data_files:
- split: train
path: determiner_noun_agreement_irregular_1/train-*
- config_name: determiner_noun_agreement_irregular_2
data_files:
- split: train
path: determiner_noun_agreement_irregular_2/train-*
- config_name: determiner_noun_agreement_with_adj_2
data_files:
- split: train
path: determiner_noun_agreement_with_adj_2/train-*
- config_name: determiner_noun_agreement_with_adj_irregular_1
data_files:
- split: train
path: determiner_noun_agreement_with_adj_irregular_1/train-*
- config_name: determiner_noun_agreement_with_adj_irregular_2
data_files:
- split: train
path: determiner_noun_agreement_with_adj_irregular_2/train-*
- config_name: determiner_noun_agreement_with_adjective_1
data_files:
- split: train
path: determiner_noun_agreement_with_adjective_1/train-*
- config_name: distractor_agreement_relational_noun
data_files:
- split: train
path: distractor_agreement_relational_noun/train-*
- config_name: distractor_agreement_relative_clause
data_files:
- split: train
path: distractor_agreement_relative_clause/train-*
- config_name: drop_argument
data_files:
- split: train
path: drop_argument/train-*
- config_name: ellipsis_n_bar_1
data_files:
- split: train
path: ellipsis_n_bar_1/train-*
- config_name: ellipsis_n_bar_2
data_files:
- split: train
path: ellipsis_n_bar_2/train-*
- config_name: existential_there_object_raising
data_files:
- split: train
path: existential_there_object_raising/train-*
- config_name: existential_there_quantifiers_1
data_files:
- split: train
path: existential_there_quantifiers_1/train-*
- config_name: existential_there_quantifiers_2
data_files:
- split: train
path: existential_there_quantifiers_2/train-*
- config_name: existential_there_subject_raising
data_files:
- split: train
path: existential_there_subject_raising/train-*
- config_name: expletive_it_object_raising
data_files:
- split: train
path: expletive_it_object_raising/train-*
- config_name: inchoative
data_files:
- split: train
path: inchoative/train-*
- config_name: intransitive
data_files:
- split: train
path: intransitive/train-*
- config_name: irregular_past_participle_adjectives
data_files:
- split: train
path: irregular_past_participle_adjectives/train-*
- config_name: irregular_past_participle_verbs
data_files:
- split: train
path: irregular_past_participle_verbs/train-*
- config_name: irregular_plural_subject_verb_agreement_1
data_files:
- split: train
path: irregular_plural_subject_verb_agreement_1/train-*
- config_name: irregular_plural_subject_verb_agreement_2
data_files:
- split: train
path: irregular_plural_subject_verb_agreement_2/train-*
- config_name: left_branch_island_echo_question
data_files:
- split: train
path: left_branch_island_echo_question/train-*
- config_name: left_branch_island_simple_question
data_files:
- split: train
path: left_branch_island_simple_question/train-*
- config_name: matrix_question_npi_licensor_present
data_files:
- split: train
path: matrix_question_npi_licensor_present/train-*
- config_name: npi_present_1
data_files:
- split: train
path: npi_present_1/train-*
- config_name: npi_present_2
data_files:
- split: train
path: npi_present_2/train-*
- config_name: only_npi_licensor_present
data_files:
- split: train
path: only_npi_licensor_present/train-*
- config_name: only_npi_scope
data_files:
- split: train
path: only_npi_scope/train-*
- config_name: passive_1
data_files:
- split: train
path: passive_1/train-*
- config_name: passive_2
data_files:
- split: train
path: passive_2/train-*
- config_name: principle_A_c_command
data_files:
- split: train
path: principle_A_c_command/train-*
- config_name: principle_A_case_1
data_files:
- split: train
path: principle_A_case_1/train-*
- config_name: principle_A_case_2
data_files:
- split: train
path: principle_A_case_2/train-*
- config_name: principle_A_domain_1
data_files:
- split: train
path: principle_A_domain_1/train-*
- config_name: principle_A_domain_2
data_files:
- split: train
path: principle_A_domain_2/train-*
- config_name: principle_A_domain_3
data_files:
- split: train
path: principle_A_domain_3/train-*
- config_name: principle_A_reconstruction
data_files:
- split: train
path: principle_A_reconstruction/train-*
- config_name: regular_plural_subject_verb_agreement_1
data_files:
- split: train
path: regular_plural_subject_verb_agreement_1/train-*
- config_name: regular_plural_subject_verb_agreement_2
data_files:
- split: train
path: regular_plural_subject_verb_agreement_2/train-*
- config_name: sentential_negation_npi_licensor_present
data_files:
- split: train
path: sentential_negation_npi_licensor_present/train-*
- config_name: sentential_negation_npi_scope
data_files:
- split: train
path: sentential_negation_npi_scope/train-*
- config_name: sentential_subject_island
data_files:
- split: train
path: sentential_subject_island/train-*
- config_name: superlative_quantifiers_1
data_files:
- split: train
path: superlative_quantifiers_1/train-*
- config_name: superlative_quantifiers_2
data_files:
- split: train
path: superlative_quantifiers_2/train-*
- config_name: tough_vs_raising_1
data_files:
- split: train
path: tough_vs_raising_1/train-*
- config_name: tough_vs_raising_2
data_files:
- split: train
path: tough_vs_raising_2/train-*
- config_name: transitive
data_files:
- split: train
path: transitive/train-*
- config_name: wh_island
data_files:
- split: train
path: wh_island/train-*
- config_name: wh_questions_object_gap
data_files:
- split: train
path: wh_questions_object_gap/train-*
- config_name: wh_questions_subject_gap
data_files:
- split: train
path: wh_questions_subject_gap/train-*
- config_name: wh_questions_subject_gap_long_distance
data_files:
- split: train
path: wh_questions_subject_gap_long_distance/train-*
- config_name: wh_vs_that_no_gap
data_files:
- split: train
path: wh_vs_that_no_gap/train-*
- config_name: wh_vs_that_no_gap_long_distance
data_files:
- split: train
path: wh_vs_that_no_gap_long_distance/train-*
- config_name: wh_vs_that_with_gap
data_files:
- split: train
path: wh_vs_that_with_gap/train-*
- config_name: wh_vs_that_with_gap_long_distance
data_files:
- split: train
path: wh_vs_that_with_gap_long_distance/train-*
---
# Dataset Card for "blimp"
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:**
- **Repository:** https://github.com/alexwarstadt/blimp
- **Paper (journal, DOI):** [BLiMP: The Benchmark of Linguistic Minimal Pairs for English](https://doi.org/10.1162/tacl_a_00321)
- **Paper (arXiv preprint):** https://arxiv.org/abs/1912.00582
- **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Size of downloaded dataset files:** 29.58 MB
- **Size of the generated dataset:** 11.45 MB
- **Total amount of disk used:** 41.03 MB
### Dataset Summary
BLiMP is a challenge set for evaluating what language models (LMs) know about
major grammatical phenomena in English. BLiMP consists of 67 sub-datasets, each
containing 1000 minimal pairs isolating specific contrasts in syntax,
morphology, or semantics. The data is automatically generated according to
expert-crafted grammars.
### Supported Tasks and Leaderboards
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Languages
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Dataset Structure
### Data Instances
#### adjunct_island
- **Size of downloaded dataset files:** 0.36 MB
- **Size of the generated dataset:** 0.17 MB
- **Total amount of disk used:** 0.52 MB
An example of 'train' looks as follows.
```
{
"UID": "tough_vs_raising_1",
"field": "syntax_semantics",
"lexically_identical": false,
"linguistics_term": "control_raising",
"one_prefix_method": false,
"pair_id": 2,
"sentence_bad": "Benjamin's tutor was certain to boast about.",
"sentence_good": "Benjamin's tutor was easy to boast about.",
"simple_LM_method": true,
"two_prefix_method": false
}
```
#### anaphor_gender_agreement
- **Size of downloaded dataset files:** 0.44 MB
- **Size of the generated dataset:** 0.14 MB
- **Total amount of disk used:** 0.57 MB
An example of 'train' looks as follows.
```
{
"UID": "tough_vs_raising_1",
"field": "syntax_semantics",
"lexically_identical": false,
"linguistics_term": "control_raising",
"one_prefix_method": false,
"pair_id": 2,
"sentence_bad": "Benjamin's tutor was certain to boast about.",
"sentence_good": "Benjamin's tutor was easy to boast about.",
"simple_LM_method": true,
"two_prefix_method": false
}
```
#### anaphor_number_agreement
- **Size of downloaded dataset files:** 0.45 MB
- **Size of the generated dataset:** 0.14 MB
- **Total amount of disk used:** 0.59 MB
An example of 'train' looks as follows.
```
{
"UID": "tough_vs_raising_1",
"field": "syntax_semantics",
"lexically_identical": false,
"linguistics_term": "control_raising",
"one_prefix_method": false,
"pair_id": 2,
"sentence_bad": "Benjamin's tutor was certain to boast about.",
"sentence_good": "Benjamin's tutor was easy to boast about.",
"simple_LM_method": true,
"two_prefix_method": false
}
```
#### animate_subject_passive
- **Size of downloaded dataset files:** 0.46 MB
- **Size of the generated dataset:** 0.15 MB
- **Total amount of disk used:** 0.61 MB
An example of 'train' looks as follows.
```
{
"UID": "tough_vs_raising_1",
"field": "syntax_semantics",
"lexically_identical": false,
"linguistics_term": "control_raising",
"one_prefix_method": false,
"pair_id": 2,
"sentence_bad": "Benjamin's tutor was certain to boast about.",
"sentence_good": "Benjamin's tutor was easy to boast about.",
"simple_LM_method": true,
"two_prefix_method": false
}
```
#### animate_subject_trans
- **Size of downloaded dataset files:** 0.43 MB
- **Size of the generated dataset:** 0.13 MB
- **Total amount of disk used:** 0.57 MB
An example of 'train' looks as follows.
```
{
"UID": "tough_vs_raising_1",
"field": "syntax_semantics",
"lexically_identical": false,
"linguistics_term": "control_raising",
"one_prefix_method": false,
"pair_id": 2,
"sentence_bad": "Benjamin's tutor was certain to boast about.",
"sentence_good": "Benjamin's tutor was easy to boast about.",
"simple_LM_method": true,
"two_prefix_method": false
}
```
### Data Fields
The data fields are the same among all splits.
#### adjunct_island
- `sentence_good`: a `string` feature.
- `sentence_bad`: a `string` feature.
- `field`: a `string` feature.
- `linguistics_term`: a `string` feature.
- `UID`: a `string` feature.
- `simple_LM_method`: a `bool` feature.
- `one_prefix_method`: a `bool` feature.
- `two_prefix_method`: a `bool` feature.
- `lexically_identical`: a `bool` feature.
- `pair_id`: a `int32` feature.
#### anaphor_gender_agreement
- `sentence_good`: a `string` feature.
- `sentence_bad`: a `string` feature.
- `field`: a `string` feature.
- `linguistics_term`: a `string` feature.
- `UID`: a `string` feature.
- `simple_LM_method`: a `bool` feature.
- `one_prefix_method`: a `bool` feature.
- `two_prefix_method`: a `bool` feature.
- `lexically_identical`: a `bool` feature.
- `pair_id`: a `int32` feature.
#### anaphor_number_agreement
- `sentence_good`: a `string` feature.
- `sentence_bad`: a `string` feature.
- `field`: a `string` feature.
- `linguistics_term`: a `string` feature.
- `UID`: a `string` feature.
- `simple_LM_method`: a `bool` feature.
- `one_prefix_method`: a `bool` feature.
- `two_prefix_method`: a `bool` feature.
- `lexically_identical`: a `bool` feature.
- `pair_id`: a `int32` feature.
#### animate_subject_passive
- `sentence_good`: a `string` feature.
- `sentence_bad`: a `string` feature.
- `field`: a `string` feature.
- `linguistics_term`: a `string` feature.
- `UID`: a `string` feature.
- `simple_LM_method`: a `bool` feature.
- `one_prefix_method`: a `bool` feature.
- `two_prefix_method`: a `bool` feature.
- `lexically_identical`: a `bool` feature.
- `pair_id`: a `int32` feature.
#### animate_subject_trans
- `sentence_good`: a `string` feature.
- `sentence_bad`: a `string` feature.
- `field`: a `string` feature.
- `linguistics_term`: a `string` feature.
- `UID`: a `string` feature.
- `simple_LM_method`: a `bool` feature.
- `one_prefix_method`: a `bool` feature.
- `two_prefix_method`: a `bool` feature.
- `lexically_identical`: a `bool` feature.
- `pair_id`: a `int32` feature.
### Data Splits
| name |train|
|------------------------|----:|
|adjunct_island | 1000|
|anaphor_gender_agreement| 1000|
|anaphor_number_agreement| 1000|
|animate_subject_passive | 1000|
|animate_subject_trans | 1000|
## Dataset Creation
### Curation Rationale
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the source language producers?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Annotations
#### Annotation process
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the annotators?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Personal and Sensitive Information
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Discussion of Biases
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Other Known Limitations
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Additional Information
### Dataset Curators
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Licensing Information
BLiMP is distributed under a [CC-BY](https://creativecommons.org/licenses/by/4.0/) license. Source: https://github.com/alexwarstadt/blimp#license
### Citation Information
```
@article{warstadt2020blimp,
author = {Warstadt, Alex and Parrish, Alicia and Liu, Haokun and Mohananey, Anhad and Peng, Wei and Wang, Sheng-Fu and Bowman, Samuel R.},
title = {BLiMP: The Benchmark of Linguistic Minimal Pairs for English},
journal = {Transactions of the Association for Computational Linguistics},
volume = {8},
number = {},
pages = {377-392},
year = {2020},
doi = {10.1162/tacl\_a\_00321},
URL = {https://doi.org/10.1162/tacl_a_00321},
eprint = {https://doi.org/10.1162/tacl_a_00321},
abstract = { We introduce The Benchmark of Linguistic Minimal Pairs (BLiMP),1 a challenge set for evaluating the linguistic knowledge of language models (LMs) on major grammatical phenomena in English. BLiMP consists of 67 individual datasets, each containing 1,000 minimal pairs—that is, pairs of minimally different sentences that contrast in grammatical acceptability and isolate specific phenomenon in syntax, morphology, or semantics. We generate the data according to linguist-crafted grammar templates, and human aggregate agreement with the labels is 96.4\%. We evaluate n-gram, LSTM, and Transformer (GPT-2 and Transformer-XL) LMs by observing whether they assign a higher probability to the acceptable sentence in each minimal pair. We find that state-of-the-art models identify morphological contrasts related to agreement reliably, but they struggle with some subtle semantic and syntactic phenomena, such as negative polarity items and extraction islands. }
}
```
#### Errata
Some results were misreported in the published TACL version. Please refer to the corrected version on arXiv: https://arxiv.org/abs/1912.00582
### Contributions
Thanks to [@lhoestq](https://github.com/lhoestq), [@patrickvonplaten](https://github.com/patrickvonplaten), [@thomwolf](https://github.com/thomwolf) for adding this dataset. |
alvations/c4p0-x1-en-ja | alvations | "2024-03-24T03:55:23Z" | 15,951 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-03-23T09:54:37Z" | ---
dataset_info:
features:
- name: source
dtype: string
- name: target
dtype: string
- name: target_backto_source
dtype: string
- name: raw_target
list:
- name: generated_text
dtype: string
- name: raw_target_backto_source
list:
- name: generated_text
dtype: string
- name: prompt
dtype: string
- name: reverse_prompt
dtype: string
- name: source_langid
dtype: string
- name: target_langid
dtype: string
- name: target_backto_source_langid
dtype: string
- name: doc_id
dtype: int64
- name: sent_id
dtype: int64
- name: timestamp
dtype: string
- name: url
dtype: string
- name: doc_hash
dtype: string
splits:
- name: train
num_bytes: 49764
num_examples: 42
download_size: 37636
dataset_size: 49764
configs:
- config_name: default
data_files:
- split: train
path: 66034f82c5c65ae4/train-*
---
|
DTU54DL/librispeech-augmentated-train-prepared | DTU54DL | "2022-12-05T14:21:47Z" | 15,940 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2022-11-26T21:55:59Z" | ---
dataset_info:
features:
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: text
dtype: string
- name: speaker_id
dtype: int64
- name: chapter_id
dtype: int64
- name: id
dtype: string
- name: input_features
sequence:
sequence: float32
- name: labels
sequence: int64
splits:
- name: train.360
num_bytes: 6796928865.0
num_examples: 5000
download_size: 3988873165
dataset_size: 6796928865.0
---
# Dataset Card for "librispeech-augmentated-train-prepared"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
shenyunhang/VoiceAssistant-400K | shenyunhang | "2025-03-23T08:36:31Z" | 15,853 | 0 | [
"license:apache-2.0",
"region:us"
] | null | "2025-03-19T02:01:25Z" | ---
license: apache-2.0
---
---
Data source:
https://huggingface.co/datasets/gpt-omni/VoiceAssistant-400K
1. Question and answer audios are extracted, which results in `940,108` audio files.
2. Raw conversations are formatted as multi-round chats in `data.jsonl`, which contains a total of `251,223` samples.
```
[
...
{
"messages": [
{
"role": "user",
"content": "...<|audio|>"
},
{
"role": "assistant",
"content": "...<|audio|>"
}
{
"role": "user",
"content": "...<|audio|>"
},
{
"role": "assistant",
"content": "...<|audio|>"
},
...
],
    "audios": ["path/to/first/audio", "path/to/second/audio", "path/to/third/audio", "path/to/fourth/audio", ...],
},
...
]
```
---
|
lmms-lab/MMMU | lmms-lab | "2024-03-08T05:09:42Z" | 15,840 | 4 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-01-15T06:32:16Z" | ---
dataset_info:
features:
- name: id
dtype: string
- name: question
dtype: string
- name: options
dtype: string
- name: explanation
dtype: string
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: image_3
dtype: image
- name: image_4
dtype: image
- name: image_5
dtype: image
- name: image_6
dtype: image
- name: image_7
dtype: image
- name: img_type
dtype: string
- name: answer
dtype: string
- name: topic_difficulty
dtype: string
- name: question_type
dtype: string
- name: subfield
dtype: string
splits:
- name: dev
num_bytes: 57719107.0
num_examples: 150
- name: validation
num_bytes: 347519954.0
num_examples: 900
- name: test
num_bytes: 3271046267.0
num_examples: 10500
download_size: 3377778136
dataset_size: 3676285328.0
configs:
- config_name: default
data_files:
- split: dev
path: data/dev-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
This is a merged version of [MMMU/MMMU](https://huggingface.co/datasets/MMMU/MMMU) with all subsets concatenated.
<p align="center" width="100%">
<img src="https://i.postimg.cc/g0QRgMVv/WX20240228-113337-2x.png" width="100%" height="80%">
</p>
# Large-scale Multi-modality Models Evaluation Suite
> Accelerating the development of large-scale multi-modality models (LMMs) with `lmms-eval`
🏠 [Homepage](https://lmms-lab.github.io/) | 📚 [Documentation](docs/README.md) | 🤗 [Huggingface Datasets](https://huggingface.co/lmms-lab)
# This Dataset
This is a formatted version of [MMMU](https://github.com/MMMU-Benchmark/MMMU). It is used in our `lmms-eval` pipeline to allow for one-click evaluations of large multi-modality models.
```
@article{yue2023mmmu,
title={Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi},
author={Yue, Xiang and Ni, Yuansheng and Zhang, Kai and Zheng, Tianyu and Liu, Ruoqi and Zhang, Ge and Stevens, Samuel and Jiang, Dongfu and Ren, Weiming and Sun, Yuxuan and others},
journal={arXiv preprint arXiv:2311.16502},
year={2023}
}
``` |
pixparse/cc12m-wds | pixparse | "2023-12-15T04:50:41Z" | 15,829 | 27 | [
"task_categories:image-to-text",
"license:other",
"size_categories:10M<n<100M",
"format:webdataset",
"modality:image",
"modality:text",
"library:datasets",
"library:webdataset",
"library:mlcroissant",
"arxiv:2102.08981",
"region:us"
] | [
"image-to-text"
] | "2023-12-12T23:59:59Z" | ---
license: other
license_name: conceptual-12m
license_link: LICENSE
task_categories:
- image-to-text
size_categories:
- 10M<n<100M
---
# Dataset Card for Conceptual Captions 12M (CC12M)
## Dataset Description
- **Repository:** [Conceptual 12M repository](https://github.com/google-research-datasets/conceptual-12m)
- **Paper:** [Conceptual 12M: Pushing Web-Scale Image-Text Pre-Training To Recognize Long-Tail Visual Concepts](https://arxiv.org/abs/2102.08981)
- **Point of Contact:** [Conceptual Captions e-mail](mailto:[email protected])
### Dataset Summary
Conceptual 12M (CC12M) is a dataset with 12 million image-text pairs specifically meant to be used for vision-and-language pre-training.
Its data collection pipeline is a relaxed version of the one used in Conceptual Captions 3M (CC3M).
### Usage
This instance of Conceptual Captions is in [webdataset](https://github.com/webdataset/webdataset/commits/main) .tar format. It can be used with webdataset library or upcoming releases of Hugging Face `datasets`.
...More Detail TBD
### Data Splits
This dataset was downloaded using img2dataset. Images were resized on download: any image whose shortest edge exceeded 512 pixels was scaled down so that its shortest edge equals 512.
#### Train
* `cc12m-train-*.tar`
* Downloaded on 2021/18/22
* 2176 shards, 10968539 samples
## Additional Information
### Dataset Curators
Soravit Changpinyo, Piyush Sharma, Nan Ding and Radu Soricut.
### Licensing Information
The dataset may be freely used for any purpose, although acknowledgement of
Google LLC ("Google") as the data source would be appreciated. The dataset is
provided "AS IS" without any warranty, express or implied. Google disclaims all
liability for any damages, direct or indirect, resulting from the use of the
dataset.
### Citation Information
```bibtex
@inproceedings{changpinyo2021cc12m,
title = {{Conceptual 12M}: Pushing Web-Scale Image-Text Pre-Training To Recognize Long-Tail Visual Concepts},
author = {Changpinyo, Soravit and Sharma, Piyush and Ding, Nan and Soricut, Radu},
booktitle = {CVPR},
year = {2021},
}
``` |
alvarobartt/mmlu-okapi-eval-es | alvarobartt | "2024-02-29T17:57:41Z" | 15,789 | 1 | [
"task_categories:multiple-choice",
"task_categories:question-answering",
"task_ids:multiple-choice-qa",
"task_ids:open-domain-qa",
"language:es",
"license:cc-by-nc-4.0",
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2009.03300",
"arxiv:2307.16039",
"region:us",
"chatgpt-translated"
] | [
"multiple-choice",
"question-answering"
] | "2024-02-29T12:55:40Z" | ---
language:
- es
license: cc-by-nc-4.0
size_categories:
- 10K<n<100K
task_categories:
- multiple-choice
- question-answering
task_ids:
- multiple-choice-qa
- open-domain-qa
tags:
- chatgpt-translated
dataset_info:
- config_name: abstract_algebra
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 1760
num_examples: 5
- name: validation
num_bytes: 4246
num_examples: 11
- name: test
num_bytes: 40783
num_examples: 100
download_size: 31838
dataset_size: 46789
- config_name: anatomy
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 2024
num_examples: 5
- name: validation
num_bytes: 6533
num_examples: 14
- name: test
num_bytes: 68781
num_examples: 134
download_size: 55543
dataset_size: 77338
- config_name: astronomy
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 4476
num_examples: 5
- name: validation
num_bytes: 10713
num_examples: 16
- name: test
num_bytes: 100179
num_examples: 152
download_size: 78498
dataset_size: 115368
- config_name: business_ethics
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 4608
num_examples: 5
- name: validation
num_bytes: 6415
num_examples: 11
- name: test
num_bytes: 69628
num_examples: 98
download_size: 62408
dataset_size: 80651
- config_name: clinical_knowledge
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 2594
num_examples: 5
- name: validation
num_bytes: 14438
num_examples: 29
- name: test
num_bytes: 135086
num_examples: 263
download_size: 103601
dataset_size: 152118
- config_name: college_biology
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 3177
num_examples: 5
- name: validation
num_bytes: 10330
num_examples: 16
- name: test
num_bytes: 103413
num_examples: 143
download_size: 87080
dataset_size: 116920
- config_name: college_chemistry
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 2779
num_examples: 5
- name: validation
num_bytes: 4874
num_examples: 8
- name: test
num_bytes: 52123
num_examples: 100
download_size: 51328
dataset_size: 59776
- config_name: college_computer_science
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 5805
num_examples: 5
- name: validation
num_bytes: 9920
num_examples: 11
- name: test
num_bytes: 89185
num_examples: 98
download_size: 82341
dataset_size: 104910
- config_name: college_mathematics
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 3089
num_examples: 5
- name: validation
num_bytes: 5484
num_examples: 11
- name: test
num_bytes: 50044
num_examples: 97
download_size: 51658
dataset_size: 58617
- config_name: college_medicine
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 3649
num_examples: 5
- name: validation
num_bytes: 16728
num_examples: 22
- name: test
num_bytes: 171553
num_examples: 171
download_size: 115167
dataset_size: 191930
- config_name: college_physics
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 2898
num_examples: 5
- name: validation
num_bytes: 7335
num_examples: 11
- name: test
num_bytes: 62094
num_examples: 100
download_size: 57235
dataset_size: 72327
- config_name: computer_security
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 1733
num_examples: 4
- name: validation
num_bytes: 9678
num_examples: 11
- name: test
num_bytes: 58507
num_examples: 100
download_size: 57512
dataset_size: 69918
- config_name: conceptual_physics
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 1965
num_examples: 5
- name: validation
num_bytes: 9503
num_examples: 26
- name: test
num_bytes: 86744
num_examples: 235
download_size: 69227
dataset_size: 98212
- config_name: econometrics
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 3459
num_examples: 5
- name: validation
num_bytes: 10475
num_examples: 12
- name: test
num_bytes: 94779
num_examples: 110
download_size: 71152
dataset_size: 108713
- config_name: electrical_engineering
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 2046
num_examples: 5
- name: validation
num_bytes: 6173
num_examples: 16
- name: test
num_bytes: 54302
num_examples: 145
download_size: 51393
dataset_size: 62521
- config_name: elementary_mathematics
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 2726
num_examples: 4
- name: validation
num_bytes: 18444
num_examples: 40
- name: test
num_bytes: 144531
num_examples: 369
download_size: 109491
dataset_size: 165701
- config_name: formal_logic
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 3619
num_examples: 5
- name: validation
num_bytes: 12939
num_examples: 14
- name: test
num_bytes: 96569
num_examples: 118
download_size: 63090
dataset_size: 113127
- config_name: global_facts
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 2553
num_examples: 5
- name: validation
num_bytes: 3941
num_examples: 10
- name: test
num_bytes: 39400
num_examples: 100
download_size: 37322
dataset_size: 45894
- config_name: high_school_biology
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 3476
num_examples: 5
- name: validation
num_bytes: 23395
num_examples: 32
- name: test
num_bytes: 232885
num_examples: 309
download_size: 158998
dataset_size: 259756
- config_name: high_school_chemistry
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 2507
num_examples: 5
- name: validation
num_bytes: 14839
num_examples: 22
- name: test
num_bytes: 120079
num_examples: 200
download_size: 90031
dataset_size: 137425
- config_name: high_school_computer_science
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 6121
num_examples: 5
- name: validation
num_bytes: 6988
num_examples: 8
- name: test
num_bytes: 93799
num_examples: 97
download_size: 75958
dataset_size: 106908
- config_name: high_school_european_history
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 14265
num_examples: 3
- name: validation
num_bytes: 57480
num_examples: 17
- name: test
num_bytes: 479646
num_examples: 148
download_size: 359755
dataset_size: 551391
- config_name: high_school_geography
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 3101
num_examples: 5
- name: validation
num_bytes: 9160
num_examples: 22
- name: test
num_bytes: 87342
num_examples: 192
download_size: 75260
dataset_size: 99603
- config_name: high_school_government_and_politics
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 3772
num_examples: 5
- name: validation
num_bytes: 14984
num_examples: 21
- name: test
num_bytes: 141849
num_examples: 193
download_size: 106607
dataset_size: 160605
- config_name: high_school_macroeconomics
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 2933
num_examples: 5
- name: validation
num_bytes: 27945
num_examples: 43
- name: test
num_bytes: 249710
num_examples: 387
download_size: 141531
dataset_size: 280588
- config_name: high_school_mathematics
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 2617
num_examples: 5
- name: validation
num_bytes: 11798
num_examples: 28
- name: test
num_bytes: 112551
num_examples: 266
download_size: 89117
dataset_size: 126966
- config_name: high_school_microeconomics
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 2710
num_examples: 5
- name: validation
num_bytes: 16309
num_examples: 26
- name: test
num_bytes: 160145
num_examples: 234
download_size: 98676
dataset_size: 179164
- config_name: high_school_physics
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 3173
num_examples: 5
- name: validation
num_bytes: 14127
num_examples: 17
- name: test
num_bytes: 123938
num_examples: 149
download_size: 90127
dataset_size: 141238
- config_name: high_school_psychology
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 3883
num_examples: 5
- name: validation
num_bytes: 36566
num_examples: 60
- name: test
num_bytes: 318886
num_examples: 513
download_size: 221819
dataset_size: 359335
- config_name: high_school_statistics
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 5264
num_examples: 5
- name: validation
num_bytes: 21199
num_examples: 23
- name: test
num_bytes: 234094
num_examples: 215
download_size: 150556
dataset_size: 260557
- config_name: high_school_us_history
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 10940
num_examples: 3
- name: validation
num_bytes: 57373
num_examples: 19
- name: test
num_bytes: 415443
num_examples: 149
download_size: 309982
dataset_size: 483756
- config_name: high_school_world_history
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 10120
num_examples: 5
- name: validation
num_bytes: 70014
num_examples: 21
- name: test
num_bytes: 629850
num_examples: 201
download_size: 441428
dataset_size: 709984
- config_name: human_aging
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 2066
num_examples: 5
- name: validation
num_bytes: 10131
num_examples: 23
- name: test
num_bytes: 96475
num_examples: 219
download_size: 81152
dataset_size: 108672
- config_name: human_sexuality
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 1251
num_examples: 3
- name: validation
num_bytes: 5129
num_examples: 12
- name: test
num_bytes: 53324
num_examples: 110
download_size: 53146
dataset_size: 59704
- config_name: international_law
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 5118
num_examples: 5
- name: validation
num_bytes: 13609
num_examples: 13
- name: test
num_bytes: 114851
num_examples: 121
download_size: 83492
dataset_size: 133578
- config_name: jurisprudence
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 2642
num_examples: 5
- name: validation
num_bytes: 7940
num_examples: 11
- name: test
num_bytes: 71653
num_examples: 108
download_size: 66964
dataset_size: 82235
- config_name: logical_fallacies
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 3160
num_examples: 5
- name: validation
num_bytes: 10588
num_examples: 18
- name: test
num_bytes: 103636
num_examples: 161
download_size: 66840
dataset_size: 117384
- config_name: machine_learning
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 5036
num_examples: 5
- name: validation
num_bytes: 6891
num_examples: 11
- name: test
num_bytes: 73135
num_examples: 112
download_size: 60833
dataset_size: 85062
- config_name: management
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 1872
num_examples: 5
- name: validation
num_bytes: 3799
num_examples: 11
- name: test
num_bytes: 42556
num_examples: 103
download_size: 43017
dataset_size: 48227
- config_name: marketing
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 3239
num_examples: 5
- name: validation
num_bytes: 15704
num_examples: 25
- name: test
num_bytes: 132425
num_examples: 231
download_size: 98948
dataset_size: 151368
- config_name: medical_genetics
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 2284
num_examples: 5
- name: validation
num_bytes: 6400
num_examples: 11
- name: test
num_bytes: 44372
num_examples: 100
download_size: 48735
dataset_size: 53056
- config_name: miscellaneous
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 1437
num_examples: 5
- name: validation
num_bytes: 30333
num_examples: 86
- name: test
num_bytes: 304980
num_examples: 760
download_size: 231606
dataset_size: 336750
- config_name: moral_disputes
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 3671
num_examples: 5
- name: validation
num_bytes: 25869
num_examples: 38
- name: test
num_bytes: 214143
num_examples: 327
download_size: 147774
dataset_size: 243683
- config_name: moral_scenarios
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 2657
num_examples: 3
- name: validation
num_bytes: 71335
num_examples: 78
- name: test
num_bytes: 683382
num_examples: 752
download_size: 213484
dataset_size: 757374
- config_name: nutrition
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 4523
num_examples: 5
- name: validation
num_bytes: 17721
num_examples: 32
- name: test
num_bytes: 199634
num_examples: 305
download_size: 138805
dataset_size: 221878
- config_name: philosophy
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 2058
num_examples: 5
- name: validation
num_bytes: 19167
num_examples: 34
- name: test
num_bytes: 161737
num_examples: 302
download_size: 121059
dataset_size: 182962
- config_name: prehistory
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 3992
num_examples: 5
- name: validation
num_bytes: 21214
num_examples: 34
- name: test
num_bytes: 181683
num_examples: 314
download_size: 136059
dataset_size: 206889
- config_name: professional_accounting
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 4568
num_examples: 5
- name: validation
num_bytes: 29165
num_examples: 30
- name: test
num_bytes: 266225
num_examples: 282
download_size: 181436
dataset_size: 299958
- config_name: professional_law
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 9431
num_examples: 4
- name: validation
num_bytes: 363635
num_examples: 145
- name: test
num_bytes: 3285957
num_examples: 1292
download_size: 1993775
dataset_size: 3659023
- config_name: professional_medicine
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 7060
num_examples: 4
- name: validation
num_bytes: 47479
num_examples: 30
- name: test
num_bytes: 446995
num_examples: 265
download_size: 311538
dataset_size: 501534
- config_name: professional_psychology
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 4597
num_examples: 5
- name: validation
num_bytes: 60499
num_examples: 68
- name: test
num_bytes: 471519
num_examples: 604
download_size: 325283
dataset_size: 536615
- config_name: public_relations
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 3165
num_examples: 5
- name: validation
num_bytes: 9669
num_examples: 12
- name: test
num_bytes: 60281
num_examples: 109
download_size: 61213
dataset_size: 73115
- config_name: security_studies
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 11199
num_examples: 5
- name: validation
num_bytes: 47992
num_examples: 27
- name: test
num_bytes: 427743
num_examples: 240
download_size: 282999
dataset_size: 486934
- config_name: sociology
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 3456
num_examples: 5
- name: validation
num_bytes: 14660
num_examples: 21
- name: test
num_bytes: 138231
num_examples: 196
download_size: 111807
dataset_size: 156347
- config_name: us_foreign_policy
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 3440
num_examples: 5
- name: validation
num_bytes: 6883
num_examples: 11
- name: test
num_bytes: 60635
num_examples: 99
download_size: 56816
dataset_size: 70958
- config_name: virology
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 1836
num_examples: 4
- name: validation
num_bytes: 10140
num_examples: 17
- name: test
num_bytes: 82841
num_examples: 165
download_size: 73952
dataset_size: 94817
- config_name: world_religions
features:
- name: en_question
dtype: string
- name: es_question
dtype: string
- name: en_choices
sequence: string
- name: es_choices
sequence: string
- name: en_answer
dtype: int64
- name: es_answer
dtype: int64
splits:
- name: dev
num_bytes: 1357
num_examples: 5
- name: validation
num_bytes: 5669
num_examples: 19
- name: test
num_bytes: 51954
num_examples: 171
download_size: 51989
dataset_size: 58980
configs:
- config_name: abstract_algebra
data_files:
- split: dev
path: abstract_algebra/dev-*
- split: validation
path: abstract_algebra/validation-*
- split: test
path: abstract_algebra/test-*
- config_name: anatomy
data_files:
- split: dev
path: anatomy/dev-*
- split: validation
path: anatomy/validation-*
- split: test
path: anatomy/test-*
- config_name: astronomy
data_files:
- split: dev
path: astronomy/dev-*
- split: validation
path: astronomy/validation-*
- split: test
path: astronomy/test-*
- config_name: business_ethics
data_files:
- split: dev
path: business_ethics/dev-*
- split: validation
path: business_ethics/validation-*
- split: test
path: business_ethics/test-*
- config_name: clinical_knowledge
data_files:
- split: dev
path: clinical_knowledge/dev-*
- split: validation
path: clinical_knowledge/validation-*
- split: test
path: clinical_knowledge/test-*
- config_name: college_biology
data_files:
- split: dev
path: college_biology/dev-*
- split: validation
path: college_biology/validation-*
- split: test
path: college_biology/test-*
- config_name: college_chemistry
data_files:
- split: dev
path: college_chemistry/dev-*
- split: validation
path: college_chemistry/validation-*
- split: test
path: college_chemistry/test-*
- config_name: college_computer_science
data_files:
- split: dev
path: college_computer_science/dev-*
- split: validation
path: college_computer_science/validation-*
- split: test
path: college_computer_science/test-*
- config_name: college_mathematics
data_files:
- split: dev
path: college_mathematics/dev-*
- split: validation
path: college_mathematics/validation-*
- split: test
path: college_mathematics/test-*
- config_name: college_medicine
data_files:
- split: dev
path: college_medicine/dev-*
- split: validation
path: college_medicine/validation-*
- split: test
path: college_medicine/test-*
- config_name: college_physics
data_files:
- split: dev
path: college_physics/dev-*
- split: validation
path: college_physics/validation-*
- split: test
path: college_physics/test-*
- config_name: computer_security
data_files:
- split: dev
path: computer_security/dev-*
- split: validation
path: computer_security/validation-*
- split: test
path: computer_security/test-*
- config_name: conceptual_physics
data_files:
- split: dev
path: conceptual_physics/dev-*
- split: validation
path: conceptual_physics/validation-*
- split: test
path: conceptual_physics/test-*
- config_name: econometrics
data_files:
- split: dev
path: econometrics/dev-*
- split: validation
path: econometrics/validation-*
- split: test
path: econometrics/test-*
- config_name: electrical_engineering
data_files:
- split: dev
path: electrical_engineering/dev-*
- split: validation
path: electrical_engineering/validation-*
- split: test
path: electrical_engineering/test-*
- config_name: elementary_mathematics
data_files:
- split: dev
path: elementary_mathematics/dev-*
- split: validation
path: elementary_mathematics/validation-*
- split: test
path: elementary_mathematics/test-*
- config_name: formal_logic
data_files:
- split: dev
path: formal_logic/dev-*
- split: validation
path: formal_logic/validation-*
- split: test
path: formal_logic/test-*
- config_name: global_facts
data_files:
- split: dev
path: global_facts/dev-*
- split: validation
path: global_facts/validation-*
- split: test
path: global_facts/test-*
- config_name: high_school_biology
data_files:
- split: dev
path: high_school_biology/dev-*
- split: validation
path: high_school_biology/validation-*
- split: test
path: high_school_biology/test-*
- config_name: high_school_chemistry
data_files:
- split: dev
path: high_school_chemistry/dev-*
- split: validation
path: high_school_chemistry/validation-*
- split: test
path: high_school_chemistry/test-*
- config_name: high_school_computer_science
data_files:
- split: dev
path: high_school_computer_science/dev-*
- split: validation
path: high_school_computer_science/validation-*
- split: test
path: high_school_computer_science/test-*
- config_name: high_school_european_history
data_files:
- split: dev
path: high_school_european_history/dev-*
- split: validation
path: high_school_european_history/validation-*
- split: test
path: high_school_european_history/test-*
- config_name: high_school_geography
data_files:
- split: dev
path: high_school_geography/dev-*
- split: validation
path: high_school_geography/validation-*
- split: test
path: high_school_geography/test-*
- config_name: high_school_government_and_politics
data_files:
- split: dev
path: high_school_government_and_politics/dev-*
- split: validation
path: high_school_government_and_politics/validation-*
- split: test
path: high_school_government_and_politics/test-*
- config_name: high_school_macroeconomics
data_files:
- split: dev
path: high_school_macroeconomics/dev-*
- split: validation
path: high_school_macroeconomics/validation-*
- split: test
path: high_school_macroeconomics/test-*
- config_name: high_school_mathematics
data_files:
- split: dev
path: high_school_mathematics/dev-*
- split: validation
path: high_school_mathematics/validation-*
- split: test
path: high_school_mathematics/test-*
- config_name: high_school_microeconomics
data_files:
- split: dev
path: high_school_microeconomics/dev-*
- split: validation
path: high_school_microeconomics/validation-*
- split: test
path: high_school_microeconomics/test-*
- config_name: high_school_physics
data_files:
- split: dev
path: high_school_physics/dev-*
- split: validation
path: high_school_physics/validation-*
- split: test
path: high_school_physics/test-*
- config_name: high_school_psychology
data_files:
- split: dev
path: high_school_psychology/dev-*
- split: validation
path: high_school_psychology/validation-*
- split: test
path: high_school_psychology/test-*
- config_name: high_school_statistics
data_files:
- split: dev
path: high_school_statistics/dev-*
- split: validation
path: high_school_statistics/validation-*
- split: test
path: high_school_statistics/test-*
- config_name: high_school_us_history
data_files:
- split: dev
path: high_school_us_history/dev-*
- split: validation
path: high_school_us_history/validation-*
- split: test
path: high_school_us_history/test-*
- config_name: high_school_world_history
data_files:
- split: dev
path: high_school_world_history/dev-*
- split: validation
path: high_school_world_history/validation-*
- split: test
path: high_school_world_history/test-*
- config_name: human_aging
data_files:
- split: dev
path: human_aging/dev-*
- split: validation
path: human_aging/validation-*
- split: test
path: human_aging/test-*
- config_name: human_sexuality
data_files:
- split: dev
path: human_sexuality/dev-*
- split: validation
path: human_sexuality/validation-*
- split: test
path: human_sexuality/test-*
- config_name: international_law
data_files:
- split: dev
path: international_law/dev-*
- split: validation
path: international_law/validation-*
- split: test
path: international_law/test-*
- config_name: jurisprudence
data_files:
- split: dev
path: jurisprudence/dev-*
- split: validation
path: jurisprudence/validation-*
- split: test
path: jurisprudence/test-*
- config_name: logical_fallacies
data_files:
- split: dev
path: logical_fallacies/dev-*
- split: validation
path: logical_fallacies/validation-*
- split: test
path: logical_fallacies/test-*
- config_name: machine_learning
data_files:
- split: dev
path: machine_learning/dev-*
- split: validation
path: machine_learning/validation-*
- split: test
path: machine_learning/test-*
- config_name: management
data_files:
- split: dev
path: management/dev-*
- split: validation
path: management/validation-*
- split: test
path: management/test-*
- config_name: marketing
data_files:
- split: dev
path: marketing/dev-*
- split: validation
path: marketing/validation-*
- split: test
path: marketing/test-*
- config_name: medical_genetics
data_files:
- split: dev
path: medical_genetics/dev-*
- split: validation
path: medical_genetics/validation-*
- split: test
path: medical_genetics/test-*
- config_name: miscellaneous
data_files:
- split: dev
path: miscellaneous/dev-*
- split: validation
path: miscellaneous/validation-*
- split: test
path: miscellaneous/test-*
- config_name: moral_disputes
data_files:
- split: dev
path: moral_disputes/dev-*
- split: validation
path: moral_disputes/validation-*
- split: test
path: moral_disputes/test-*
- config_name: moral_scenarios
data_files:
- split: dev
path: moral_scenarios/dev-*
- split: validation
path: moral_scenarios/validation-*
- split: test
path: moral_scenarios/test-*
- config_name: nutrition
data_files:
- split: dev
path: nutrition/dev-*
- split: validation
path: nutrition/validation-*
- split: test
path: nutrition/test-*
- config_name: philosophy
data_files:
- split: dev
path: philosophy/dev-*
- split: validation
path: philosophy/validation-*
- split: test
path: philosophy/test-*
- config_name: prehistory
data_files:
- split: dev
path: prehistory/dev-*
- split: validation
path: prehistory/validation-*
- split: test
path: prehistory/test-*
- config_name: professional_accounting
data_files:
- split: dev
path: professional_accounting/dev-*
- split: validation
path: professional_accounting/validation-*
- split: test
path: professional_accounting/test-*
- config_name: professional_law
data_files:
- split: dev
path: professional_law/dev-*
- split: validation
path: professional_law/validation-*
- split: test
path: professional_law/test-*
- config_name: professional_medicine
data_files:
- split: dev
path: professional_medicine/dev-*
- split: validation
path: professional_medicine/validation-*
- split: test
path: professional_medicine/test-*
- config_name: professional_psychology
data_files:
- split: dev
path: professional_psychology/dev-*
- split: validation
path: professional_psychology/validation-*
- split: test
path: professional_psychology/test-*
- config_name: public_relations
data_files:
- split: dev
path: public_relations/dev-*
- split: validation
path: public_relations/validation-*
- split: test
path: public_relations/test-*
- config_name: security_studies
data_files:
- split: dev
path: security_studies/dev-*
- split: validation
path: security_studies/validation-*
- split: test
path: security_studies/test-*
- config_name: sociology
data_files:
- split: dev
path: sociology/dev-*
- split: validation
path: sociology/validation-*
- split: test
path: sociology/test-*
- config_name: us_foreign_policy
data_files:
- split: dev
path: us_foreign_policy/dev-*
- split: validation
path: us_foreign_policy/validation-*
- split: test
path: us_foreign_policy/test-*
- config_name: virology
data_files:
- split: dev
path: virology/dev-*
- split: validation
path: virology/validation-*
- split: test
path: virology/test-*
- config_name: world_religions
data_files:
- split: dev
path: world_religions/dev-*
- split: validation
path: world_religions/validation-*
- split: test
path: world_religions/test-*
---
# MMLU translated to Spanish
This dataset was generated by the Natural Language Processing Group of the University of Oregon, who took the
original English MMLU dataset and translated it into multiple languages using ChatGPT.
This dataset contains only the Spanish translation, but additional languages are covered in the original
subsets published by the University of Oregon at http://nlp.uoregon.edu/download/okapi-eval/datasets/.
## Disclaimer
All credit for this dataset goes to the original authors of MMLU (licensed under MIT) and to the authors of
this ChatGPT-based translation (licensed under CC BY-NC 4.0, which permits only non-commercial use).
## References
* [Measuring Massive Multitask Language Understanding](https://arxiv.org/abs/2009.03300)
* [Okapi: Instruction-tuned Large Language Models in Multiple Languages with Reinforcement Learning from Human Feedback](https://arxiv.org/abs/2307.16039) |
openbmb/DCAD-2000 | openbmb | "2025-04-04T09:51:21Z" | 15,764 | 4 | [
"multilinguality:multilingual",
"language:eng",
"language:rus",
"language:cmn",
"language:deu",
"language:spa",
"language:fra",
"language:jpn",
"language:ita",
"language:por",
"language:pol",
"language:nld",
"language:ind",
"language:tur",
"language:vie",
"language:fas",
"language:kor",
"language:swe",
"language:hun",
"language:ell",
"language:ukr",
"language:tha",
"language:aze",
"language:arb",
"language:slv",
"language:cat",
"language:fin",
"language:hbs",
"language:ces",
"language:fil",
"language:guj",
"language:nob",
"language:mal",
"language:bul",
"language:kan",
"language:hin",
"language:tam",
"language:ara",
"language:kaz",
"language:sqi",
"language:pan",
"language:est",
"language:srp",
"language:heb",
"language:isl",
"language:mlt",
"language:mkd",
"language:bos",
"language:lit",
"language:kat",
"language:hrv",
"language:ben",
"language:glg",
"language:ceb",
"language:ron",
"language:msa",
"language:hye",
"language:tgk",
"language:mar",
"language:bel",
"language:nep",
"language:urd",
"language:slk",
"language:mon",
"language:swa",
"language:dan",
"language:eus",
"language:azj",
"language:sin",
"language:lat",
"language:als",
"language:tel",
"language:afr",
"language:ekk",
"language:ltz",
"language:zsm",
"language:som",
"language:cym",
"language:und",
"language:kir",
"language:nor",
"language:uzb",
"language:mya",
"language:epo",
"language:hau",
"language:ary",
"language:gle",
"language:lvs",
"language:nno",
"language:ory",
"language:amh",
"language:tat",
"language:khm",
"language:arz",
"language:pus",
"language:lav",
"language:war",
"language:mlg",
"language:uzn",
"language:hat",
"language:zul",
"language:kur",
"language:tgl",
"language:div",
"language:fry",
"language:sna",
"language:fao",
"language:sun",
"language:snd",
"language:cos",
"language:bak",
"language:ckb",
"language:asm",
"language:ast",
"language:ibo",
"language:mri",
"language:ars",
"language:anp",
"language:khk",
"language:lao",
"language:ori",
"language:yid",
"language:hmn",
"language:jav",
"language:nya",
"language:tir",
"language:uig",
"language:wln",
"language:yor",
"language:swh",
"language:gla",
"language:xho",
"language:bre",
"language:sot",
"language:che",
"language:nan",
"language:bew",
"language:glk",
"language:smo",
"language:orm",
"language:zho",
"language:haw",
"language:pnb",
"language:oci",
"language:chv",
"language:kin",
"language:pcm",
"language:kaa",
"language:lug",
"language:pbt",
"language:gsw",
"language:min",
"language:tuk",
"language:lim",
"language:kmr",
"language:roh",
"language:vec",
"language:san",
"language:gmh",
"language:nds",
"language:sco",
"language:azb",
"language:tsn",
"language:sah",
"language:pri",
"language:pap",
"language:plt",
"language:lmo",
"language:bod",
"language:yue",
"language:bar",
"language:hif",
"language:arg",
"language:pms",
"language:nrm",
"language:vol",
"language:aeb",
"language:ido",
"language:pam",
"language:kal",
"language:lus",
"language:new",
"language:sme",
"language:nso",
"language:lld",
"language:ina",
"language:scn",
"language:hyw",
"language:abk",
"language:bpy",
"language:lin",
"language:oss",
"language:que",
"language:szl",
"language:sdh",
"language:mww",
"language:mhr",
"language:rmy",
"language:diq",
"language:srd",
"language:ydd",
"language:run",
"language:hsb",
"language:wol",
"language:kiu",
"language:ilo",
"language:cak",
"language:acm",
"language:grc",
"language:xmf",
"language:vls",
"language:tso",
"language:crh",
"language:nav",
"language:mwl",
"language:grn",
"language:mui",
"language:mvf",
"language:udm",
"language:mzn",
"language:bcl",
"language:ban",
"language:afb",
"language:cnh",
"language:cbk",
"language:tyv",
"language:ful",
"language:zea",
"language:frp",
"language:frr",
"language:pfl",
"language:lij",
"language:apc",
"language:mai",
"language:pon",
"language:eml",
"language:uzs",
"language:nde",
"language:hil",
"language:mam",
"language:gom",
"language:dag",
"language:nap",
"language:bjn",
"language:hbo",
"language:tet",
"language:inh",
"language:ven",
"language:gaz",
"language:tpi",
"language:ajp",
"language:sat",
"language:wuu",
"language:bam",
"language:vep",
"language:glv",
"language:tzo",
"language:bik",
"language:acr",
"language:twi",
"language:myv",
"language:ewe",
"language:poh",
"language:ile",
"language:rue",
"language:ace",
"language:kha",
"language:ksh",
"language:hui",
"language:gor",
"language:krc",
"language:lfn",
"language:tls",
"language:fur",
"language:gag",
"language:chk",
"language:hmo",
"language:kek",
"language:fon",
"language:ssw",
"language:quh",
"language:meu",
"language:csb",
"language:ksd",
"language:ext",
"language:rop",
"language:aln",
"language:kab",
"language:shn",
"language:tbz",
"language:knv",
"language:bqc",
"language:npi",
"language:rug",
"language:kom",
"language:kpg",
"language:kbd",
"language:dzo",
"language:wal",
"language:zlm",
"language:mzh",
"language:hac",
"language:tum",
"language:sgs",
"language:fij",
"language:tuc",
"language:jbo",
"language:iba",
"language:pcd",
"language:kjh",
"language:ixl",
"language:ndo",
"language:suz",
"language:mfe",
"language:bxr",
"language:ava",
"language:bho",
"language:brh",
"language:ctd",
"language:aym",
"language:ady",
"language:nbl",
"language:hus",
"language:gug",
"language:bat",
"language:hne",
"language:srn",
"language:ctu",
"language:ton",
"language:bug",
"language:aoj",
"language:jam",
"language:lez",
"language:xav",
"language:luo",
"language:ltg",
"language:bih",
"language:mrj",
"language:avk",
"language:kos",
"language:kbp",
"language:alt",
"language:cfm",
"language:goh",
"language:lad",
"language:cor",
"language:quc",
"language:kpv",
"language:aka",
"language:hak",
"language:rmn",
"language:pls",
"language:sag",
"language:rcf",
"language:gym",
"language:kik",
"language:pag",
"language:arn",
"language:ngu",
"language:dsb",
"language:tca",
"language:mni",
"language:naq",
"language:pdc",
"language:mps",
"language:olo",
"language:fro",
"language:fit",
"language:cdo",
"language:kac",
"language:mdf",
"language:mad",
"language:cab",
"language:srm",
"language:toj",
"language:zai",
"language:guc",
"language:smn",
"language:ote",
"language:nch",
"language:prs",
"language:bis",
"language:cuk",
"language:bzj",
"language:map",
"language:djk",
"language:ncj",
"language:mco",
"language:shp",
"language:ang",
"language:seh",
"language:ikk",
"language:qvi",
"language:yom",
"language:top",
"language:awa",
"language:ike",
"language:mau",
"language:kum",
"language:mos",
"language:tly",
"language:ayr",
"language:tdt",
"language:yap",
"language:evn",
"language:tah",
"language:xal",
"language:szy",
"language:quw",
"language:stq",
"language:tlh",
"language:otq",
"language:nov",
"language:skr",
"language:yua",
"language:raw",
"language:fiu",
"language:enm",
"language:bbc",
"language:non",
"language:ber",
"language:kea",
"language:bua",
"language:kua",
"language:zza",
"language:csy",
"language:mnw",
"language:dtp",
"language:nah",
"language:yao",
"language:nog",
"language:abt",
"language:meo",
"language:ame",
"language:mbf",
"language:chm",
"language:kas",
"language:crs",
"language:tcy",
"language:lbe",
"language:cni",
"language:rom",
"language:zom",
"language:blk",
"language:efi",
"language:ada",
"language:koi",
"language:bbj",
"language:trv",
"language:ach",
"language:mgh",
"language:gcr",
"language:krl",
"language:iso",
"language:lki",
"language:atj",
"language:cmo",
"language:chu",
"language:gur",
"language:iku",
"language:sma",
"language:lrc",
"language:gos",
"language:dty",
"language:nzi",
"language:btx",
"language:brx",
"language:bts",
"language:nyu",
"language:ckt",
"language:guw",
"language:pck",
"language:quy",
"language:gcf",
"language:ksw",
"language:nia",
"language:bal",
"language:nhe",
"language:hwc",
"language:mup",
"language:dyu",
"language:ami",
"language:tay",
"language:gaa",
"language:loz",
"language:abq",
"language:pnt",
"language:ium",
"language:abs",
"language:cmr",
"language:shi",
"language:tzm",
"language:gil",
"language:ewo",
"language:kon",
"language:sms",
"language:ape",
"language:tzh",
"language:bci",
"language:acd",
"language:tab",
"language:fuv",
"language:swc",
"language:tcz",
"language:sgc",
"language:aaz",
"language:tvl",
"language:zha",
"language:pib",
"language:pli",
"language:bru",
"language:quz",
"language:cha",
"language:aeu",
"language:nqo",
"language:mer",
"language:acf",
"language:rmc",
"language:msi",
"language:smj",
"language:kng",
"language:dhv",
"language:doi",
"language:zap",
"language:hla",
"language:cac",
"language:pdt",
"language:mbt",
"language:taq",
"language:bgp",
"language:tyz",
"language:aau",
"language:mah",
"language:nak",
"language:kmb",
"language:mjw",
"language:fuf",
"language:bba",
"language:amu",
"language:alz",
"language:cop",
"language:rar",
"language:emp",
"language:arc",
"language:nyn",
"language:rut",
"language:lub",
"language:tzj",
"language:fat",
"language:lzh",
"language:aak",
"language:nnb",
"language:lhu",
"language:bjv",
"language:bum",
"language:maq",
"language:roa",
"language:bvr",
"language:kcg",
"language:tok",
"language:yrk",
"language:khs",
"language:rup",
"language:qub",
"language:jac",
"language:hmr",
"language:sid",
"language:chr",
"language:amp",
"language:qug",
"language:mas",
"language:fkv",
"language:bem",
"language:cav",
"language:her",
"language:kwn",
"language:ify",
"language:gpe",
"language:zpa",
"language:agu",
"language:ppk",
"language:tiv",
"language:ahk",
"language:pis",
"language:hns",
"language:sus",
"language:bon",
"language:sja",
"language:kri",
"language:dik",
"language:wrk",
"language:got",
"language:dar",
"language:gui",
"language:mak",
"language:jiv",
"language:kjb",
"language:maz",
"language:bsn",
"language:dov",
"language:cok",
"language:rwo",
"language:mag",
"language:hnj",
"language:krj",
"language:teo",
"language:enq",
"language:aom",
"language:ffm",
"language:din",
"language:ubu",
"language:pwn",
"language:gum",
"language:sny",
"language:twu",
"language:med",
"language:acn",
"language:rme",
"language:cbs",
"language:abx",
"language:ndc",
"language:ibb",
"language:pih",
"language:zty",
"language:rad",
"language:aby",
"language:zne",
"language:cui",
"language:lua",
"language:yby",
"language:bno",
"language:niu",
"language:agd",
"language:nij",
"language:agg",
"language:yre",
"language:fip",
"language:qve",
"language:tsg",
"language:skg",
"language:adj",
"language:mfq",
"language:agx",
"language:kmg",
"language:umb",
"language:cnk",
"language:nhw",
"language:maa",
"language:kwy",
"language:jra",
"language:syr",
"language:inb",
"language:pau",
"language:xsm",
"language:vap",
"language:alp",
"language:des",
"language:syl",
"language:mkn",
"language:gan",
"language:qwh",
"language:jvn",
"language:bib",
"language:mpx",
"language:bin",
"language:klv",
"language:xon",
"language:jbu",
"language:bdh",
"language:knj",
"language:ptu",
"language:mmn",
"language:mwq",
"language:sop",
"language:cgc",
"language:dop",
"language:mgr",
"language:rmo",
"language:tbc",
"language:kyq",
"language:zpu",
"language:kkc",
"language:acu",
"language:ata",
"language:mif",
"language:tsz",
"language:mbi",
"language:imo",
"language:fuh",
"language:zyp",
"language:apr",
"language:att",
"language:bus",
"language:kqp",
"language:koo",
"language:bas",
"language:urh",
"language:bbr",
"language:msy",
"language:mdy",
"language:nas",
"language:lee",
"language:mcu",
"language:dig",
"language:izr",
"language:nfr",
"language:hto",
"language:mzw",
"language:qxh",
"language:chz",
"language:guh",
"language:ded",
"language:bvz",
"language:kkj",
"language:gux",
"language:ikt",
"language:bnp",
"language:bgt",
"language:quf",
"language:dwr",
"language:nod",
"language:yal",
"language:bmr",
"language:ljp",
"language:xla",
"language:zyb",
"language:gof",
"language:niv",
"language:nrf",
"language:npy",
"language:ura",
"language:dob",
"language:gfk",
"language:gdr",
"language:sdc",
"language:pjt",
"language:spp",
"language:irk",
"language:cpa",
"language:knc",
"language:gvl",
"language:agr",
"language:njo",
"language:xrb",
"language:kao",
"language:zpz",
"language:bgr",
"language:rnd",
"language:ntu",
"language:aai",
"language:gai",
"language:dgi",
"language:kez",
"language:dad",
"language:kmh",
"language:aii",
"language:ifk",
"language:aji",
"language:noa",
"language:bhl",
"language:ztq",
"language:gde",
"language:dnj",
"language:men",
"language:miq",
"language:isd",
"language:lgg",
"language:ncx",
"language:gub",
"language:itv",
"language:tac",
"language:cce",
"language:kwi",
"language:nhu",
"language:sml",
"language:pah",
"language:sas",
"language:cwt",
"language:spy",
"language:enb",
"language:fai",
"language:cas",
"language:sue",
"language:yss",
"language:ozm",
"language:bku",
"language:biv",
"language:akp",
"language:dts",
"language:kdr",
"language:fal",
"language:mmo",
"language:byr",
"language:cbv",
"language:cbu",
"language:eza",
"language:kgp",
"language:sda",
"language:gbo",
"language:mfi",
"language:bdd",
"language:boj",
"language:blh",
"language:ibg",
"language:agn",
"language:vmy",
"language:sua",
"language:gul",
"language:rap",
"language:bmh",
"language:sxn",
"language:dyi",
"language:tdx",
"language:car",
"language:heh",
"language:dgc",
"language:sgb",
"language:bpr",
"language:gbi",
"language:avu",
"language:udu",
"language:yli",
"language:cjs",
"language:gnn",
"language:nhi",
"language:urk",
"language:kpy",
"language:qup",
"language:qvh",
"language:pir",
"language:tsc",
"language:thk",
"language:grt",
"language:kqn",
"language:hay",
"language:mqb",
"language:tbg",
"language:bss",
"language:trn",
"language:jae",
"language:myy",
"language:ipk",
"language:bao",
"language:qvz",
"language:dua",
"language:mwv",
"language:nct",
"language:kyc",
"language:tuo",
"language:tee",
"language:mop",
"language:mxt",
"language:qvm",
"language:gkn",
"language:lam",
"language:ntr",
"language:sbe",
"language:dyo",
"language:lex",
"language:smk",
"language:coe",
"language:kpe",
"language:xbi",
"language:atb",
"language:tcf",
"language:bfo",
"language:cbc",
"language:swg",
"language:cnt",
"language:izz",
"language:knf",
"language:mpp",
"language:kki",
"language:nho",
"language:kbr",
"language:bmu",
"language:hig",
"language:qxr",
"language:orv",
"language:pma",
"language:zpl",
"language:kpr",
"language:sig",
"language:mxp",
"language:lef",
"language:mlp",
"language:soq",
"language:cre",
"language:aey",
"language:qvs",
"language:sur",
"language:suk",
"language:cax",
"language:alq",
"language:chw",
"language:dww",
"language:gog",
"language:viv",
"language:qvw",
"language:ted",
"language:yuj",
"language:ziw",
"language:kto",
"language:mcq",
"language:sim",
"language:lue",
"language:cya",
"language:knk",
"language:tpt",
"language:taj",
"language:blz",
"language:mil",
"language:kde",
"language:kpz",
"language:hag",
"language:vun",
"language:kus",
"language:lid",
"language:tos",
"language:bfd",
"language:chd",
"language:cle",
"language:yon",
"language:bgs",
"language:lsm",
"language:msb",
"language:ota",
"language:mxq",
"language:myw",
"language:aso",
"language:esu",
"language:kdi",
"language:bhp",
"language:mfz",
"language:sgw",
"language:xsi",
"language:gun",
"language:kxc",
"language:mux",
"language:otw",
"language:cjp",
"language:nyo",
"language:zia",
"language:cco",
"language:ojb",
"language:ktu",
"language:cpy",
"language:txu",
"language:bjr",
"language:bud",
"language:ken",
"language:bzh",
"language:ipi",
"language:crx",
"language:cko",
"language:gnd",
"language:kix",
"language:mmx",
"language:esk",
"language:kud",
"language:toh",
"language:byx",
"language:mtp",
"language:kyz",
"language:lmp",
"language:tiy",
"language:ccp",
"language:sbl",
"language:tcs",
"language:usp",
"language:buk",
"language:hvn",
"language:amm",
"language:kzj",
"language:gng",
"language:ter",
"language:kij",
"language:ngl",
"language:pab",
"language:pad",
"language:kca",
"language:meq",
"language:niq",
"language:dah",
"language:lia",
"language:kss",
"language:bch",
"language:khz",
"language:nop",
"language:yle",
"language:ain",
"language:pem",
"language:yml",
"language:gaw",
"language:kze",
"language:tby",
"language:cso",
"language:ifb",
"language:rki",
"language:aly",
"language:kmu",
"language:mhl",
"language:kmo",
"language:mva",
"language:opm",
"language:rtm",
"language:xtn",
"language:dgr",
"language:ksr",
"language:snp",
"language:tvk",
"language:zpv",
"language:hrx",
"language:kwj",
"language:muh",
"language:kak",
"language:kpw",
"language:akb",
"language:ybb",
"language:kjs",
"language:wuv",
"language:laj",
"language:ogo",
"language:nim",
"language:czt",
"language:aoz",
"language:rro",
"language:yuw",
"language:heg",
"language:mrw",
"language:zac",
"language:kpf",
"language:leu",
"language:nwi",
"language:awx",
"language:gso",
"language:keo",
"language:tte",
"language:ceg",
"language:nhy",
"language:bhw",
"language:gnw",
"language:nin",
"language:xmv",
"language:crm",
"language:mxv",
"language:ubr",
"language:gld",
"language:bzi",
"language:ann",
"language:lac",
"language:mej",
"language:zpc",
"language:dje",
"language:caq",
"language:guk",
"language:wos",
"language:msm",
"language:zaw",
"language:cap",
"language:mox",
"language:zab",
"language:mca",
"language:wrs",
"language:hae",
"language:nss",
"language:rav",
"language:uvh",
"language:dga",
"language:ppo",
"language:arq",
"language:bim",
"language:lwg",
"language:kms",
"language:kne",
"language:tkl",
"language:caf",
"language:zat",
"language:aba",
"language:chf",
"language:lew",
"language:npl",
"language:uvl",
"language:rmq",
"language:mek",
"language:snc",
"language:nsn",
"language:amn",
"language:abz",
"language:cek",
"language:ong",
"language:txq",
"language:bjp",
"language:gvf",
"language:crk",
"language:snf",
"language:nhg",
"language:kqw",
"language:mic",
"language:mie",
"language:nus",
"language:plu",
"language:idu",
"language:sll",
"language:thl",
"language:bef",
"language:kwd",
"language:mqy",
"language:caa",
"language:aia",
"language:wer",
"language:agm",
"language:mbl",
"language:mbs",
"language:adh",
"language:hub",
"language:mav",
"language:nab",
"language:bqp",
"language:guo",
"language:auy",
"language:big",
"language:nii",
"language:row",
"language:kim",
"language:mti",
"language:otm",
"language:prg",
"language:icr",
"language:mcp",
"language:tbo",
"language:chy",
"language:oji",
"language:kpx",
"language:lbb",
"language:pao",
"language:xuo",
"language:zgh",
"language:wls",
"language:aca",
"language:apy",
"language:wnc",
"language:ssg",
"language:eve",
"language:mee",
"language:yut",
"language:kxm",
"language:mur",
"language:mcd",
"language:cto",
"language:mqj",
"language:usa",
"language:emi",
"language:liv",
"language:guz",
"language:tgp",
"language:ncl",
"language:azg",
"language:kck",
"language:apb",
"language:kbm",
"language:sgz",
"language:wsk",
"language:gbm",
"language:csw",
"language:ctp",
"language:eri",
"language:vid",
"language:bsq",
"language:dak",
"language:tif",
"language:xsr",
"language:mwp",
"language:roo",
"language:wbp",
"language:gdn",
"language:tgo",
"language:zpm",
"language:mih",
"language:qvc",
"language:tod",
"language:abn",
"language:cao",
"language:kbc",
"language:kog",
"language:moh",
"language:leh",
"language:nsu",
"language:okv",
"language:tih",
"language:zos",
"language:llg",
"language:gmv",
"language:kyf",
"language:mhx",
"language:mvp",
"language:naf",
"language:ino",
"language:dng",
"language:mnk",
"language:oke",
"language:omw",
"language:tnk",
"language:xtd",
"language:agt",
"language:cbr",
"language:eko",
"language:nhk",
"language:hot",
"language:iou",
"language:mpm",
"language:myx",
"language:ndh",
"language:pbb",
"language:rng",
"language:cri",
"language:bvd",
"language:kmz",
"language:cta",
"language:blw",
"language:clu",
"language:cof",
"language:kje",
"language:nmf",
"language:kbh",
"language:qxn",
"language:ria",
"language:wed",
"language:zar",
"language:huv",
"language:lsi",
"language:msk",
"language:nbq",
"language:ati",
"language:lww",
"language:yva",
"language:amr",
"language:lmk",
"language:tim",
"language:bbb",
"language:bzd",
"language:djr",
"language:kwf",
"language:nuy",
"language:poi",
"language:ptp",
"language:atd",
"language:bps",
"language:lcm",
"language:mbh",
"language:nuj",
"language:ayp",
"language:cwe",
"language:geb",
"language:aoi",
"language:mto",
"language:cbi",
"language:gvn",
"language:kqf",
"language:rkb",
"language:xed",
"language:zas",
"language:apz",
"language:cpb",
"language:ese",
"language:gdg",
"language:iws",
"language:apu",
"language:cag",
"language:kdc",
"language:nou",
"language:too",
"language:xmm",
"language:beq",
"language:hnn",
"language:myk",
"language:sby",
"language:arl",
"language:ncu",
"language:rml",
"language:gam",
"language:kvn",
"language:mbc",
"language:mfy",
"language:urt",
"language:luc",
"language:mio",
"language:snn",
"language:tar",
"language:tku",
"language:upv",
"language:kjg",
"language:btd",
"language:cut",
"language:mwn",
"language:tue",
"language:adz",
"language:kyg",
"language:rel",
"language:tew",
"language:ttc",
"language:nkf",
"language:yrb",
"language:fub",
"language:nsm",
"language:nyf",
"language:bkd",
"language:cpc",
"language:lif",
"language:mwc",
"language:apw",
"language:azz",
"language:zpo",
"language:faa",
"language:ood",
"language:toc",
"language:wmt",
"language:ajg",
"language:nko",
"language:ssd",
"language:sxb",
"language:zxx",
"language:not",
"language:amk",
"language:hch",
"language:kqc",
"language:aui",
"language:llb",
"language:avt",
"language:kaq",
"language:tna",
"language:zpt",
"language:bwd",
"language:stn",
"language:kgk",
"language:mck",
"language:bhg",
"language:ifu",
"language:kkl",
"language:zao",
"language:huu",
"language:sil",
"language:trq",
"language:tuf",
"language:wnu",
"language:amf",
"language:fan",
"language:zdj",
"language:bco",
"language:kdl",
"language:lln",
"language:mit",
"language:yaq",
"language:ake",
"language:ige",
"language:mna",
"language:nhx",
"language:njn",
"language:wba",
"language:zaa",
"language:syb",
"language:qxo",
"language:rgu",
"language:wiu",
"language:bla",
"language:etr",
"language:miy",
"language:mjc",
"language:nif",
"language:pov",
"language:qxl",
"language:sey",
"language:bxh",
"language:ddg",
"language:mks",
"language:nki",
"language:pot",
"language:prf",
"language:hra",
"language:law",
"language:plw",
"language:zpi",
"language:atg",
"language:daf",
"language:drg",
"language:dru",
"language:ngp",
"language:boa",
"language:cbt",
"language:dhm",
"language:ikw",
"language:kqe",
"language:lbj",
"language:loq",
"language:xsb",
"language:yad",
"language:kyu",
"language:mcb",
"language:tks",
"language:cnl",
"language:far",
"language:miz",
"language:maj",
"language:mgm",
"language:mib",
"language:mza",
"language:nhr",
"language:smt",
"language:ish",
"language:mlu",
"language:yka",
"language:anv",
"language:ign",
"language:mir",
"language:tav",
"language:gyr",
"language:klt",
"language:apn",
"language:wbm",
"language:cjk",
"language:dln",
"language:mcf",
"language:twx",
"language:ldi",
"language:poy",
"language:yaa",
"language:zae",
"language:bdq",
"language:con",
"language:dnw",
"language:gah",
"language:lcp",
"language:mbj",
"language:xog",
"language:yan",
"language:sdq",
"language:sgh",
"language:gwi",
"language:lbk",
"language:ses",
"language:ycn",
"language:kau",
"language:hix",
"language:mig",
"language:mlh",
"language:aer",
"language:amx",
"language:auc",
"language:cly",
"language:dsh",
"language:gnb",
"language:neb",
"language:ttt",
"language:jic",
"language:xtm",
"language:tll",
"language:apt",
"language:kgr",
"language:pio",
"language:pmf",
"language:akh",
"language:box",
"language:cub",
"language:cux",
"language:mrq",
"language:bsp",
"language:for",
"language:kny",
"language:uri",
"language:zsr",
"language:pce",
"language:cme",
"language:cot",
"language:kxw",
"language:omb",
"language:srq",
"language:vag",
"language:cpu",
"language:kmy",
"language:ksc",
"language:lai",
"language:nnp",
"language:ruf",
"language:spm",
"language:tro",
"language:ttj",
"language:waj",
"language:raj",
"language:ian",
"language:ivb",
"language:kmm",
"language:kzf",
"language:maw",
"language:tuv",
"language:whk",
"language:aph",
"language:anm",
"language:mny",
"language:trp",
"language:are",
"language:esi",
"language:mhi",
"language:obo",
"language:bwu",
"language:lwo",
"language:mrg",
"language:old",
"language:zav",
"language:snk",
"language:daa",
"language:fuq",
"language:lud",
"language:nlg",
"language:sck",
"language:tbl",
"language:khb",
"language:tpu",
"language:fue",
"language:tmd",
"language:whg",
"language:blt",
"language:gqr",
"language:kub",
"language:ots",
"language:tpp",
"language:var",
"language:hbb",
"language:krr",
"language:alj",
"language:ebk",
"language:goa",
"language:hav",
"language:kmk",
"language:kqo",
"language:muy",
"language:njb",
"language:snw",
"language:wib",
"language:tkr",
"language:ksp",
"language:lip",
"language:mog",
"language:nla",
"language:pss",
"language:zad",
"language:awi",
"language:frd",
"language:kei",
"language:kmd",
"language:mim",
"language:adi",
"language:any",
"language:arp",
"language:biu",
"language:bvc",
"language:ghs",
"language:ttq",
"language:vmk",
"language:zpj",
"language:crl",
"language:dtb",
"language:dug",
"language:kam",
"language:nmo",
"language:qva",
"language:rhg",
"language:taw",
"language:tui",
"language:uth",
"language:wap",
"language:zpg",
"language:brb",
"language:bqj",
"language:cjv",
"language:cnw",
"language:kgf",
"language:mfh",
"language:mns",
"language:nnw",
"language:ntp",
"language:oto",
"language:dks",
"language:mpt",
"language:nmw",
"language:sba",
"language:sfw",
"language:shu",
"language:ury",
"language:yrl",
"language:xkg",
"language:iqw",
"language:kpj",
"language:nfa",
"language:nph",
"language:nre",
"language:wsg",
"language:xsu",
"language:nut",
"language:bmk",
"language:cdf",
"language:enl",
"language:hop",
"language:ife",
"language:lnd",
"language:mrv",
"language:nzm",
"language:tem",
"language:gwc",
"language:udi",
"language:ajz",
"language:kru",
"language:qvo",
"language:sld",
"language:yup",
"language:kwu",
"language:tdg",
"language:ade",
"language:bcw",
"language:bgz",
"language:cul",
"language:fmu",
"language:gvc",
"language:ktj",
"language:lis",
"language:mev",
"language:msc",
"language:mta",
"language:nbc",
"language:ncq",
"language:way",
"language:wlv",
"language:wlx",
"language:jmx",
"language:wni",
"language:aim",
"language:byv",
"language:cgg",
"language:fud",
"language:jmc",
"language:ker",
"language:kia",
"language:lom",
"language:tnc",
"language:bwx",
"language:gjn",
"language:guq",
"language:lap",
"language:mip",
"language:mkl",
"language:mtg",
"language:mzk",
"language:njm",
"language:nse",
"language:pne",
"language:pww",
"language:zam",
"language:zca",
"language:the",
"language:dis",
"language:lgl",
"language:mbd",
"language:mda",
"language:mhy",
"language:nst",
"language:pkb",
"language:pmq",
"language:poe",
"language:swk",
"language:trs",
"language:vot",
"language:yam",
"language:zpq",
"language:atq",
"language:bav",
"language:cho",
"language:chq",
"language:cjo",
"language:due",
"language:duo",
"language:etu",
"language:ifa",
"language:kby",
"language:khq",
"language:kno",
"language:lem",
"language:mcn",
"language:mkz",
"language:mnb",
"language:myb",
"language:niy",
"language:pxm",
"language:rub",
"language:sju",
"language:tmc",
"language:tpm",
"language:wob",
"language:nxa",
"language:sdk",
"language:bhz",
"language:bth",
"language:crj",
"language:eka",
"language:khy",
"language:led",
"language:met",
"language:mgo",
"language:mus",
"language:mzm",
"language:naw",
"language:sri",
"language:tbk",
"language:xbr",
"language:tnl",
"language:tnn",
"language:awb",
"language:bsc",
"language:iri",
"language:kpq",
"language:ktm",
"language:loe",
"language:lun",
"language:mfg",
"language:moa",
"language:ndj",
"language:otn",
"language:sjo",
"language:wat",
"language:bkm",
"language:tuz",
"language:ybi",
"language:gbr",
"language:gej",
"language:giz",
"language:gna",
"language:gwr",
"language:kex",
"language:kle",
"language:knx",
"language:ktz",
"language:kvj",
"language:lob",
"language:mhw",
"language:mph",
"language:pmx",
"language:qus",
"language:tcc",
"language:azn",
"language:bra",
"language:gal",
"language:nlv",
"language:stk",
"language:yux",
"language:dip",
"language:ivv",
"language:kdj",
"language:kff",
"language:mnf",
"language:moc",
"language:mpg",
"language:mua",
"language:nma",
"language:rim",
"language:rjs",
"language:rnl",
"language:ssx",
"language:tig",
"language:urw",
"language:idt",
"language:mrn",
"language:oki",
"language:bbk",
"language:bea",
"language:bkq",
"language:btt",
"language:cou",
"language:ddn",
"language:did",
"language:diu",
"language:ilb",
"language:ksj",
"language:lhi",
"language:ndi",
"language:ngb",
"language:njz",
"language:nmz",
"language:npo",
"language:nwb",
"language:pbi",
"language:sbd",
"language:soe",
"language:tpw",
"language:twb",
"language:bfn",
"language:boz",
"language:lwl",
"language:psp",
"language:sgd",
"language:ymp",
"language:bkl",
"language:bkv",
"language:csk",
"language:enx",
"language:ess",
"language:gud",
"language:guu",
"language:kdh",
"language:krx",
"language:kzn",
"language:maf",
"language:max",
"language:mgc",
"language:mvn",
"language:mwm",
"language:nbe",
"language:nlc",
"language:nxd",
"language:pse",
"language:sbs",
"language:tlb",
"language:wwa",
"language:yim",
"language:bkx",
"language:bze",
"language:kbq",
"language:mry",
"language:tkd",
"language:ybh",
"language:abi",
"language:ayo",
"language:bex",
"language:bom",
"language:bov",
"language:dhg",
"language:jaa",
"language:kqs",
"language:kqy",
"language:ksb",
"language:ldn",
"language:log",
"language:lol",
"language:mfk",
"language:mtj",
"language:ndz",
"language:ngc",
"language:nio",
"language:nri",
"language:nuz",
"language:pps",
"language:shk",
"language:tlj",
"language:vut",
"language:xnn",
"language:bcc",
"language:cuh",
"language:hro",
"language:nco",
"language:nxl",
"language:pex",
"language:tnp",
"language:vif",
"language:aha",
"language:avn",
"language:bmv",
"language:chj",
"language:crt",
"language:dos",
"language:eto",
"language:gya",
"language:hlt",
"language:jun",
"language:ktb",
"language:lgm",
"language:mnx",
"language:mor",
"language:mug",
"language:nng",
"language:nnh",
"language:nsa",
"language:nyk",
"language:otd",
"language:oym",
"language:phm",
"language:prq",
"language:saj",
"language:szb",
"language:tap",
"language:tnr",
"language:tzl",
"language:wew",
"language:yas",
"language:yuz",
"language:baw",
"language:bwt",
"language:bxa",
"language:fli",
"language:kqr",
"language:lan",
"language:lgr",
"language:lhm",
"language:mlk",
"language:mve",
"language:odk",
"language:pea",
"language:saq",
"language:sea",
"language:sps",
"language:tio",
"language:yin",
"language:adl",
"language:agw",
"language:ald",
"language:asg",
"language:bnj",
"language:bwi",
"language:bwq",
"language:dbq",
"language:dow",
"language:fad",
"language:hoc",
"language:kql",
"language:ksf",
"language:lea",
"language:mse",
"language:mzl",
"language:nhd",
"language:nnl",
"language:nwx",
"language:pbc",
"language:pny",
"language:tul",
"language:aaa",
"language:abc",
"language:agq",
"language:ags",
"language:azo",
"language:bag",
"language:bax",
"language:bce",
"language:bfm",
"language:bgf",
"language:bhs",
"language:bkc",
"language:bkh",
"language:bob",
"language:bqm",
"language:bri",
"language:brv",
"language:buo",
"language:bya",
"language:chp",
"language:cim",
"language:clo",
"language:cuv",
"language:dmg",
"language:dtr",
"language:eee",
"language:ekm",
"language:enc",
"language:gbj",
"language:gou",
"language:hao",
"language:hna",
"language:hre",
"language:isu",
"language:jgo",
"language:kbx",
"language:kvt",
"language:kxp",
"language:lfa",
"language:lkb",
"language:lns",
"language:loh",
"language:lts",
"language:luy",
"language:mdr",
"language:mfj",
"language:mgg",
"language:mgq",
"language:mle",
"language:mlw",
"language:mmu",
"language:mne",
"language:muv",
"language:mxu",
"language:nge",
"language:ngn",
"language:njy",
"language:nsk",
"language:nwe",
"language:nza",
"language:pcg",
"language:pdu",
"language:pnz",
"language:pwg",
"language:qaa",
"language:snl",
"language:sox",
"language:ssn",
"language:syw",
"language:tbj",
"language:tdb",
"language:thy",
"language:tnt",
"language:tom",
"language:tpl",
"language:tsb",
"language:tvs",
"language:udg",
"language:unr",
"language:wbr",
"language:wms",
"language:wnk",
"language:wtk",
"language:xmd",
"language:xmg",
"language:xty",
"language:yav",
"language:yea",
"language:zuh",
"license:other",
"arxiv:2502.11546",
"region:us",
"multilingual"
] | null | "2025-03-25T04:55:24Z" | ---
license: other
pretty_name: dcad-2000
license_name: license
license_link: LICENSE
configs:
- config_name: eng_Latn
data_files:
- split: train
path: eng_Latn/*_keep.jsonl
- config_name: rus_Cyrl
data_files:
- split: train
path: rus_Cyrl/*_keep.jsonl
- config_name: cmn_Hani
data_files:
- split: train
path: cmn_Hani/*_keep.jsonl
- config_name: deu_Latn
data_files:
- split: train
path: deu_Latn/*_keep.jsonl
- config_name: spa_Latn
data_files:
- split: train
path: spa_Latn/*_keep.jsonl
- config_name: fra_Latn
data_files:
- split: train
path: fra_Latn/*_keep.jsonl
- config_name: jpn_Jpan
data_files:
- split: train
path: jpn_Jpan/*_keep.jsonl
- config_name: ita_Latn
data_files:
- split: train
path: ita_Latn/*_keep.jsonl
- config_name: por_Latn
data_files:
- split: train
path: por_Latn/*_keep.jsonl
- config_name: pol_Latn
data_files:
- split: train
path: pol_Latn/*_keep.jsonl
- config_name: nld_Latn
data_files:
- split: train
path: nld_Latn/*_keep.jsonl
- config_name: ind_Latn
data_files:
- split: train
path: ind_Latn/*_keep.jsonl
- config_name: tur_Latn
data_files:
- split: train
path: tur_Latn/*_keep.jsonl
- config_name: vie_Latn
data_files:
- split: train
path: vie_Latn/*_keep.jsonl
- config_name: fas_Arab
data_files:
- split: train
path: fas_Arab/*_keep.jsonl
- config_name: kor_Hang
data_files:
- split: train
path: kor_Hang/*_keep.jsonl
- config_name: swe_Latn
data_files:
- split: train
path: swe_Latn/*_keep.jsonl
- config_name: hun_Latn
data_files:
- split: train
path: hun_Latn/*_keep.jsonl
- config_name: ell_Grek
data_files:
- split: train
path: ell_Grek/*_keep.jsonl
- config_name: ukr_Cyrl
data_files:
- split: train
path: ukr_Cyrl/*_keep.jsonl
- config_name: tha_Thai
data_files:
- split: train
path: tha_Thai/*_keep.jsonl
- config_name: aze_Latn
data_files:
- split: train
path: aze_Latn/*_keep.jsonl
- config_name: arb_Arab
data_files:
- split: train
path: arb_Arab/*_keep.jsonl
- config_name: slv_Latn
data_files:
- split: train
path: slv_Latn/*_keep.jsonl
- config_name: cat_Latn
data_files:
- split: train
path: cat_Latn/*_keep.jsonl
- config_name: fin_Latn
data_files:
- split: train
path: fin_Latn/*_keep.jsonl
- config_name: hbs_Latn
data_files:
- split: train
path: hbs_Latn/*_keep.jsonl
- config_name: ces_Latn
data_files:
- split: train
path: ces_Latn/*_keep.jsonl
- config_name: fil_Latn
data_files:
- split: train
path: fil_Latn/*_keep.jsonl
- config_name: guj_Gujr
data_files:
- split: train
path: guj_Gujr/*_keep.jsonl
- config_name: nob_Latn
data_files:
- split: train
path: nob_Latn/*_keep.jsonl
- config_name: mal_Mlym
data_files:
- split: train
path: mal_Mlym/*_keep.jsonl
- config_name: bul_Cyrl
data_files:
- split: train
path: bul_Cyrl/*_keep.jsonl
- config_name: kan_Knda
data_files:
- split: train
path: kan_Knda/*_keep.jsonl
- config_name: hin_Deva
data_files:
- split: train
path: hin_Deva/*_keep.jsonl
- config_name: tam_Taml
data_files:
- split: train
path: tam_Taml/*_keep.jsonl
- config_name: ara_Arab
data_files:
- split: train
path: ara_Arab/*_keep.jsonl
- config_name: kaz_Cyrl
data_files:
- split: train
path: kaz_Cyrl/*_keep.jsonl
- config_name: sqi_Latn
data_files:
- split: train
path: sqi_Latn/*_keep.jsonl
- config_name: pan_Guru
data_files:
- split: train
path: pan_Guru/*_keep.jsonl
- config_name: est_Latn
data_files:
- split: train
path: est_Latn/*_keep.jsonl
- config_name: srp_Cyrl
data_files:
- split: train
path: srp_Cyrl/*_keep.jsonl
- config_name: heb_Hebr
data_files:
- split: train
path: heb_Hebr/*_keep.jsonl
- config_name: isl_Latn
data_files:
- split: train
path: isl_Latn/*_keep.jsonl
- config_name: mlt_Latn
data_files:
- split: train
path: mlt_Latn/*_keep.jsonl
- config_name: mkd_Cyrl
data_files:
- split: train
path: mkd_Cyrl/*_keep.jsonl
- config_name: bos_Latn
data_files:
- split: train
path: bos_Latn/*_keep.jsonl
- config_name: lit_Latn
data_files:
- split: train
path: lit_Latn/*_keep.jsonl
- config_name: kat_Geor
data_files:
- split: train
path: kat_Geor/*_keep.jsonl
- config_name: hrv_Latn
data_files:
- split: train
path: hrv_Latn/*_keep.jsonl
- config_name: ben_Beng
data_files:
- split: train
path: ben_Beng/*_keep.jsonl
- config_name: glg_Latn
data_files:
- split: train
path: glg_Latn/*_keep.jsonl
- config_name: ceb_Latn
data_files:
- split: train
path: ceb_Latn/*_keep.jsonl
- config_name: ron_Latn
data_files:
- split: train
path: ron_Latn/*_keep.jsonl
- config_name: msa_Latn
data_files:
- split: train
path: msa_Latn/*_keep.jsonl
- config_name: hye_Armn
data_files:
- split: train
path: hye_Armn/*_keep.jsonl
- config_name: tgk_Cyrl
data_files:
- split: train
path: tgk_Cyrl/*_keep.jsonl
- config_name: mar_Deva
data_files:
- split: train
path: mar_Deva/*_keep.jsonl
- config_name: bel_Cyrl
data_files:
- split: train
path: bel_Cyrl/*_keep.jsonl
- config_name: nep_Deva
data_files:
- split: train
path: nep_Deva/*_keep.jsonl
- config_name: urd_Arab
data_files:
- split: train
path: urd_Arab/*_keep.jsonl
- config_name: slk_Latn
data_files:
- split: train
path: slk_Latn/*_keep.jsonl
- config_name: mon_Cyrl
data_files:
- split: train
path: mon_Cyrl/*_keep.jsonl
- config_name: swa_Latn
data_files:
- split: train
path: swa_Latn/*_keep.jsonl
- config_name: dan_Latn
data_files:
- split: train
path: dan_Latn/*_keep.jsonl
- config_name: eus_Latn
data_files:
- split: train
path: eus_Latn/*_keep.jsonl
- config_name: azj_Latn
data_files:
- split: train
path: azj_Latn/*_keep.jsonl
- config_name: sin_Sinh
data_files:
- split: train
path: sin_Sinh/*_keep.jsonl
- config_name: lat_Latn
data_files:
- split: train
path: lat_Latn/*_keep.jsonl
- config_name: als_Latn
data_files:
- split: train
path: als_Latn/*_keep.jsonl
- config_name: tel_Telu
data_files:
- split: train
path: tel_Telu/*_keep.jsonl
- config_name: afr_Latn
data_files:
- split: train
path: afr_Latn/*_keep.jsonl
- config_name: ekk_Latn
data_files:
- split: train
path: ekk_Latn/*_keep.jsonl
- config_name: ltz_Latn
data_files:
- split: train
path: ltz_Latn/*_keep.jsonl
- config_name: zsm_Latn
data_files:
- split: train
path: zsm_Latn/*_keep.jsonl
- config_name: som_Latn
data_files:
- split: train
path: som_Latn/*_keep.jsonl
- config_name: cym_Latn
data_files:
- split: train
path: cym_Latn/*_keep.jsonl
- config_name: und_Kana
data_files:
- split: train
path: und_Kana/*_keep.jsonl
- config_name: kir_Cyrl
data_files:
- split: train
path: kir_Cyrl/*_keep.jsonl
- config_name: nor_Latn
data_files:
- split: train
path: nor_Latn/*_keep.jsonl
- config_name: uzb_Latn
data_files:
- split: train
path: uzb_Latn/*_keep.jsonl
- config_name: mya_Mymr
data_files:
- split: train
path: mya_Mymr/*_keep.jsonl
- config_name: epo_Latn
data_files:
- split: train
path: epo_Latn/*_keep.jsonl
- config_name: hau_Latn
data_files:
- split: train
path: hau_Latn/*_keep.jsonl
- config_name: ary_Arab
data_files:
- split: train
path: ary_Arab/*_keep.jsonl
- config_name: gle_Latn
data_files:
- split: train
path: gle_Latn/*_keep.jsonl
- config_name: lvs_Latn
data_files:
- split: train
path: lvs_Latn/*_keep.jsonl
- config_name: nno_Latn
data_files:
- split: train
path: nno_Latn/*_keep.jsonl
- config_name: und_Bamu
data_files:
- split: train
path: und_Bamu/*_keep.jsonl
- config_name: ory_Orya
data_files:
- split: train
path: ory_Orya/*_keep.jsonl
- config_name: amh_Ethi
data_files:
- split: train
path: amh_Ethi/*_keep.jsonl
- config_name: tat_Cyrl
data_files:
- split: train
path: tat_Cyrl/*_keep.jsonl
- config_name: khm_Khmr
data_files:
- split: train
path: khm_Khmr/*_keep.jsonl
- config_name: und_Copt
data_files:
- split: train
path: und_Copt/*_keep.jsonl
- config_name: und_Tang
data_files:
- split: train
path: und_Tang/*_keep.jsonl
- config_name: und_Xsux
data_files:
- split: train
path: und_Xsux/*_keep.jsonl
- config_name: arz_Arab
data_files:
- split: train
path: arz_Arab/*_keep.jsonl
- config_name: pus_Arab
data_files:
- split: train
path: pus_Arab/*_keep.jsonl
- config_name: lav_Latn
data_files:
- split: train
path: lav_Latn/*_keep.jsonl
- config_name: hbs_Cyrl
data_files:
- split: train
path: hbs_Cyrl/*_keep.jsonl
- config_name: und_Yiii
data_files:
- split: train
path: und_Yiii/*_keep.jsonl
- config_name: war_Latn
data_files:
- split: train
path: war_Latn/*_keep.jsonl
- config_name: multi_Latn
data_files:
- split: train
path: multi_Latn/*_keep.jsonl
- config_name: und_Hira
data_files:
- split: train
path: und_Hira/*_keep.jsonl
- config_name: mlg_Latn
data_files:
- split: train
path: mlg_Latn/*_keep.jsonl
- config_name: uzn_Cyrl
data_files:
- split: train
path: uzn_Cyrl/*_keep.jsonl
- config_name: hat_Latn
data_files:
- split: train
path: hat_Latn/*_keep.jsonl
- config_name: zul_Latn
data_files:
- split: train
path: zul_Latn/*_keep.jsonl
- config_name: kur_Latn
data_files:
- split: train
path: kur_Latn/*_keep.jsonl
- config_name: tgl_Latn
data_files:
- split: train
path: tgl_Latn/*_keep.jsonl
- config_name: uzb_Cyrl
data_files:
- split: train
path: uzb_Cyrl/*_keep.jsonl
- config_name: div_Thaa
data_files:
- split: train
path: div_Thaa/*_keep.jsonl
- config_name: und_Laoo
data_files:
- split: train
path: und_Laoo/*_keep.jsonl
- config_name: fry_Latn
data_files:
- split: train
path: fry_Latn/*_keep.jsonl
- config_name: sna_Latn
data_files:
- split: train
path: sna_Latn/*_keep.jsonl
- config_name: und_Cyrl
data_files:
- split: train
path: und_Cyrl/*_keep.jsonl
- config_name: fao_Latn
data_files:
- split: train
path: fao_Latn/*_keep.jsonl
- config_name: sun_Latn
data_files:
- split: train
path: sun_Latn/*_keep.jsonl
- config_name: und_Kits
data_files:
- split: train
path: und_Kits/*_keep.jsonl
- config_name: und_Hluw
data_files:
- split: train
path: und_Hluw/*_keep.jsonl
- config_name: snd_Arab
data_files:
- split: train
path: snd_Arab/*_keep.jsonl
- config_name: cos_Latn
data_files:
- split: train
path: cos_Latn/*_keep.jsonl
- config_name: bak_Cyrl
data_files:
- split: train
path: bak_Cyrl/*_keep.jsonl
- config_name: ckb_Arab
data_files:
- split: train
path: ckb_Arab/*_keep.jsonl
- config_name: asm_Beng
data_files:
- split: train
path: asm_Beng/*_keep.jsonl
- config_name: und_Grek
data_files:
- split: train
path: und_Grek/*_keep.jsonl
- config_name: jpn_Japn
data_files:
- split: train
path: jpn_Japn/*_keep.jsonl
- config_name: ast_Latn
data_files:
- split: train
path: ast_Latn/*_keep.jsonl
- config_name: ibo_Latn
data_files:
- split: train
path: ibo_Latn/*_keep.jsonl
- config_name: mri_Latn
data_files:
- split: train
path: mri_Latn/*_keep.jsonl
- config_name: und_Lina
data_files:
- split: train
path: und_Lina/*_keep.jsonl
- config_name: ars_Arab
data_files:
- split: train
path: ars_Arab/*_keep.jsonl
- config_name: und_Shrd
data_files:
- split: train
path: und_Shrd/*_keep.jsonl
- config_name: anp_Deva
data_files:
- split: train
path: anp_Deva/*_keep.jsonl
- config_name: khk_Cyrl
data_files:
- split: train
path: khk_Cyrl/*_keep.jsonl
- config_name: und_Samr
data_files:
- split: train
path: und_Samr/*_keep.jsonl
- config_name: lao_Laoo
data_files:
- split: train
path: lao_Laoo/*_keep.jsonl
- config_name: ori_Orya
data_files:
- split: train
path: ori_Orya/*_keep.jsonl
- config_name: und_Cans
data_files:
- split: train
path: und_Cans/*_keep.jsonl
- config_name: yid_Hebr
data_files:
- split: train
path: yid_Hebr/*_keep.jsonl
- config_name: hmn_Latn
data_files:
- split: train
path: hmn_Latn/*_keep.jsonl
- config_name: jav_Latn
data_files:
- split: train
path: jav_Latn/*_keep.jsonl
- config_name: nya_Latn
data_files:
- split: train
path: nya_Latn/*_keep.jsonl
- config_name: und_Mend
data_files:
- split: train
path: und_Mend/*_keep.jsonl
- config_name: und_Adlm
data_files:
- split: train
path: und_Adlm/*_keep.jsonl
- config_name: und_Egyp
data_files:
- split: train
path: und_Egyp/*_keep.jsonl
- config_name: tir_Ethi
data_files:
- split: train
path: tir_Ethi/*_keep.jsonl
- config_name: uig_Arab
data_files:
- split: train
path: uig_Arab/*_keep.jsonl
- config_name: wln_Latn
data_files:
- split: train
path: wln_Latn/*_keep.jsonl
- config_name: und_Syrc
data_files:
- split: train
path: und_Syrc/*_keep.jsonl
- config_name: yor_Latn
data_files:
- split: train
path: yor_Latn/*_keep.jsonl
- config_name: swh_Latn
data_files:
- split: train
path: swh_Latn/*_keep.jsonl
- config_name: gla_Latn
data_files:
- split: train
path: gla_Latn/*_keep.jsonl
- config_name: xho_Latn
data_files:
- split: train
path: xho_Latn/*_keep.jsonl
- config_name: uzn_Latn
data_files:
- split: train
path: uzn_Latn/*_keep.jsonl
- config_name: bre_Latn
data_files:
- split: train
path: bre_Latn/*_keep.jsonl
- config_name: sot_Latn
data_files:
- split: train
path: sot_Latn/*_keep.jsonl
- config_name: che_Cyrl
data_files:
- split: train
path: che_Cyrl/*_keep.jsonl
- config_name: nan_Latn
data_files:
- split: train
path: nan_Latn/*_keep.jsonl
- config_name: tel_Latn
data_files:
- split: train
path: tel_Latn/*_keep.jsonl
- config_name: bew_Latn
data_files:
- split: train
path: bew_Latn/*_keep.jsonl
- config_name: glk_Arab
data_files:
- split: train
path: glk_Arab/*_keep.jsonl
- config_name: smo_Latn
data_files:
- split: train
path: smo_Latn/*_keep.jsonl
- config_name: orm_Latn
data_files:
- split: train
path: orm_Latn/*_keep.jsonl
- config_name: zho_Hani
data_files:
- split: train
path: zho_Hani/*_keep.jsonl
- config_name: haw_Latn
data_files:
- split: train
path: haw_Latn/*_keep.jsonl
- config_name: pnb_Arab
data_files:
- split: train
path: pnb_Arab/*_keep.jsonl
- config_name: und_Linb
data_files:
- split: train
path: und_Linb/*_keep.jsonl
- config_name: oci_Latn
data_files:
- split: train
path: oci_Latn/*_keep.jsonl
- config_name: chv_Cyrl
data_files:
- split: train
path: chv_Cyrl/*_keep.jsonl
- config_name: kin_Latn
data_files:
- split: train
path: kin_Latn/*_keep.jsonl
- config_name: und_Brai
data_files:
- split: train
path: und_Brai/*_keep.jsonl
- config_name: srp_Latn
data_files:
- split: train
path: srp_Latn/*_keep.jsonl
- config_name: und_Hung
data_files:
- split: train
path: und_Hung/*_keep.jsonl
- config_name: und_Sgnw
data_files:
- split: train
path: und_Sgnw/*_keep.jsonl
- config_name: pcm_Latn
data_files:
- split: train
path: pcm_Latn/*_keep.jsonl
- config_name: kaa_Cyrl
data_files:
- split: train
path: kaa_Cyrl/*_keep.jsonl
- config_name: lug_Latn
data_files:
- split: train
path: lug_Latn/*_keep.jsonl
- config_name: pbt_Arab
data_files:
- split: train
path: pbt_Arab/*_keep.jsonl
- config_name: aze_Arab
data_files:
- split: train
path: aze_Arab/*_keep.jsonl
- config_name: gsw_Latn
data_files:
- split: train
path: gsw_Latn/*_keep.jsonl
- config_name: min_Latn
data_files:
- split: train
path: min_Latn/*_keep.jsonl
- config_name: tuk_Latn
data_files:
- split: train
path: tuk_Latn/*_keep.jsonl
- config_name: lim_Latn
data_files:
- split: train
path: lim_Latn/*_keep.jsonl
- config_name: kmr_Latn
data_files:
- split: train
path: kmr_Latn/*_keep.jsonl
- config_name: und_Nshu
data_files:
- split: train
path: und_Nshu/*_keep.jsonl
- config_name: roh_Latn
data_files:
- split: train
path: roh_Latn/*_keep.jsonl
- config_name: und_Bali
data_files:
- split: train
path: und_Bali/*_keep.jsonl
- config_name: und_Lana
data_files:
- split: train
path: und_Lana/*_keep.jsonl
- config_name: vec_Latn
data_files:
- split: train
path: vec_Latn/*_keep.jsonl
- config_name: san_Deva
data_files:
- split: train
path: san_Deva/*_keep.jsonl
- config_name: und_Modi
data_files:
- split: train
path: und_Modi/*_keep.jsonl
- config_name: gmh_Latn
data_files:
- split: train
path: gmh_Latn/*_keep.jsonl
- config_name: nds_Latn
data_files:
- split: train
path: nds_Latn/*_keep.jsonl
- config_name: sco_Latn
data_files:
- split: train
path: sco_Latn/*_keep.jsonl
- config_name: und_Mong
data_files:
- split: train
path: und_Mong/*_keep.jsonl
- config_name: und_Ethi
data_files:
- split: train
path: und_Ethi/*_keep.jsonl
- config_name: azb_Arab
data_files:
- split: train
path: azb_Arab/*_keep.jsonl
- config_name: tsn_Latn
data_files:
- split: train
path: tsn_Latn/*_keep.jsonl
- config_name: rus_Latn
data_files:
- split: train
path: rus_Latn/*_keep.jsonl
- config_name: und_Hebr
data_files:
- split: train
path: und_Hebr/*_keep.jsonl
- config_name: mon_Latn
data_files:
- split: train
path: mon_Latn/*_keep.jsonl
- config_name: und_Saur
data_files:
- split: train
path: und_Saur/*_keep.jsonl
- config_name: tgk_Latn
data_files:
- split: train
path: tgk_Latn/*_keep.jsonl
- config_name: sah_Cyrl
data_files:
- split: train
path: sah_Cyrl/*_keep.jsonl
- config_name: pri_Latn
data_files:
- split: train
path: pri_Latn/*_keep.jsonl
- config_name: pap_Latn
data_files:
- split: train
path: pap_Latn/*_keep.jsonl
- config_name: plt_Latn
data_files:
- split: train
path: plt_Latn/*_keep.jsonl
- config_name: lmo_Latn
data_files:
- split: train
path: lmo_Latn/*_keep.jsonl
- config_name: bod_Tibt
data_files:
- split: train
path: bod_Tibt/*_keep.jsonl
- config_name: und_Vaii
data_files:
- split: train
path: und_Vaii/*_keep.jsonl
- config_name: yue_Hani
data_files:
- split: train
path: yue_Hani/*_keep.jsonl
- config_name: und_Dupl
data_files:
- split: train
path: und_Dupl/*_keep.jsonl
- config_name: und_Glag
data_files:
- split: train
path: und_Glag/*_keep.jsonl
- config_name: und_Thai
data_files:
- split: train
path: und_Thai/*_keep.jsonl
- config_name: bar_Latn
data_files:
- split: train
path: bar_Latn/*_keep.jsonl
- config_name: und_Thaa
data_files:
- split: train
path: und_Thaa/*_keep.jsonl
- config_name: hif_Latn
data_files:
- split: train
path: hif_Latn/*_keep.jsonl
- config_name: und_Runr
data_files:
- split: train
path: und_Runr/*_keep.jsonl
- config_name: arg_Latn
data_files:
- split: train
path: arg_Latn/*_keep.jsonl
- config_name: pms_Latn
data_files:
- split: train
path: pms_Latn/*_keep.jsonl
- config_name: kat_Latn
data_files:
- split: train
path: kat_Latn/*_keep.jsonl
- config_name: nrm_Latn
data_files:
- split: train
path: nrm_Latn/*_keep.jsonl
- config_name: vol_Latn
data_files:
- split: train
path: vol_Latn/*_keep.jsonl
- config_name: aeb_Arab
data_files:
- split: train
path: aeb_Arab/*_keep.jsonl
- config_name: und_Khmr
data_files:
- split: train
path: und_Khmr/*_keep.jsonl
- config_name: und_Geor
data_files:
- split: train
path: und_Geor/*_keep.jsonl
- config_name: ido_Latn
data_files:
- split: train
path: ido_Latn/*_keep.jsonl
- config_name: pam_Latn
data_files:
- split: train
path: pam_Latn/*_keep.jsonl
- config_name: kal_Latn
data_files:
- split: train
path: kal_Latn/*_keep.jsonl
- config_name: und_Dsrt
data_files:
- split: train
path: und_Dsrt/*_keep.jsonl
- config_name: und_Tibt
data_files:
- split: train
path: und_Tibt/*_keep.jsonl
- config_name: und_Mymr
data_files:
- split: train
path: und_Mymr/*_keep.jsonl
- config_name: lus_Latn
data_files:
- split: train
path: lus_Latn/*_keep.jsonl
- config_name: und_Armn
data_files:
- split: train
path: und_Armn/*_keep.jsonl
- config_name: und_Mroo
data_files:
- split: train
path: und_Mroo/*_keep.jsonl
- config_name: und_Bopo
data_files:
- split: train
path: und_Bopo/*_keep.jsonl
- config_name: new_Deva
data_files:
- split: train
path: new_Deva/*_keep.jsonl
- config_name: sme_Latn
data_files:
- split: train
path: sme_Latn/*_keep.jsonl
- config_name: und_Khar
data_files:
- split: train
path: und_Khar/*_keep.jsonl
- config_name: und_Mtei
data_files:
- split: train
path: und_Mtei/*_keep.jsonl
- config_name: nso_Latn
data_files:
- split: train
path: nso_Latn/*_keep.jsonl
- config_name: lld_Latn
data_files:
- split: train
path: lld_Latn/*_keep.jsonl
- config_name: ina_Latn
data_files:
- split: train
path: ina_Latn/*_keep.jsonl
- config_name: scn_Latn
data_files:
- split: train
path: scn_Latn/*_keep.jsonl
- config_name: und_Deva
data_files:
- split: train
path: und_Deva/*_keep.jsonl
- config_name: und_Brah
data_files:
- split: train
path: und_Brah/*_keep.jsonl
- config_name: und_Bhks
data_files:
- split: train
path: und_Bhks/*_keep.jsonl
- config_name: hyw_Armn
data_files:
- split: train
path: hyw_Armn/*_keep.jsonl
- config_name: abk_Cyrl
data_files:
- split: train
path: abk_Cyrl/*_keep.jsonl
- config_name: bew_Cyrl
data_files:
- split: train
path: bew_Cyrl/*_keep.jsonl
- config_name: und_Telu
data_files:
- split: train
path: und_Telu/*_keep.jsonl
- config_name: und_Merc
data_files:
- split: train
path: und_Merc/*_keep.jsonl
- config_name: bpy_Beng
data_files:
- split: train
path: bpy_Beng/*_keep.jsonl
- config_name: tgk_Arab
data_files:
- split: train
path: tgk_Arab/*_keep.jsonl
- config_name: lin_Latn
data_files:
- split: train
path: lin_Latn/*_keep.jsonl
- config_name: oss_Cyrl
data_files:
- split: train
path: oss_Cyrl/*_keep.jsonl
- config_name: que_Latn
data_files:
- split: train
path: que_Latn/*_keep.jsonl
- config_name: szl_Latn
data_files:
- split: train
path: szl_Latn/*_keep.jsonl
- config_name: sdh_Arab
data_files:
- split: train
path: sdh_Arab/*_keep.jsonl
- config_name: mww_Latn
data_files:
- split: train
path: mww_Latn/*_keep.jsonl
- config_name: mhr_Cyrl
data_files:
- split: train
path: mhr_Cyrl/*_keep.jsonl
- config_name: und_Hmnp
data_files:
- split: train
path: und_Hmnp/*_keep.jsonl
- config_name: und_Orya
data_files:
- split: train
path: und_Orya/*_keep.jsonl
- config_name: und_Kali
data_files:
- split: train
path: und_Kali/*_keep.jsonl
- config_name: rmy_Latn
data_files:
- split: train
path: rmy_Latn/*_keep.jsonl
- config_name: diq_Latn
data_files:
- split: train
path: diq_Latn/*_keep.jsonl
- config_name: srd_Latn
data_files:
- split: train
path: srd_Latn/*_keep.jsonl
- config_name: und_Plrd
data_files:
- split: train
path: und_Plrd/*_keep.jsonl
- config_name: und_Phag
data_files:
- split: train
path: und_Phag/*_keep.jsonl
- config_name: ydd_Hebr
data_files:
- split: train
path: ydd_Hebr/*_keep.jsonl
- config_name: und_Hmng
data_files:
- split: train
path: und_Hmng/*_keep.jsonl
- config_name: run_Latn
data_files:
- split: train
path: run_Latn/*_keep.jsonl
- config_name: und_Nkoo
data_files:
- split: train
path: und_Nkoo/*_keep.jsonl
- config_name: hsb_Latn
data_files:
- split: train
path: hsb_Latn/*_keep.jsonl
- config_name: und_Lisu
data_files:
- split: train
path: und_Lisu/*_keep.jsonl
- config_name: wol_Latn
data_files:
- split: train
path: wol_Latn/*_keep.jsonl
- config_name: und_Gran
data_files:
- split: train
path: und_Gran/*_keep.jsonl
- config_name: urd_Latn
data_files:
- split: train
path: urd_Latn/*_keep.jsonl
- config_name: und_Cher
data_files:
- split: train
path: und_Cher/*_keep.jsonl
- config_name: kiu_Latn
data_files:
- split: train
path: kiu_Latn/*_keep.jsonl
- config_name: ilo_Latn
data_files:
- split: train
path: ilo_Latn/*_keep.jsonl
- config_name: und_Taml
data_files:
- split: train
path: und_Taml/*_keep.jsonl
- config_name: cak_Latn
data_files:
- split: train
path: cak_Latn/*_keep.jsonl
- config_name: und_Gonm
data_files:
- split: train
path: und_Gonm/*_keep.jsonl
- config_name: acm_Arab
data_files:
- split: train
path: acm_Arab/*_keep.jsonl
- config_name: und_Tnsa
data_files:
- split: train
path: und_Tnsa/*_keep.jsonl
- config_name: und_Cprt
data_files:
- split: train
path: und_Cprt/*_keep.jsonl
- config_name: grc_Grek
data_files:
- split: train
path: grc_Grek/*_keep.jsonl
- config_name: xmf_Geor
data_files:
- split: train
path: xmf_Geor/*_keep.jsonl
- config_name: vls_Latn
data_files:
- split: train
path: vls_Latn/*_keep.jsonl
- config_name: und_Gujr
data_files:
- split: train
path: und_Gujr/*_keep.jsonl
- config_name: und_Cari
data_files:
- split: train
path: und_Cari/*_keep.jsonl
- config_name: tso_Latn
data_files:
- split: train
path: tso_Latn/*_keep.jsonl
- config_name: crh_Latn
data_files:
- split: train
path: crh_Latn/*_keep.jsonl
- config_name: nav_Latn
data_files:
- split: train
path: nav_Latn/*_keep.jsonl
- config_name: mwl_Latn
data_files:
- split: train
path: mwl_Latn/*_keep.jsonl
- config_name: und_Diak
data_files:
- split: train
path: und_Diak/*_keep.jsonl
- config_name: tat_Latn
data_files:
- split: train
path: tat_Latn/*_keep.jsonl
- config_name: grn_Latn
data_files:
- split: train
path: grn_Latn/*_keep.jsonl
- config_name: mui_Latn
data_files:
- split: train
path: mui_Latn/*_keep.jsonl
- config_name: hin_Latn
data_files:
- split: train
path: hin_Latn/*_keep.jsonl
- config_name: mvf_Mong
data_files:
- split: train
path: mvf_Mong/*_keep.jsonl
- config_name: udm_Cyrl
data_files:
- split: train
path: udm_Cyrl/*_keep.jsonl
- config_name: mzn_Arab
data_files:
- split: train
path: mzn_Arab/*_keep.jsonl
- config_name: und_Marc
data_files:
- split: train
path: und_Marc/*_keep.jsonl
- config_name: und_Talu
data_files:
- split: train
path: und_Talu/*_keep.jsonl
- config_name: und_Vith
data_files:
- split: train
path: und_Vith/*_keep.jsonl
- config_name: bcl_Latn
data_files:
- split: train
path: bcl_Latn/*_keep.jsonl
- config_name: und_Nagm
data_files:
- split: train
path: und_Nagm/*_keep.jsonl
- config_name: und_Mani
data_files:
- split: train
path: und_Mani/*_keep.jsonl
- config_name: ban_Latn
data_files:
- split: train
path: ban_Latn/*_keep.jsonl
- config_name: und_Mlym
data_files:
- split: train
path: und_Mlym/*_keep.jsonl
- config_name: afb_Arab
data_files:
- split: train
path: afb_Arab/*_keep.jsonl
- config_name: und_Java
data_files:
- split: train
path: und_Java/*_keep.jsonl
- config_name: cnh_Latn
data_files:
- split: train
path: cnh_Latn/*_keep.jsonl
- config_name: und_Ahom
data_files:
- split: train
path: und_Ahom/*_keep.jsonl
- config_name: cbk_Latn
data_files:
- split: train
path: cbk_Latn/*_keep.jsonl
- config_name: kaa_Latn
data_files:
- split: train
path: kaa_Latn/*_keep.jsonl
- config_name: tyv_Cyrl
data_files:
- split: train
path: tyv_Cyrl/*_keep.jsonl
- config_name: kur_Arab
data_files:
- split: train
path: kur_Arab/*_keep.jsonl
- config_name: ful_Latn
data_files:
- split: train
path: ful_Latn/*_keep.jsonl
- config_name: und_Beng
data_files:
- split: train
path: und_Beng/*_keep.jsonl
- config_name: zea_Latn
data_files:
- split: train
path: zea_Latn/*_keep.jsonl
- config_name: frp_Latn
data_files:
- split: train
path: frp_Latn/*_keep.jsonl
- config_name: frr_Latn
data_files:
- split: train
path: frr_Latn/*_keep.jsonl
- config_name: pfl_Latn
data_files:
- split: train
path: pfl_Latn/*_keep.jsonl
- config_name: lij_Latn
data_files:
- split: train
path: lij_Latn/*_keep.jsonl
- config_name: apc_Arab
data_files:
- split: train
path: apc_Arab/*_keep.jsonl
- config_name: mai_Deva
data_files:
- split: train
path: mai_Deva/*_keep.jsonl
- config_name: und_Wara
data_files:
- split: train
path: und_Wara/*_keep.jsonl
- config_name: und_Palm
data_files:
- split: train
path: und_Palm/*_keep.jsonl
- config_name: pon_Latn
data_files:
- split: train
path: pon_Latn/*_keep.jsonl
- config_name: bak_Latn
data_files:
- split: train
path: bak_Latn/*_keep.jsonl
- config_name: eml_Latn
data_files:
- split: train
path: eml_Latn/*_keep.jsonl
- config_name: uzs_Arab
data_files:
- split: train
path: uzs_Arab/*_keep.jsonl
- config_name: nde_Latn
data_files:
- split: train
path: nde_Latn/*_keep.jsonl
- config_name: hil_Latn
data_files:
- split: train
path: hil_Latn/*_keep.jsonl
- config_name: mam_Latn
data_files:
- split: train
path: mam_Latn/*_keep.jsonl
- config_name: gom_Latn
data_files:
- split: train
path: gom_Latn/*_keep.jsonl
- config_name: dag_Latn
data_files:
- split: train
path: dag_Latn/*_keep.jsonl
- config_name: nap_Latn
data_files:
- split: train
path: nap_Latn/*_keep.jsonl
- config_name: bjn_Latn
data_files:
- split: train
path: bjn_Latn/*_keep.jsonl
- config_name: und_Khoj
data_files:
- split: train
path: und_Khoj/*_keep.jsonl
- config_name: hbo_Hebr
data_files:
- split: train
path: hbo_Hebr/*_keep.jsonl
- config_name: tet_Latn
data_files:
- split: train
path: tet_Latn/*_keep.jsonl
- config_name: inh_Cyrl
data_files:
- split: train
path: inh_Cyrl/*_keep.jsonl
- config_name: ven_Latn
data_files:
- split: train
path: ven_Latn/*_keep.jsonl
- config_name: gaz_Latn
data_files:
- split: train
path: gaz_Latn/*_keep.jsonl
- config_name: tpi_Latn
data_files:
- split: train
path: tpi_Latn/*_keep.jsonl
- config_name: und_Sidd
data_files:
- split: train
path: und_Sidd/*_keep.jsonl
- config_name: ajp_Arab
data_files:
- split: train
path: ajp_Arab/*_keep.jsonl
- config_name: sat_Olck
data_files:
- split: train
path: sat_Olck/*_keep.jsonl
- config_name: wuu_Hani
data_files:
- split: train
path: wuu_Hani/*_keep.jsonl
- config_name: bam_Latn
data_files:
- split: train
path: bam_Latn/*_keep.jsonl
- config_name: vep_Latn
data_files:
- split: train
path: vep_Latn/*_keep.jsonl
- config_name: glv_Latn
data_files:
- split: train
path: glv_Latn/*_keep.jsonl
- config_name: tzo_Latn
data_files:
- split: train
path: tzo_Latn/*_keep.jsonl
- config_name: bik_Latn
data_files:
- split: train
path: bik_Latn/*_keep.jsonl
- config_name: und_Rohg
data_files:
- split: train
path: und_Rohg/*_keep.jsonl
- config_name: acr_Latn
data_files:
- split: train
path: acr_Latn/*_keep.jsonl
- config_name: twi_Latn
data_files:
- split: train
path: twi_Latn/*_keep.jsonl
- config_name: myv_Cyrl
data_files:
- split: train
path: myv_Cyrl/*_keep.jsonl
- config_name: ewe_Latn
data_files:
- split: train
path: ewe_Latn/*_keep.jsonl
- config_name: poh_Latn
data_files:
- split: train
path: poh_Latn/*_keep.jsonl
- config_name: und_Avst
data_files:
- split: train
path: und_Avst/*_keep.jsonl
- config_name: ile_Latn
data_files:
- split: train
path: ile_Latn/*_keep.jsonl
- config_name: rue_Cyrl
data_files:
- split: train
path: rue_Cyrl/*_keep.jsonl
- config_name: und_Ougr
data_files:
- split: train
path: und_Ougr/*_keep.jsonl
- config_name: und_Wcho
data_files:
- split: train
path: und_Wcho/*_keep.jsonl
- config_name: ace_Latn
data_files:
- split: train
path: ace_Latn/*_keep.jsonl
- config_name: kha_Latn
data_files:
- split: train
path: kha_Latn/*_keep.jsonl
- config_name: und_Yezi
data_files:
- split: train
path: und_Yezi/*_keep.jsonl
- config_name: und_Ital
data_files:
- split: train
path: und_Ital/*_keep.jsonl
- config_name: und_Kthi
data_files:
- split: train
path: und_Kthi/*_keep.jsonl
- config_name: und_Takr
data_files:
- split: train
path: und_Takr/*_keep.jsonl
- config_name: ksh_Latn
data_files:
- split: train
path: ksh_Latn/*_keep.jsonl
- config_name: hui_Latn
data_files:
- split: train
path: hui_Latn/*_keep.jsonl
- config_name: gor_Latn
data_files:
- split: train
path: gor_Latn/*_keep.jsonl
- config_name: krc_Cyrl
data_files:
- split: train
path: krc_Cyrl/*_keep.jsonl
- config_name: lfn_Latn
data_files:
- split: train
path: lfn_Latn/*_keep.jsonl
- config_name: tls_Latn
data_files:
- split: train
path: tls_Latn/*_keep.jsonl
- config_name: fur_Latn
data_files:
- split: train
path: fur_Latn/*_keep.jsonl
- config_name: und_Tavt
data_files:
- split: train
path: und_Tavt/*_keep.jsonl
- config_name: gag_Latn
data_files:
- split: train
path: gag_Latn/*_keep.jsonl
- config_name: chk_Latn
data_files:
- split: train
path: chk_Latn/*_keep.jsonl
- config_name: hmo_Latn
data_files:
- split: train
path: hmo_Latn/*_keep.jsonl
- config_name: kek_Latn
data_files:
- split: train
path: kek_Latn/*_keep.jsonl
- config_name: fon_Latn
data_files:
- split: train
path: fon_Latn/*_keep.jsonl
- config_name: ssw_Latn
data_files:
- split: train
path: ssw_Latn/*_keep.jsonl
- config_name: quh_Latn
data_files:
- split: train
path: quh_Latn/*_keep.jsonl
- config_name: meu_Latn
data_files:
- split: train
path: meu_Latn/*_keep.jsonl
- config_name: csb_Latn
data_files:
- split: train
path: csb_Latn/*_keep.jsonl
- config_name: ksd_Latn
data_files:
- split: train
path: ksd_Latn/*_keep.jsonl
- config_name: ext_Latn
data_files:
- split: train
path: ext_Latn/*_keep.jsonl
- config_name: rop_Latn
data_files:
- split: train
path: rop_Latn/*_keep.jsonl
- config_name: und_Tfng
data_files:
- split: train
path: und_Tfng/*_keep.jsonl
- config_name: aln_Latn
data_files:
- split: train
path: aln_Latn/*_keep.jsonl
- config_name: kab_Latn
data_files:
- split: train
path: kab_Latn/*_keep.jsonl
- config_name: shn_Mymr
data_files:
- split: train
path: shn_Mymr/*_keep.jsonl
- config_name: tbz_Latn
data_files:
- split: train
path: tbz_Latn/*_keep.jsonl
- config_name: knv_Latn
data_files:
- split: train
path: knv_Latn/*_keep.jsonl
- config_name: bqc_Latn
data_files:
- split: train
path: bqc_Latn/*_keep.jsonl
- config_name: und_Elba
data_files:
- split: train
path: und_Elba/*_keep.jsonl
- config_name: und_Zanb
data_files:
- split: train
path: und_Zanb/*_keep.jsonl
- config_name: npi_Deva
data_files:
- split: train
path: npi_Deva/*_keep.jsonl
- config_name: rug_Latn
data_files:
- split: train
path: rug_Latn/*_keep.jsonl
- config_name: und_Tale
data_files:
- split: train
path: und_Tale/*_keep.jsonl
- config_name: kom_Cyrl
data_files:
- split: train
path: kom_Cyrl/*_keep.jsonl
- config_name: kpg_Latn
data_files:
- split: train
path: kpg_Latn/*_keep.jsonl
- config_name: kbd_Cyrl
data_files:
- split: train
path: kbd_Cyrl/*_keep.jsonl
- config_name: dzo_Tibt
data_files:
- split: train
path: dzo_Tibt/*_keep.jsonl
- config_name: wal_Latn
data_files:
- split: train
path: wal_Latn/*_keep.jsonl
- config_name: zlm_Latn
data_files:
- split: train
path: zlm_Latn/*_keep.jsonl
- config_name: und_Soyo
data_files:
- split: train
path: und_Soyo/*_keep.jsonl
- config_name: mzh_Latn
data_files:
- split: train
path: mzh_Latn/*_keep.jsonl
- config_name: pan_Latn
data_files:
- split: train
path: pan_Latn/*_keep.jsonl
- config_name: hac_Arab
data_files:
- split: train
path: hac_Arab/*_keep.jsonl
- config_name: und_Sogo
data_files:
- split: train
path: und_Sogo/*_keep.jsonl
- config_name: tum_Latn
data_files:
- split: train
path: tum_Latn/*_keep.jsonl
- config_name: sgs_Latn
data_files:
- split: train
path: sgs_Latn/*_keep.jsonl
- config_name: fij_Latn
data_files:
- split: train
path: fij_Latn/*_keep.jsonl
- config_name: tuc_Latn
data_files:
- split: train
path: tuc_Latn/*_keep.jsonl
- config_name: und_Dogr
data_files:
- split: train
path: und_Dogr/*_keep.jsonl
- config_name: und_Kawi
data_files:
- split: train
path: und_Kawi/*_keep.jsonl
- config_name: jbo_Latn
data_files:
- split: train
path: jbo_Latn/*_keep.jsonl
- config_name: iba_Latn
data_files:
- split: train
path: iba_Latn/*_keep.jsonl
- config_name: pcd_Latn
data_files:
- split: train
path: pcd_Latn/*_keep.jsonl
- config_name: kjh_Cyrl
data_files:
- split: train
path: kjh_Cyrl/*_keep.jsonl
- config_name: ixl_Latn
data_files:
- split: train
path: ixl_Latn/*_keep.jsonl
- config_name: und_Phli
data_files:
- split: train
path: und_Phli/*_keep.jsonl
- config_name: und_Cham
data_files:
- split: train
path: und_Cham/*_keep.jsonl
- config_name: und_Guru
data_files:
- split: train
path: und_Guru/*_keep.jsonl
- config_name: ndo_Latn
data_files:
- split: train
path: ndo_Latn/*_keep.jsonl
- config_name: suz_Deva
data_files:
- split: train
path: suz_Deva/*_keep.jsonl
- config_name: und_Nbat
data_files:
- split: train
path: und_Nbat/*_keep.jsonl
- config_name: und_Nand
data_files:
- split: train
path: und_Nand/*_keep.jsonl
- config_name: mfe_Latn
data_files:
- split: train
path: mfe_Latn/*_keep.jsonl
- config_name: bxr_Cyrl
data_files:
- split: train
path: bxr_Cyrl/*_keep.jsonl
- config_name: ava_Cyrl
data_files:
- split: train
path: ava_Cyrl/*_keep.jsonl
- config_name: und_Osma
data_files:
- split: train
path: und_Osma/*_keep.jsonl
- config_name: bho_Deva
data_files:
- split: train
path: bho_Deva/*_keep.jsonl
- config_name: brh_Arab
data_files:
- split: train
path: brh_Arab/*_keep.jsonl
- config_name: und_Sind
data_files:
- split: train
path: und_Sind/*_keep.jsonl
- config_name: ctd_Latn
data_files:
- split: train
path: ctd_Latn/*_keep.jsonl
- config_name: aym_Latn
data_files:
- split: train
path: aym_Latn/*_keep.jsonl
- config_name: ady_Cyrl
data_files:
- split: train
path: ady_Cyrl/*_keep.jsonl
- config_name: nbl_Latn
data_files:
- split: train
path: nbl_Latn/*_keep.jsonl
- config_name: gom_Deva
data_files:
- split: train
path: gom_Deva/*_keep.jsonl
- config_name: hus_Latn
data_files:
- split: train
path: hus_Latn/*_keep.jsonl
- config_name: gug_Latn
data_files:
- split: train
path: gug_Latn/*_keep.jsonl
- config_name: und_Pauc
data_files:
- split: train
path: und_Pauc/*_keep.jsonl
- config_name: und_Sogd
data_files:
- split: train
path: und_Sogd/*_keep.jsonl
- config_name: bat_Latn
data_files:
- split: train
path: bat_Latn/*_keep.jsonl
- config_name: guj_Deva
data_files:
- split: train
path: guj_Deva/*_keep.jsonl
- config_name: hne_Deva
data_files:
- split: train
path: hne_Deva/*_keep.jsonl
- config_name: srn_Latn
data_files:
- split: train
path: srn_Latn/*_keep.jsonl
- config_name: ctu_Latn
data_files:
- split: train
path: ctu_Latn/*_keep.jsonl
- config_name: san_Latn
data_files:
- split: train
path: san_Latn/*_keep.jsonl
- config_name: ton_Latn
data_files:
- split: train
path: ton_Latn/*_keep.jsonl
- config_name: bug_Latn
data_files:
- split: train
path: bug_Latn/*_keep.jsonl
- config_name: aoj_Latn
data_files:
- split: train
path: aoj_Latn/*_keep.jsonl
- config_name: jam_Latn
data_files:
- split: train
path: jam_Latn/*_keep.jsonl
- config_name: und_Sylo
data_files:
- split: train
path: und_Sylo/*_keep.jsonl
- config_name: lez_Cyrl
data_files:
- split: train
path: lez_Cyrl/*_keep.jsonl
- config_name: xav_Latn
data_files:
- split: train
path: xav_Latn/*_keep.jsonl
- config_name: luo_Latn
data_files:
- split: train
path: luo_Latn/*_keep.jsonl
- config_name: ltg_Latn
data_files:
- split: train
path: ltg_Latn/*_keep.jsonl
- config_name: bih_Deva
data_files:
- split: train
path: bih_Deva/*_keep.jsonl
- config_name: uig_Cyrl
data_files:
- split: train
path: uig_Cyrl/*_keep.jsonl
- config_name: mrj_Cyrl
data_files:
- split: train
path: mrj_Cyrl/*_keep.jsonl
- config_name: avk_Latn
data_files:
- split: train
path: avk_Latn/*_keep.jsonl
- config_name: kos_Latn
data_files:
- split: train
path: kos_Latn/*_keep.jsonl
- config_name: kbp_Latn
data_files:
- split: train
path: kbp_Latn/*_keep.jsonl
- config_name: alt_Cyrl
data_files:
- split: train
path: alt_Cyrl/*_keep.jsonl
- config_name: cfm_Latn
data_files:
- split: train
path: cfm_Latn/*_keep.jsonl
- config_name: goh_Latn
data_files:
- split: train
path: goh_Latn/*_keep.jsonl
- config_name: lad_Latn
data_files:
- split: train
path: lad_Latn/*_keep.jsonl
- config_name: und_Rjng
data_files:
- split: train
path: und_Rjng/*_keep.jsonl
- config_name: cor_Latn
data_files:
- split: train
path: cor_Latn/*_keep.jsonl
- config_name: quc_Latn
data_files:
- split: train
path: quc_Latn/*_keep.jsonl
- config_name: und_Goth
data_files:
- split: train
path: und_Goth/*_keep.jsonl
- config_name: kpv_Cyrl
data_files:
- split: train
path: kpv_Cyrl/*_keep.jsonl
- config_name: und_Chrs
data_files:
- split: train
path: und_Chrs/*_keep.jsonl
- config_name: aka_Latn
data_files:
- split: train
path: aka_Latn/*_keep.jsonl
- config_name: und_Phlp
data_files:
- split: train
path: und_Phlp/*_keep.jsonl
- config_name: hak_Latn
data_files:
- split: train
path: hak_Latn/*_keep.jsonl
- config_name: rmn_Latn
data_files:
- split: train
path: rmn_Latn/*_keep.jsonl
- config_name: pls_Latn
data_files:
- split: train
path: pls_Latn/*_keep.jsonl
- config_name: sag_Latn
data_files:
- split: train
path: sag_Latn/*_keep.jsonl
- config_name: min_Arab
data_files:
- split: train
path: min_Arab/*_keep.jsonl
- config_name: und_Tglg
data_files:
- split: train
path: und_Tglg/*_keep.jsonl
- config_name: rcf_Latn
data_files:
- split: train
path: rcf_Latn/*_keep.jsonl
- config_name: som_Arab
data_files:
- split: train
path: som_Arab/*_keep.jsonl
- config_name: gym_Latn
data_files:
- split: train
path: gym_Latn/*_keep.jsonl
- config_name: und_Mand
data_files:
- split: train
path: und_Mand/*_keep.jsonl
- config_name: kik_Latn
data_files:
- split: train
path: kik_Latn/*_keep.jsonl
- config_name: pag_Latn
data_files:
- split: train
path: pag_Latn/*_keep.jsonl
- config_name: arn_Latn
data_files:
- split: train
path: arn_Latn/*_keep.jsonl
- config_name: und_Shaw
data_files:
- split: train
path: und_Shaw/*_keep.jsonl
- config_name: ngu_Latn
data_files:
- split: train
path: ngu_Latn/*_keep.jsonl
- config_name: dsb_Latn
data_files:
- split: train
path: dsb_Latn/*_keep.jsonl
- config_name: tca_Latn
data_files:
- split: train
path: tca_Latn/*_keep.jsonl
- config_name: mni_Mtei
data_files:
- split: train
path: mni_Mtei/*_keep.jsonl
- config_name: naq_Latn
data_files:
- split: train
path: naq_Latn/*_keep.jsonl
- config_name: pdc_Latn
data_files:
- split: train
path: pdc_Latn/*_keep.jsonl
- config_name: mps_Latn
data_files:
- split: train
path: mps_Latn/*_keep.jsonl
- config_name: und_Hatr
data_files:
- split: train
path: und_Hatr/*_keep.jsonl
- config_name: olo_Latn
data_files:
- split: train
path: olo_Latn/*_keep.jsonl
- config_name: fro_Latn
data_files:
- split: train
path: fro_Latn/*_keep.jsonl
- config_name: fit_Latn
data_files:
- split: train
path: fit_Latn/*_keep.jsonl
- config_name: crh_Cyrl
data_files:
- split: train
path: crh_Cyrl/*_keep.jsonl
- config_name: cdo_Latn
data_files:
- split: train
path: cdo_Latn/*_keep.jsonl
- config_name: kac_Latn
data_files:
- split: train
path: kac_Latn/*_keep.jsonl
- config_name: mdf_Cyrl
data_files:
- split: train
path: mdf_Cyrl/*_keep.jsonl
- config_name: mad_Latn
data_files:
- split: train
path: mad_Latn/*_keep.jsonl
- config_name: cab_Latn
data_files:
- split: train
path: cab_Latn/*_keep.jsonl
- config_name: srm_Latn
data_files:
- split: train
path: srm_Latn/*_keep.jsonl
- config_name: toj_Latn
data_files:
- split: train
path: toj_Latn/*_keep.jsonl
- config_name: zai_Latn
data_files:
- split: train
path: zai_Latn/*_keep.jsonl
- config_name: guc_Latn
data_files:
- split: train
path: guc_Latn/*_keep.jsonl
- config_name: smn_Latn
data_files:
- split: train
path: smn_Latn/*_keep.jsonl
- config_name: ote_Latn
data_files:
- split: train
path: ote_Latn/*_keep.jsonl
- config_name: nch_Latn
data_files:
- split: train
path: nch_Latn/*_keep.jsonl
- config_name: und_Bugi
data_files:
- split: train
path: und_Bugi/*_keep.jsonl
- config_name: prs_Arab
data_files:
- split: train
path: prs_Arab/*_keep.jsonl
- config_name: tuk_Arab
data_files:
- split: train
path: tuk_Arab/*_keep.jsonl
- config_name: bis_Latn
data_files:
- split: train
path: bis_Latn/*_keep.jsonl
- config_name: cuk_Latn
data_files:
- split: train
path: cuk_Latn/*_keep.jsonl
- config_name: bzj_Latn
data_files:
- split: train
path: bzj_Latn/*_keep.jsonl
- config_name: map_Latn
data_files:
- split: train
path: map_Latn/*_keep.jsonl
- config_name: djk_Latn
data_files:
- split: train
path: djk_Latn/*_keep.jsonl
- config_name: ncj_Latn
data_files:
- split: train
path: ncj_Latn/*_keep.jsonl
- config_name: und_Olck
data_files:
- split: train
path: und_Olck/*_keep.jsonl
- config_name: mco_Latn
data_files:
- split: train
path: mco_Latn/*_keep.jsonl
- config_name: shp_Latn
data_files:
- split: train
path: shp_Latn/*_keep.jsonl
- config_name: und_Tagb
data_files:
- split: train
path: und_Tagb/*_keep.jsonl
- config_name: ang_Latn
data_files:
- split: train
path: ang_Latn/*_keep.jsonl
- config_name: seh_Latn
data_files:
- split: train
path: seh_Latn/*_keep.jsonl
- config_name: ikk_Latn
data_files:
- split: train
path: ikk_Latn/*_keep.jsonl
- config_name: qvi_Latn
data_files:
- split: train
path: qvi_Latn/*_keep.jsonl
- config_name: yom_Latn
data_files:
- split: train
path: yom_Latn/*_keep.jsonl
- config_name: top_Latn
data_files:
- split: train
path: top_Latn/*_keep.jsonl
- config_name: awa_Deva
data_files:
- split: train
path: awa_Deva/*_keep.jsonl
- config_name: ike_Cans
data_files:
- split: train
path: ike_Cans/*_keep.jsonl
- config_name: mau_Latn
data_files:
- split: train
path: mau_Latn/*_keep.jsonl
- config_name: kum_Cyrl
data_files:
- split: train
path: kum_Cyrl/*_keep.jsonl
- config_name: mos_Latn
data_files:
- split: train
path: mos_Latn/*_keep.jsonl
- config_name: tly_Latn
data_files:
- split: train
path: tly_Latn/*_keep.jsonl
- config_name: und_Prti
data_files:
- split: train
path: und_Prti/*_keep.jsonl
- config_name: ayr_Latn
data_files:
- split: train
path: ayr_Latn/*_keep.jsonl
- config_name: tdt_Latn
data_files:
- split: train
path: tdt_Latn/*_keep.jsonl
- config_name: yap_Latn
data_files:
- split: train
path: yap_Latn/*_keep.jsonl
- config_name: evn_Cyrl
data_files:
- split: train
path: evn_Cyrl/*_keep.jsonl
- config_name: tah_Latn
data_files:
- split: train
path: tah_Latn/*_keep.jsonl
- config_name: xal_Cyrl
data_files:
- split: train
path: xal_Cyrl/*_keep.jsonl
- config_name: szy_Latn
data_files:
- split: train
path: szy_Latn/*_keep.jsonl
- config_name: quw_Latn
data_files:
- split: train
path: quw_Latn/*_keep.jsonl
- config_name: stq_Latn
data_files:
- split: train
path: stq_Latn/*_keep.jsonl
- config_name: tlh_Latn
data_files:
- split: train
path: tlh_Latn/*_keep.jsonl
- config_name: otq_Latn
data_files:
- split: train
path: otq_Latn/*_keep.jsonl
- config_name: und_Narb
data_files:
- split: train
path: und_Narb/*_keep.jsonl
- config_name: nov_Latn
data_files:
- split: train
path: nov_Latn/*_keep.jsonl
- config_name: skr_Arab
data_files:
- split: train
path: skr_Arab/*_keep.jsonl
- config_name: yua_Latn
data_files:
- split: train
path: yua_Latn/*_keep.jsonl
- config_name: raw_Latn
data_files:
- split: train
path: raw_Latn/*_keep.jsonl
- config_name: und_Sarb
data_files:
- split: train
path: und_Sarb/*_keep.jsonl
- config_name: fiu_Latn
data_files:
- split: train
path: fiu_Latn/*_keep.jsonl
- config_name: enm_Latn
data_files:
- split: train
path: enm_Latn/*_keep.jsonl
- config_name: bbc_Latn
data_files:
- split: train
path: bbc_Latn/*_keep.jsonl
- config_name: und_Lydi
data_files:
- split: train
path: und_Lydi/*_keep.jsonl
- config_name: non_Latn
data_files:
- split: train
path: non_Latn/*_keep.jsonl
- config_name: und_Ugar
data_files:
- split: train
path: und_Ugar/*_keep.jsonl
- config_name: ber_Latn
data_files:
- split: train
path: ber_Latn/*_keep.jsonl
- config_name: kea_Latn
data_files:
- split: train
path: kea_Latn/*_keep.jsonl
- config_name: uig_Latn
data_files:
- split: train
path: uig_Latn/*_keep.jsonl
- config_name: bua_Cyrl
data_files:
- split: train
path: bua_Cyrl/*_keep.jsonl
- config_name: kua_Latn
data_files:
- split: train
path: kua_Latn/*_keep.jsonl
- config_name: zza_Latn
data_files:
- split: train
path: zza_Latn/*_keep.jsonl
- config_name: csy_Latn
data_files:
- split: train
path: csy_Latn/*_keep.jsonl
- config_name: mnw_Mymr
data_files:
- split: train
path: mnw_Mymr/*_keep.jsonl
- config_name: dtp_Latn
data_files:
- split: train
path: dtp_Latn/*_keep.jsonl
- config_name: nah_Latn
data_files:
- split: train
path: nah_Latn/*_keep.jsonl
- config_name: yao_Latn
data_files:
- split: train
path: yao_Latn/*_keep.jsonl
- config_name: azj_Cyrl
data_files:
- split: train
path: azj_Cyrl/*_keep.jsonl
- config_name: hau_Arab
data_files:
- split: train
path: hau_Arab/*_keep.jsonl
- config_name: nog_Cyrl
data_files:
- split: train
path: nog_Cyrl/*_keep.jsonl
- config_name: abt_Latn
data_files:
- split: train
path: abt_Latn/*_keep.jsonl
- config_name: meo_Latn
data_files:
- split: train
path: meo_Latn/*_keep.jsonl
- config_name: ame_Latn
data_files:
- split: train
path: ame_Latn/*_keep.jsonl
- config_name: mbf_Latn
data_files:
- split: train
path: mbf_Latn/*_keep.jsonl
- config_name: chm_Cyrl
data_files:
- split: train
path: chm_Cyrl/*_keep.jsonl
- config_name: arb_Latn
data_files:
- split: train
path: arb_Latn/*_keep.jsonl
- config_name: kas_Arab
data_files:
- split: train
path: kas_Arab/*_keep.jsonl
- config_name: tam_Latn
data_files:
- split: train
path: tam_Latn/*_keep.jsonl
- config_name: crs_Latn
data_files:
- split: train
path: crs_Latn/*_keep.jsonl
- config_name: tcy_Knda
data_files:
- split: train
path: tcy_Knda/*_keep.jsonl
- config_name: lbe_Cyrl
data_files:
- split: train
path: lbe_Cyrl/*_keep.jsonl
- config_name: cni_Latn
data_files:
- split: train
path: cni_Latn/*_keep.jsonl
- config_name: ben_Latn
data_files:
- split: train
path: ben_Latn/*_keep.jsonl
- config_name: rom_Latn
data_files:
- split: train
path: rom_Latn/*_keep.jsonl
- config_name: zom_Latn
data_files:
- split: train
path: zom_Latn/*_keep.jsonl
- config_name: blk_Mymr
data_files:
- split: train
path: blk_Mymr/*_keep.jsonl
- config_name: efi_Latn
data_files:
- split: train
path: efi_Latn/*_keep.jsonl
- config_name: ada_Latn
data_files:
- split: train
path: ada_Latn/*_keep.jsonl
- config_name: und_Buhd
data_files:
- split: train
path: und_Buhd/*_keep.jsonl
- config_name: koi_Cyrl
data_files:
- split: train
path: koi_Cyrl/*_keep.jsonl
- config_name: und_Perm
data_files:
- split: train
path: und_Perm/*_keep.jsonl
- config_name: bbj_Latn
data_files:
- split: train
path: bbj_Latn/*_keep.jsonl
- config_name: trv_Latn
data_files:
- split: train
path: trv_Latn/*_keep.jsonl
- config_name: ach_Latn
data_files:
- split: train
path: ach_Latn/*_keep.jsonl
- config_name: mgh_Latn
data_files:
- split: train
path: mgh_Latn/*_keep.jsonl
- config_name: kas_Latn
data_files:
- split: train
path: kas_Latn/*_keep.jsonl
- config_name: gcr_Latn
data_files:
- split: train
path: gcr_Latn/*_keep.jsonl
- config_name: krl_Latn
data_files:
- split: train
path: krl_Latn/*_keep.jsonl
- config_name: iso_Latn
data_files:
- split: train
path: iso_Latn/*_keep.jsonl
- config_name: lki_Arab
data_files:
- split: train
path: lki_Arab/*_keep.jsonl
- config_name: atj_Latn
data_files:
- split: train
path: atj_Latn/*_keep.jsonl
- config_name: cmo_Latn
data_files:
- split: train
path: cmo_Latn/*_keep.jsonl
- config_name: chu_Cyrl
data_files:
- split: train
path: chu_Cyrl/*_keep.jsonl
- config_name: gur_Latn
data_files:
- split: train
path: gur_Latn/*_keep.jsonl
- config_name: mni_Beng
data_files:
- split: train
path: mni_Beng/*_keep.jsonl
- config_name: iku_Cans
data_files:
- split: train
path: iku_Cans/*_keep.jsonl
- config_name: sma_Latn
data_files:
- split: train
path: sma_Latn/*_keep.jsonl
- config_name: lrc_Arab
data_files:
- split: train
path: lrc_Arab/*_keep.jsonl
- config_name: gos_Latn
data_files:
- split: train
path: gos_Latn/*_keep.jsonl
- config_name: dty_Deva
data_files:
- split: train
path: dty_Deva/*_keep.jsonl
- config_name: nzi_Latn
data_files:
- split: train
path: nzi_Latn/*_keep.jsonl
- config_name: btx_Latn
data_files:
- split: train
path: btx_Latn/*_keep.jsonl
- config_name: brx_Deva
data_files:
- split: train
path: brx_Deva/*_keep.jsonl
- config_name: bts_Latn
data_files:
- split: train
path: bts_Latn/*_keep.jsonl
- config_name: nyu_Latn
data_files:
- split: train
path: nyu_Latn/*_keep.jsonl
- config_name: ckt_Latn
data_files:
- split: train
path: ckt_Latn/*_keep.jsonl
- config_name: guw_Latn
data_files:
- split: train
path: guw_Latn/*_keep.jsonl
- config_name: pck_Latn
data_files:
- split: train
path: pck_Latn/*_keep.jsonl
- config_name: quy_Latn
data_files:
- split: train
path: quy_Latn/*_keep.jsonl
- config_name: gcf_Latn
data_files:
- split: train
path: gcf_Latn/*_keep.jsonl
- config_name: ksw_Mymr
data_files:
- split: train
path: ksw_Mymr/*_keep.jsonl
- config_name: nia_Latn
data_files:
- split: train
path: nia_Latn/*_keep.jsonl
- config_name: bal_Arab
data_files:
- split: train
path: bal_Arab/*_keep.jsonl
- config_name: nhe_Latn
data_files:
- split: train
path: nhe_Latn/*_keep.jsonl
- config_name: hwc_Latn
data_files:
- split: train
path: hwc_Latn/*_keep.jsonl
- config_name: mup_Deva
data_files:
- split: train
path: mup_Deva/*_keep.jsonl
- config_name: dyu_Latn
data_files:
- split: train
path: dyu_Latn/*_keep.jsonl
- config_name: ami_Latn
data_files:
- split: train
path: ami_Latn/*_keep.jsonl
- config_name: tay_Latn
data_files:
- split: train
path: tay_Latn/*_keep.jsonl
- config_name: gaa_Latn
data_files:
- split: train
path: gaa_Latn/*_keep.jsonl
- config_name: loz_Latn
data_files:
- split: train
path: loz_Latn/*_keep.jsonl
- config_name: abq_Cyrl
data_files:
- split: train
path: abq_Cyrl/*_keep.jsonl
- config_name: pnt_Grek
data_files:
- split: train
path: pnt_Grek/*_keep.jsonl
- config_name: snd_Latn
data_files:
- split: train
path: snd_Latn/*_keep.jsonl
- config_name: ium_Latn
data_files:
- split: train
path: ium_Latn/*_keep.jsonl
- config_name: abs_Latn
data_files:
- split: train
path: abs_Latn/*_keep.jsonl
- config_name: cmr_Latn
data_files:
- split: train
path: cmr_Latn/*_keep.jsonl
- config_name: shi_Latn
data_files:
- split: train
path: shi_Latn/*_keep.jsonl
- config_name: tzm_Tfng
data_files:
- split: train
path: tzm_Tfng/*_keep.jsonl
- config_name: gil_Latn
data_files:
- split: train
path: gil_Latn/*_keep.jsonl
- config_name: ewo_Latn
data_files:
- split: train
path: ewo_Latn/*_keep.jsonl
- config_name: kon_Latn
data_files:
- split: train
path: kon_Latn/*_keep.jsonl
- config_name: sms_Latn
data_files:
- split: train
path: sms_Latn/*_keep.jsonl
- config_name: ape_Latn
data_files:
- split: train
path: ape_Latn/*_keep.jsonl
- config_name: tzh_Latn
data_files:
- split: train
path: tzh_Latn/*_keep.jsonl
- config_name: bci_Latn
data_files:
- split: train
path: bci_Latn/*_keep.jsonl
- config_name: acd_Latn
data_files:
- split: train
path: acd_Latn/*_keep.jsonl
- config_name: tab_Cyrl
data_files:
- split: train
path: tab_Cyrl/*_keep.jsonl
- config_name: fuv_Latn
data_files:
- split: train
path: fuv_Latn/*_keep.jsonl
- config_name: swc_Latn
data_files:
- split: train
path: swc_Latn/*_keep.jsonl
- config_name: tcz_Latn
data_files:
- split: train
path: tcz_Latn/*_keep.jsonl
- config_name: und_Elym
data_files:
- split: train
path: und_Elym/*_keep.jsonl
- config_name: sgc_Latn
data_files:
- split: train
path: sgc_Latn/*_keep.jsonl
- config_name: aaz_Latn
data_files:
- split: train
path: aaz_Latn/*_keep.jsonl
- config_name: tly_Arab
data_files:
- split: train
path: tly_Arab/*_keep.jsonl
- config_name: tvl_Latn
data_files:
- split: train
path: tvl_Latn/*_keep.jsonl
- config_name: zha_Latn
data_files:
- split: train
path: zha_Latn/*_keep.jsonl
- config_name: pib_Latn
data_files:
- split: train
path: pib_Latn/*_keep.jsonl
- config_name: pli_Deva
data_files:
- split: train
path: pli_Deva/*_keep.jsonl
- config_name: bru_Latn
data_files:
- split: train
path: bru_Latn/*_keep.jsonl
- config_name: quz_Latn
data_files:
- split: train
path: quz_Latn/*_keep.jsonl
- config_name: cha_Latn
data_files:
- split: train
path: cha_Latn/*_keep.jsonl
- config_name: aeu_Latn
data_files:
- split: train
path: aeu_Latn/*_keep.jsonl
- config_name: bjn_Arab
data_files:
- split: train
path: bjn_Arab/*_keep.jsonl
- config_name: nqo_Nkoo
data_files:
- split: train
path: nqo_Nkoo/*_keep.jsonl
- config_name: mer_Latn
data_files:
- split: train
path: mer_Latn/*_keep.jsonl
- config_name: acf_Latn
data_files:
- split: train
path: acf_Latn/*_keep.jsonl
- config_name: rmc_Latn
data_files:
- split: train
path: rmc_Latn/*_keep.jsonl
- config_name: msi_Latn
data_files:
- split: train
path: msi_Latn/*_keep.jsonl
- config_name: smj_Latn
data_files:
- split: train
path: smj_Latn/*_keep.jsonl
- config_name: kng_Latn
data_files:
- split: train
path: kng_Latn/*_keep.jsonl
- config_name: dhv_Latn
data_files:
- split: train
path: dhv_Latn/*_keep.jsonl
- config_name: doi_Deva
data_files:
- split: train
path: doi_Deva/*_keep.jsonl
- config_name: zap_Latn
data_files:
- split: train
path: zap_Latn/*_keep.jsonl
- config_name: hla_Latn
data_files:
- split: train
path: hla_Latn/*_keep.jsonl
- config_name: cac_Latn
data_files:
- split: train
path: cac_Latn/*_keep.jsonl
- config_name: pdt_Latn
data_files:
- split: train
path: pdt_Latn/*_keep.jsonl
- config_name: mbt_Latn
data_files:
- split: train
path: mbt_Latn/*_keep.jsonl
- config_name: taq_Latn
data_files:
- split: train
path: taq_Latn/*_keep.jsonl
- config_name: bgp_Latn
data_files:
- split: train
path: bgp_Latn/*_keep.jsonl
- config_name: tyz_Latn
data_files:
- split: train
path: tyz_Latn/*_keep.jsonl
- config_name: aau_Latn
data_files:
- split: train
path: aau_Latn/*_keep.jsonl
- config_name: mah_Latn
data_files:
- split: train
path: mah_Latn/*_keep.jsonl
- config_name: nak_Latn
data_files:
- split: train
path: nak_Latn/*_keep.jsonl
- config_name: kmb_Latn
data_files:
- split: train
path: kmb_Latn/*_keep.jsonl
- config_name: mjw_Latn
data_files:
- split: train
path: mjw_Latn/*_keep.jsonl
- config_name: fuf_Latn
data_files:
- split: train
path: fuf_Latn/*_keep.jsonl
- config_name: bba_Latn
data_files:
- split: train
path: bba_Latn/*_keep.jsonl
- config_name: ron_Cyrl
data_files:
- split: train
path: ron_Cyrl/*_keep.jsonl
- config_name: amu_Latn
data_files:
- split: train
path: amu_Latn/*_keep.jsonl
- config_name: alz_Latn
data_files:
- split: train
path: alz_Latn/*_keep.jsonl
- config_name: cop_Copt
data_files:
- split: train
path: cop_Copt/*_keep.jsonl
- config_name: rar_Latn
data_files:
- split: train
path: rar_Latn/*_keep.jsonl
- config_name: emp_Latn
data_files:
- split: train
path: emp_Latn/*_keep.jsonl
- config_name: arc_Syrc
data_files:
- split: train
path: arc_Syrc/*_keep.jsonl
- config_name: nyn_Latn
data_files:
- split: train
path: nyn_Latn/*_keep.jsonl
- config_name: rut_Cyrl
data_files:
- split: train
path: rut_Cyrl/*_keep.jsonl
- config_name: lub_Latn
data_files:
- split: train
path: lub_Latn/*_keep.jsonl
- config_name: tzj_Latn
data_files:
- split: train
path: tzj_Latn/*_keep.jsonl
- config_name: fat_Latn
data_files:
- split: train
path: fat_Latn/*_keep.jsonl
- config_name: lzh_Hani
data_files:
- split: train
path: lzh_Hani/*_keep.jsonl
- config_name: aak_Latn
data_files:
- split: train
path: aak_Latn/*_keep.jsonl
- config_name: nnb_Latn
data_files:
- split: train
path: nnb_Latn/*_keep.jsonl
- config_name: lhu_Latn
data_files:
- split: train
path: lhu_Latn/*_keep.jsonl
- config_name: bjv_Latn
data_files:
- split: train
path: bjv_Latn/*_keep.jsonl
- config_name: bum_Latn
data_files:
- split: train
path: bum_Latn/*_keep.jsonl
- config_name: maq_Latn
data_files:
- split: train
path: maq_Latn/*_keep.jsonl
- config_name: roa_Latn
data_files:
- split: train
path: roa_Latn/*_keep.jsonl
- config_name: bvr_Latn
data_files:
- split: train
path: bvr_Latn/*_keep.jsonl
- config_name: kcg_Latn
data_files:
- split: train
path: kcg_Latn/*_keep.jsonl
- config_name: tok_Latn
data_files:
- split: train
path: tok_Latn/*_keep.jsonl
- config_name: yrk_Cyrl
data_files:
- split: train
path: yrk_Cyrl/*_keep.jsonl
- config_name: khs_Latn
data_files:
- split: train
path: khs_Latn/*_keep.jsonl
- config_name: rup_Latn
data_files:
- split: train
path: rup_Latn/*_keep.jsonl
- config_name: tuk_Cyrl
data_files:
- split: train
path: tuk_Cyrl/*_keep.jsonl
- config_name: qub_Latn
data_files:
- split: train
path: qub_Latn/*_keep.jsonl
- config_name: jac_Latn
data_files:
- split: train
path: jac_Latn/*_keep.jsonl
- config_name: hmr_Latn
data_files:
- split: train
path: hmr_Latn/*_keep.jsonl
- config_name: sid_Latn
data_files:
- split: train
path: sid_Latn/*_keep.jsonl
- config_name: chr_Cher
data_files:
- split: train
path: chr_Cher/*_keep.jsonl
- config_name: amp_Latn
data_files:
- split: train
path: amp_Latn/*_keep.jsonl
- config_name: qug_Latn
data_files:
- split: train
path: qug_Latn/*_keep.jsonl
- config_name: mas_Latn
data_files:
- split: train
path: mas_Latn/*_keep.jsonl
- config_name: fkv_Latn
data_files:
- split: train
path: fkv_Latn/*_keep.jsonl
- config_name: mal_Latn
data_files:
- split: train
path: mal_Latn/*_keep.jsonl
- config_name: bem_Latn
data_files:
- split: train
path: bem_Latn/*_keep.jsonl
- config_name: cav_Latn
data_files:
- split: train
path: cav_Latn/*_keep.jsonl
- config_name: her_Latn
data_files:
- split: train
path: her_Latn/*_keep.jsonl
- config_name: kwn_Latn
data_files:
- split: train
path: kwn_Latn/*_keep.jsonl
- config_name: ify_Latn
data_files:
- split: train
path: ify_Latn/*_keep.jsonl
- config_name: asm_Latn
data_files:
- split: train
path: asm_Latn/*_keep.jsonl
- config_name: gpe_Latn
data_files:
- split: train
path: gpe_Latn/*_keep.jsonl
- config_name: zpa_Latn
data_files:
- split: train
path: zpa_Latn/*_keep.jsonl
- config_name: agu_Latn
data_files:
- split: train
path: agu_Latn/*_keep.jsonl
- config_name: ppk_Latn
data_files:
- split: train
path: ppk_Latn/*_keep.jsonl
- config_name: tiv_Latn
data_files:
- split: train
path: tiv_Latn/*_keep.jsonl
- config_name: npi_Latn
data_files:
- split: train
path: npi_Latn/*_keep.jsonl
- config_name: ahk_Latn
data_files:
- split: train
path: ahk_Latn/*_keep.jsonl
- config_name: pis_Latn
data_files:
- split: train
path: pis_Latn/*_keep.jsonl
- config_name: hns_Latn
data_files:
- split: train
path: hns_Latn/*_keep.jsonl
- config_name: sus_Latn
data_files:
- split: train
path: sus_Latn/*_keep.jsonl
- config_name: hak_Hani
data_files:
- split: train
path: hak_Hani/*_keep.jsonl
- config_name: bon_Latn
data_files:
- split: train
path: bon_Latn/*_keep.jsonl
- config_name: sja_Latn
data_files:
- split: train
path: sja_Latn/*_keep.jsonl
- config_name: kas_Deva
data_files:
- split: train
path: kas_Deva/*_keep.jsonl
- config_name: mar_Latn
data_files:
- split: train
path: mar_Latn/*_keep.jsonl
- config_name: kri_Latn
data_files:
- split: train
path: kri_Latn/*_keep.jsonl
- config_name: dik_Latn
data_files:
- split: train
path: dik_Latn/*_keep.jsonl
- config_name: wrk_Latn
data_files:
- split: train
path: wrk_Latn/*_keep.jsonl
- config_name: got_Goth
data_files:
- split: train
path: got_Goth/*_keep.jsonl
- config_name: dar_Cyrl
data_files:
- split: train
path: dar_Cyrl/*_keep.jsonl
- config_name: gui_Latn
data_files:
- split: train
path: gui_Latn/*_keep.jsonl
- config_name: mak_Latn
data_files:
- split: train
path: mak_Latn/*_keep.jsonl
- config_name: jiv_Latn
data_files:
- split: train
path: jiv_Latn/*_keep.jsonl
- config_name: kjb_Latn
data_files:
- split: train
path: kjb_Latn/*_keep.jsonl
- config_name: maz_Latn
data_files:
- split: train
path: maz_Latn/*_keep.jsonl
- config_name: bsn_Latn
data_files:
- split: train
path: bsn_Latn/*_keep.jsonl
- config_name: dov_Latn
data_files:
- split: train
path: dov_Latn/*_keep.jsonl
- config_name: cok_Latn
data_files:
- split: train
path: cok_Latn/*_keep.jsonl
- config_name: rwo_Latn
data_files:
- split: train
path: rwo_Latn/*_keep.jsonl
- config_name: mag_Deva
data_files:
- split: train
path: mag_Deva/*_keep.jsonl
- config_name: hnj_Latn
data_files:
- split: train
path: hnj_Latn/*_keep.jsonl
- config_name: krj_Latn
data_files:
- split: train
path: krj_Latn/*_keep.jsonl
- config_name: teo_Latn
data_files:
- split: train
path: teo_Latn/*_keep.jsonl
- config_name: enq_Latn
data_files:
- split: train
path: enq_Latn/*_keep.jsonl
- config_name: aom_Latn
data_files:
- split: train
path: aom_Latn/*_keep.jsonl
- config_name: ffm_Latn
data_files:
- split: train
path: ffm_Latn/*_keep.jsonl
- config_name: din_Latn
data_files:
- split: train
path: din_Latn/*_keep.jsonl
- config_name: ubu_Latn
data_files:
- split: train
path: ubu_Latn/*_keep.jsonl
- config_name: pwn_Latn
data_files:
- split: train
path: pwn_Latn/*_keep.jsonl
- config_name: gum_Latn
data_files:
- split: train
path: gum_Latn/*_keep.jsonl
- config_name: sny_Latn
data_files:
- split: train
path: sny_Latn/*_keep.jsonl
- config_name: twu_Latn
data_files:
- split: train
path: twu_Latn/*_keep.jsonl
- config_name: med_Latn
data_files:
- split: train
path: med_Latn/*_keep.jsonl
- config_name: acn_Latn
data_files:
- split: train
path: acn_Latn/*_keep.jsonl
- config_name: rme_Latn
data_files:
- split: train
path: rme_Latn/*_keep.jsonl
- config_name: cbs_Latn
data_files:
- split: train
path: cbs_Latn/*_keep.jsonl
- config_name: abx_Latn
data_files:
- split: train
path: abx_Latn/*_keep.jsonl
- config_name: ndc_Latn
data_files:
- split: train
path: ndc_Latn/*_keep.jsonl
- config_name: ibb_Latn
data_files:
- split: train
path: ibb_Latn/*_keep.jsonl
- config_name: pih_Latn
data_files:
- split: train
path: pih_Latn/*_keep.jsonl
- config_name: zty_Latn
data_files:
- split: train
path: zty_Latn/*_keep.jsonl
- config_name: rad_Latn
data_files:
- split: train
path: rad_Latn/*_keep.jsonl
- config_name: aby_Latn
data_files:
- split: train
path: aby_Latn/*_keep.jsonl
- config_name: zne_Latn
data_files:
- split: train
path: zne_Latn/*_keep.jsonl
- config_name: cui_Latn
data_files:
- split: train
path: cui_Latn/*_keep.jsonl
- config_name: lua_Latn
data_files:
- split: train
path: lua_Latn/*_keep.jsonl
- config_name: yby_Latn
data_files:
- split: train
path: yby_Latn/*_keep.jsonl
- config_name: bno_Latn
data_files:
- split: train
path: bno_Latn/*_keep.jsonl
- config_name: niu_Latn
data_files:
- split: train
path: niu_Latn/*_keep.jsonl
- config_name: agd_Latn
data_files:
- split: train
path: agd_Latn/*_keep.jsonl
- config_name: nij_Latn
data_files:
- split: train
path: nij_Latn/*_keep.jsonl
- config_name: agg_Latn
data_files:
- split: train
path: agg_Latn/*_keep.jsonl
- config_name: yre_Latn
data_files:
- split: train
path: yre_Latn/*_keep.jsonl
- config_name: fip_Latn
data_files:
- split: train
path: fip_Latn/*_keep.jsonl
- config_name: qve_Latn
data_files:
- split: train
path: qve_Latn/*_keep.jsonl
- config_name: tsg_Latn
data_files:
- split: train
path: tsg_Latn/*_keep.jsonl
- config_name: skg_Latn
data_files:
- split: train
path: skg_Latn/*_keep.jsonl
- config_name: adj_Latn
data_files:
- split: train
path: adj_Latn/*_keep.jsonl
- config_name: mfq_Latn
data_files:
- split: train
path: mfq_Latn/*_keep.jsonl
- config_name: agx_Cyrl
data_files:
- split: train
path: agx_Cyrl/*_keep.jsonl
- config_name: kmg_Latn
data_files:
- split: train
path: kmg_Latn/*_keep.jsonl
- config_name: umb_Latn
data_files:
- split: train
path: umb_Latn/*_keep.jsonl
- config_name: cnk_Latn
data_files:
- split: train
path: cnk_Latn/*_keep.jsonl
- config_name: nhw_Latn
data_files:
- split: train
path: nhw_Latn/*_keep.jsonl
- config_name: maa_Latn
data_files:
- split: train
path: maa_Latn/*_keep.jsonl
- config_name: kwy_Latn
data_files:
- split: train
path: kwy_Latn/*_keep.jsonl
- config_name: jra_Latn
data_files:
- split: train
path: jra_Latn/*_keep.jsonl
- config_name: syr_Syrc
data_files:
- split: train
path: syr_Syrc/*_keep.jsonl
- config_name: inb_Latn
data_files:
- split: train
path: inb_Latn/*_keep.jsonl
- config_name: pau_Latn
data_files:
- split: train
path: pau_Latn/*_keep.jsonl
- config_name: xsm_Latn
data_files:
- split: train
path: xsm_Latn/*_keep.jsonl
- config_name: vap_Latn
data_files:
- split: train
path: vap_Latn/*_keep.jsonl
- config_name: alp_Latn
data_files:
- split: train
path: alp_Latn/*_keep.jsonl
- config_name: des_Latn
data_files:
- split: train
path: des_Latn/*_keep.jsonl
- config_name: syl_Latn
data_files:
- split: train
path: syl_Latn/*_keep.jsonl
- config_name: kmr_Cyrl
data_files:
- split: train
path: kmr_Cyrl/*_keep.jsonl
- config_name: mkn_Latn
data_files:
- split: train
path: mkn_Latn/*_keep.jsonl
- config_name: gan_Hani
data_files:
- split: train
path: gan_Hani/*_keep.jsonl
- config_name: qwh_Latn
data_files:
- split: train
path: qwh_Latn/*_keep.jsonl
- config_name: jvn_Latn
data_files:
- split: train
path: jvn_Latn/*_keep.jsonl
- config_name: bib_Latn
data_files:
- split: train
path: bib_Latn/*_keep.jsonl
- config_name: ckt_Cyrl
data_files:
- split: train
path: ckt_Cyrl/*_keep.jsonl
- config_name: mpx_Latn
data_files:
- split: train
path: mpx_Latn/*_keep.jsonl
- config_name: bin_Latn
data_files:
- split: train
path: bin_Latn/*_keep.jsonl
- config_name: klv_Latn
data_files:
- split: train
path: klv_Latn/*_keep.jsonl
- config_name: xon_Latn
data_files:
- split: train
path: xon_Latn/*_keep.jsonl
- config_name: jbu_Latn
data_files:
- split: train
path: jbu_Latn/*_keep.jsonl
- config_name: bdh_Latn
data_files:
- split: train
path: bdh_Latn/*_keep.jsonl
- config_name: knj_Latn
data_files:
- split: train
path: knj_Latn/*_keep.jsonl
- config_name: ptu_Latn
data_files:
- split: train
path: ptu_Latn/*_keep.jsonl
- config_name: mmn_Latn
data_files:
- split: train
path: mmn_Latn/*_keep.jsonl
- config_name: mwq_Latn
data_files:
- split: train
path: mwq_Latn/*_keep.jsonl
- config_name: sop_Latn
data_files:
- split: train
path: sop_Latn/*_keep.jsonl
- config_name: cgc_Latn
data_files:
- split: train
path: cgc_Latn/*_keep.jsonl
- config_name: rmy_Cyrl
data_files:
- split: train
path: rmy_Cyrl/*_keep.jsonl
- config_name: dop_Latn
data_files:
- split: train
path: dop_Latn/*_keep.jsonl
- config_name: mgr_Latn
data_files:
- split: train
path: mgr_Latn/*_keep.jsonl
- config_name: rmo_Latn
data_files:
- split: train
path: rmo_Latn/*_keep.jsonl
- config_name: tbc_Latn
data_files:
- split: train
path: tbc_Latn/*_keep.jsonl
- config_name: kyq_Latn
data_files:
- split: train
path: kyq_Latn/*_keep.jsonl
- config_name: zpu_Latn
data_files:
- split: train
path: zpu_Latn/*_keep.jsonl
- config_name: kkc_Latn
data_files:
- split: train
path: kkc_Latn/*_keep.jsonl
- config_name: acu_Latn
data_files:
- split: train
path: acu_Latn/*_keep.jsonl
- config_name: ata_Latn
data_files:
- split: train
path: ata_Latn/*_keep.jsonl
- config_name: gag_Cyrl
data_files:
- split: train
path: gag_Cyrl/*_keep.jsonl
- config_name: kan_Latn
data_files:
- split: train
path: kan_Latn/*_keep.jsonl
- config_name: mif_Latn
data_files:
- split: train
path: mif_Latn/*_keep.jsonl
- config_name: sus_Arab
data_files:
- split: train
path: sus_Arab/*_keep.jsonl
- config_name: tsz_Latn
data_files:
- split: train
path: tsz_Latn/*_keep.jsonl
- config_name: mbi_Latn
data_files:
- split: train
path: mbi_Latn/*_keep.jsonl
- config_name: imo_Latn
data_files:
- split: train
path: imo_Latn/*_keep.jsonl
- config_name: fuh_Latn
data_files:
- split: train
path: fuh_Latn/*_keep.jsonl
- config_name: zyp_Latn
data_files:
- split: train
path: zyp_Latn/*_keep.jsonl
- config_name: apr_Latn
data_files:
- split: train
path: apr_Latn/*_keep.jsonl
- config_name: att_Latn
data_files:
- split: train
path: att_Latn/*_keep.jsonl
- config_name: bus_Latn
data_files:
- split: train
path: bus_Latn/*_keep.jsonl
- config_name: kqp_Latn
data_files:
- split: train
path: kqp_Latn/*_keep.jsonl
- config_name: koo_Latn
data_files:
- split: train
path: koo_Latn/*_keep.jsonl
- config_name: bas_Latn
data_files:
- split: train
path: bas_Latn/*_keep.jsonl
- config_name: urh_Latn
data_files:
- split: train
path: urh_Latn/*_keep.jsonl
- config_name: bbr_Latn
data_files:
- split: train
path: bbr_Latn/*_keep.jsonl
- config_name: msy_Latn
data_files:
- split: train
path: msy_Latn/*_keep.jsonl
- config_name: mdy_Ethi
data_files:
- split: train
path: mdy_Ethi/*_keep.jsonl
- config_name: nas_Latn
data_files:
- split: train
path: nas_Latn/*_keep.jsonl
- config_name: lee_Latn
data_files:
- split: train
path: lee_Latn/*_keep.jsonl
- config_name: mcu_Latn
data_files:
- split: train
path: mcu_Latn/*_keep.jsonl
- config_name: mrj_Latn
data_files:
- split: train
path: mrj_Latn/*_keep.jsonl
- config_name: dig_Latn
data_files:
- split: train
path: dig_Latn/*_keep.jsonl
- config_name: izr_Latn
data_files:
- split: train
path: izr_Latn/*_keep.jsonl
- config_name: nfr_Latn
data_files:
- split: train
path: nfr_Latn/*_keep.jsonl
- config_name: hto_Latn
data_files:
- split: train
path: hto_Latn/*_keep.jsonl
- config_name: mzw_Latn
data_files:
- split: train
path: mzw_Latn/*_keep.jsonl
- config_name: qxh_Latn
data_files:
- split: train
path: qxh_Latn/*_keep.jsonl
- config_name: chz_Latn
data_files:
- split: train
path: chz_Latn/*_keep.jsonl
- config_name: guh_Latn
data_files:
- split: train
path: guh_Latn/*_keep.jsonl
- config_name: ded_Latn
data_files:
- split: train
path: ded_Latn/*_keep.jsonl
- config_name: bvz_Latn
data_files:
- split: train
path: bvz_Latn/*_keep.jsonl
- config_name: kkj_Latn
data_files:
- split: train
path: kkj_Latn/*_keep.jsonl
- config_name: gux_Latn
data_files:
- split: train
path: gux_Latn/*_keep.jsonl
- config_name: ikt_Latn
data_files:
- split: train
path: ikt_Latn/*_keep.jsonl
- config_name: bnp_Latn
data_files:
- split: train
path: bnp_Latn/*_keep.jsonl
- config_name: bgt_Latn
data_files:
- split: train
path: bgt_Latn/*_keep.jsonl
- config_name: quf_Latn
data_files:
- split: train
path: quf_Latn/*_keep.jsonl
- config_name: dwr_Latn
data_files:
- split: train
path: dwr_Latn/*_keep.jsonl
- config_name: nod_Thai
data_files:
- split: train
path: nod_Thai/*_keep.jsonl
- config_name: yal_Latn
data_files:
- split: train
path: yal_Latn/*_keep.jsonl
- config_name: bmr_Latn
data_files:
- split: train
path: bmr_Latn/*_keep.jsonl
- config_name: ljp_Latn
data_files:
- split: train
path: ljp_Latn/*_keep.jsonl
- config_name: xla_Latn
data_files:
- split: train
path: xla_Latn/*_keep.jsonl
- config_name: zyb_Latn
data_files:
- split: train
path: zyb_Latn/*_keep.jsonl
- config_name: gof_Latn
data_files:
- split: train
path: gof_Latn/*_keep.jsonl
- config_name: niv_Cyrl
data_files:
- split: train
path: niv_Cyrl/*_keep.jsonl
- config_name: nrf_Latn
data_files:
- split: train
path: nrf_Latn/*_keep.jsonl
- config_name: npy_Latn
data_files:
- split: train
path: npy_Latn/*_keep.jsonl
- config_name: ura_Latn
data_files:
- split: train
path: ura_Latn/*_keep.jsonl
- config_name: dob_Latn
data_files:
- split: train
path: dob_Latn/*_keep.jsonl
- config_name: gfk_Latn
data_files:
- split: train
path: gfk_Latn/*_keep.jsonl
- config_name: gdr_Latn
data_files:
- split: train
path: gdr_Latn/*_keep.jsonl
- config_name: sdc_Latn
data_files:
- split: train
path: sdc_Latn/*_keep.jsonl
- config_name: pjt_Latn
data_files:
- split: train
path: pjt_Latn/*_keep.jsonl
- config_name: spp_Latn
data_files:
- split: train
path: spp_Latn/*_keep.jsonl
- config_name: irk_Latn
data_files:
- split: train
path: irk_Latn/*_keep.jsonl
- config_name: cpa_Latn
data_files:
- split: train
path: cpa_Latn/*_keep.jsonl
- config_name: knc_Latn
data_files:
- split: train
path: knc_Latn/*_keep.jsonl
- config_name: gvl_Latn
data_files:
- split: train
path: gvl_Latn/*_keep.jsonl
- config_name: agr_Latn
data_files:
- split: train
path: agr_Latn/*_keep.jsonl
- config_name: njo_Latn
data_files:
- split: train
path: njo_Latn/*_keep.jsonl
- config_name: xrb_Latn
data_files:
- split: train
path: xrb_Latn/*_keep.jsonl
- config_name: kao_Latn
data_files:
- split: train
path: kao_Latn/*_keep.jsonl
- config_name: zpz_Latn
data_files:
- split: train
path: zpz_Latn/*_keep.jsonl
- config_name: bgr_Latn
data_files:
- split: train
path: bgr_Latn/*_keep.jsonl
- config_name: rnd_Latn
data_files:
- split: train
path: rnd_Latn/*_keep.jsonl
- config_name: ntu_Latn
data_files:
- split: train
path: ntu_Latn/*_keep.jsonl
- config_name: aai_Latn
data_files:
- split: train
path: aai_Latn/*_keep.jsonl
- config_name: gai_Latn
data_files:
- split: train
path: gai_Latn/*_keep.jsonl
- config_name: dgi_Latn
data_files:
- split: train
path: dgi_Latn/*_keep.jsonl
- config_name: kez_Latn
data_files:
- split: train
path: kez_Latn/*_keep.jsonl
- config_name: dad_Latn
data_files:
- split: train
path: dad_Latn/*_keep.jsonl
- config_name: kmh_Latn
data_files:
- split: train
path: kmh_Latn/*_keep.jsonl
- config_name: aii_Syrc
data_files:
- split: train
path: aii_Syrc/*_keep.jsonl
- config_name: ifk_Latn
data_files:
- split: train
path: ifk_Latn/*_keep.jsonl
- config_name: aji_Latn
data_files:
- split: train
path: aji_Latn/*_keep.jsonl
- config_name: noa_Latn
data_files:
- split: train
path: noa_Latn/*_keep.jsonl
- config_name: bhl_Latn
data_files:
- split: train
path: bhl_Latn/*_keep.jsonl
- config_name: ztq_Latn
data_files:
- split: train
path: ztq_Latn/*_keep.jsonl
- config_name: rmn_Cyrl
data_files:
- split: train
path: rmn_Cyrl/*_keep.jsonl
- config_name: gde_Latn
data_files:
- split: train
path: gde_Latn/*_keep.jsonl
- config_name: dnj_Latn
data_files:
- split: train
path: dnj_Latn/*_keep.jsonl
- config_name: men_Latn
data_files:
- split: train
path: men_Latn/*_keep.jsonl
- config_name: miq_Latn
data_files:
- split: train
path: miq_Latn/*_keep.jsonl
- config_name: mdf_Latn
data_files:
- split: train
path: mdf_Latn/*_keep.jsonl
- config_name: isd_Latn
data_files:
- split: train
path: isd_Latn/*_keep.jsonl
- config_name: lgg_Latn
data_files:
- split: train
path: lgg_Latn/*_keep.jsonl
- config_name: ncx_Latn
data_files:
- split: train
path: ncx_Latn/*_keep.jsonl
- config_name: gub_Latn
data_files:
- split: train
path: gub_Latn/*_keep.jsonl
- config_name: itv_Latn
data_files:
- split: train
path: itv_Latn/*_keep.jsonl
- config_name: tac_Latn
data_files:
- split: train
path: tac_Latn/*_keep.jsonl
- config_name: cce_Latn
data_files:
- split: train
path: cce_Latn/*_keep.jsonl
- config_name: kwi_Latn
data_files:
- split: train
path: kwi_Latn/*_keep.jsonl
- config_name: nhu_Latn
data_files:
- split: train
path: nhu_Latn/*_keep.jsonl
- config_name: snd_Deva
data_files:
- split: train
path: snd_Deva/*_keep.jsonl
- config_name: sml_Latn
data_files:
- split: train
path: sml_Latn/*_keep.jsonl
- config_name: pah_Latn
data_files:
- split: train
path: pah_Latn/*_keep.jsonl
- config_name: sas_Latn
data_files:
- split: train
path: sas_Latn/*_keep.jsonl
- config_name: cwt_Latn
data_files:
- split: train
path: cwt_Latn/*_keep.jsonl
- config_name: spy_Latn
data_files:
- split: train
path: spy_Latn/*_keep.jsonl
- config_name: enb_Latn
data_files:
- split: train
path: enb_Latn/*_keep.jsonl
- config_name: fai_Latn
data_files:
- split: train
path: fai_Latn/*_keep.jsonl
- config_name: cas_Latn
data_files:
- split: train
path: cas_Latn/*_keep.jsonl
- config_name: sue_Latn
data_files:
- split: train
path: sue_Latn/*_keep.jsonl
- config_name: yss_Latn
data_files:
- split: train
path: yss_Latn/*_keep.jsonl
- config_name: ozm_Latn
data_files:
- split: train
path: ozm_Latn/*_keep.jsonl
- config_name: bku_Latn
data_files:
- split: train
path: bku_Latn/*_keep.jsonl
- config_name: biv_Latn
data_files:
- split: train
path: biv_Latn/*_keep.jsonl
- config_name: akp_Latn
data_files:
- split: train
path: akp_Latn/*_keep.jsonl
- config_name: dts_Latn
data_files:
- split: train
path: dts_Latn/*_keep.jsonl
- config_name: kdr_Latn
data_files:
- split: train
path: kdr_Latn/*_keep.jsonl
- config_name: fal_Latn
data_files:
- split: train
path: fal_Latn/*_keep.jsonl
- config_name: mmo_Latn
data_files:
- split: train
path: mmo_Latn/*_keep.jsonl
- config_name: byr_Latn
data_files:
- split: train
path: byr_Latn/*_keep.jsonl
- config_name: cbv_Latn
data_files:
- split: train
path: cbv_Latn/*_keep.jsonl
- config_name: cbu_Latn
data_files:
- split: train
path: cbu_Latn/*_keep.jsonl
- config_name: eza_Latn
data_files:
- split: train
path: eza_Latn/*_keep.jsonl
- config_name: kgp_Latn
data_files:
- split: train
path: kgp_Latn/*_keep.jsonl
- config_name: sda_Latn
data_files:
- split: train
path: sda_Latn/*_keep.jsonl
- config_name: gbo_Latn
data_files:
- split: train
path: gbo_Latn/*_keep.jsonl
- config_name: mfi_Latn
data_files:
- split: train
path: mfi_Latn/*_keep.jsonl
- config_name: bdd_Latn
data_files:
- split: train
path: bdd_Latn/*_keep.jsonl
- config_name: boj_Latn
data_files:
- split: train
path: boj_Latn/*_keep.jsonl
- config_name: blh_Latn
data_files:
- split: train
path: blh_Latn/*_keep.jsonl
- config_name: ibg_Latn
data_files:
- split: train
path: ibg_Latn/*_keep.jsonl
- config_name: agn_Latn
data_files:
- split: train
path: agn_Latn/*_keep.jsonl
- config_name: vmy_Latn
data_files:
- split: train
path: vmy_Latn/*_keep.jsonl
- config_name: sua_Latn
data_files:
- split: train
path: sua_Latn/*_keep.jsonl
- config_name: gul_Latn
data_files:
- split: train
path: gul_Latn/*_keep.jsonl
- config_name: rap_Latn
data_files:
- split: train
path: rap_Latn/*_keep.jsonl
- config_name: bmh_Latn
data_files:
- split: train
path: bmh_Latn/*_keep.jsonl
- config_name: sxn_Latn
data_files:
- split: train
path: sxn_Latn/*_keep.jsonl
- config_name: dyi_Latn
data_files:
- split: train
path: dyi_Latn/*_keep.jsonl
- config_name: tdx_Latn
data_files:
- split: train
path: tdx_Latn/*_keep.jsonl
- config_name: car_Latn
data_files:
- split: train
path: car_Latn/*_keep.jsonl
- config_name: heh_Latn
data_files:
- split: train
path: heh_Latn/*_keep.jsonl
- config_name: dgc_Latn
data_files:
- split: train
path: dgc_Latn/*_keep.jsonl
- config_name: sgb_Latn
data_files:
- split: train
path: sgb_Latn/*_keep.jsonl
- config_name: bpr_Latn
data_files:
- split: train
path: bpr_Latn/*_keep.jsonl
- config_name: gbi_Latn
data_files:
- split: train
path: gbi_Latn/*_keep.jsonl
- config_name: avu_Latn
data_files:
- split: train
path: avu_Latn/*_keep.jsonl
- config_name: udu_Latn
data_files:
- split: train
path: udu_Latn/*_keep.jsonl
- config_name: yli_Latn
data_files:
- split: train
path: yli_Latn/*_keep.jsonl
- config_name: cjs_Cyrl
data_files:
- split: train
path: cjs_Cyrl/*_keep.jsonl
- config_name: gnn_Latn
data_files:
- split: train
path: gnn_Latn/*_keep.jsonl
- config_name: nhi_Latn
data_files:
- split: train
path: nhi_Latn/*_keep.jsonl
- config_name: urk_Thai
data_files:
- split: train
path: urk_Thai/*_keep.jsonl
- config_name: kpy_Cyrl
data_files:
- split: train
path: kpy_Cyrl/*_keep.jsonl
- config_name: qup_Latn
data_files:
- split: train
path: qup_Latn/*_keep.jsonl
- config_name: qvh_Latn
data_files:
- split: train
path: qvh_Latn/*_keep.jsonl
- config_name: pir_Latn
data_files:
- split: train
path: pir_Latn/*_keep.jsonl
- config_name: tsc_Latn
data_files:
- split: train
path: tsc_Latn/*_keep.jsonl
- config_name: bxr_Latn
data_files:
- split: train
path: bxr_Latn/*_keep.jsonl
- config_name: thk_Latn
data_files:
- split: train
path: thk_Latn/*_keep.jsonl
- config_name: grt_Beng
data_files:
- split: train
path: grt_Beng/*_keep.jsonl
- config_name: kqn_Latn
data_files:
- split: train
path: kqn_Latn/*_keep.jsonl
- config_name: hay_Latn
data_files:
- split: train
path: hay_Latn/*_keep.jsonl
- config_name: mqb_Latn
data_files:
- split: train
path: mqb_Latn/*_keep.jsonl
- config_name: tbg_Latn
data_files:
- split: train
path: tbg_Latn/*_keep.jsonl
- config_name: bss_Latn
data_files:
- split: train
path: bss_Latn/*_keep.jsonl
- config_name: trn_Latn
data_files:
- split: train
path: trn_Latn/*_keep.jsonl
- config_name: jae_Latn
data_files:
- split: train
path: jae_Latn/*_keep.jsonl
- config_name: myy_Latn
data_files:
- split: train
path: myy_Latn/*_keep.jsonl
- config_name: ipk_Latn
data_files:
- split: train
path: ipk_Latn/*_keep.jsonl
- config_name: bao_Latn
data_files:
- split: train
path: bao_Latn/*_keep.jsonl
- config_name: qvz_Latn
data_files:
- split: train
path: qvz_Latn/*_keep.jsonl
- config_name: dua_Latn
data_files:
- split: train
path: dua_Latn/*_keep.jsonl
- config_name: mwv_Latn
data_files:
- split: train
path: mwv_Latn/*_keep.jsonl
- config_name: nct_Latn
data_files:
- split: train
path: nct_Latn/*_keep.jsonl
- config_name: kyc_Latn
data_files:
- split: train
path: kyc_Latn/*_keep.jsonl
- config_name: tuo_Latn
data_files:
- split: train
path: tuo_Latn/*_keep.jsonl
- config_name: tee_Latn
data_files:
- split: train
path: tee_Latn/*_keep.jsonl
- config_name: mop_Latn
data_files:
- split: train
path: mop_Latn/*_keep.jsonl
- config_name: mxt_Latn
data_files:
- split: train
path: mxt_Latn/*_keep.jsonl
- config_name: qvm_Latn
data_files:
- split: train
path: qvm_Latn/*_keep.jsonl
- config_name: gkn_Latn
data_files:
- split: train
path: gkn_Latn/*_keep.jsonl
- config_name: lam_Latn
data_files:
- split: train
path: lam_Latn/*_keep.jsonl
- config_name: ntr_Latn
data_files:
- split: train
path: ntr_Latn/*_keep.jsonl
- config_name: sbe_Latn
data_files:
- split: train
path: sbe_Latn/*_keep.jsonl
- config_name: dyo_Latn
data_files:
- split: train
path: dyo_Latn/*_keep.jsonl
- config_name: lex_Latn
data_files:
- split: train
path: lex_Latn/*_keep.jsonl
- config_name: smk_Latn
data_files:
- split: train
path: smk_Latn/*_keep.jsonl
- config_name: coe_Latn
data_files:
- split: train
path: coe_Latn/*_keep.jsonl
- config_name: kpe_Latn
data_files:
- split: train
path: kpe_Latn/*_keep.jsonl
- config_name: mni_Latn
data_files:
- split: train
path: mni_Latn/*_keep.jsonl
- config_name: xbi_Latn
data_files:
- split: train
path: xbi_Latn/*_keep.jsonl
- config_name: atb_Latn
data_files:
- split: train
path: atb_Latn/*_keep.jsonl
- config_name: tcf_Latn
data_files:
- split: train
path: tcf_Latn/*_keep.jsonl
- config_name: bfo_Latn
data_files:
- split: train
path: bfo_Latn/*_keep.jsonl
- config_name: cbc_Latn
data_files:
- split: train
path: cbc_Latn/*_keep.jsonl
- config_name: swg_Latn
data_files:
- split: train
path: swg_Latn/*_keep.jsonl
- config_name: niv_Latn
data_files:
- split: train
path: niv_Latn/*_keep.jsonl
- config_name: cnt_Latn
data_files:
- split: train
path: cnt_Latn/*_keep.jsonl
- config_name: izz_Latn
data_files:
- split: train
path: izz_Latn/*_keep.jsonl
- config_name: knf_Latn
data_files:
- split: train
path: knf_Latn/*_keep.jsonl
- config_name: mpp_Latn
data_files:
- split: train
path: mpp_Latn/*_keep.jsonl
- config_name: kki_Latn
data_files:
- split: train
path: kki_Latn/*_keep.jsonl
- config_name: nho_Latn
data_files:
- split: train
path: nho_Latn/*_keep.jsonl
- config_name: sat_Latn
data_files:
- split: train
path: sat_Latn/*_keep.jsonl
- config_name: kbr_Latn
data_files:
- split: train
path: kbr_Latn/*_keep.jsonl
- config_name: bmu_Latn
data_files:
- split: train
path: bmu_Latn/*_keep.jsonl
- config_name: hig_Latn
data_files:
- split: train
path: hig_Latn/*_keep.jsonl
- config_name: qxr_Latn
data_files:
- split: train
path: qxr_Latn/*_keep.jsonl
- config_name: orv_Cyrl
data_files:
- split: train
path: orv_Cyrl/*_keep.jsonl
- config_name: pma_Latn
data_files:
- split: train
path: pma_Latn/*_keep.jsonl
- config_name: zpl_Latn
data_files:
- split: train
path: zpl_Latn/*_keep.jsonl
- config_name: kpr_Latn
data_files:
- split: train
path: kpr_Latn/*_keep.jsonl
- config_name: sig_Latn
data_files:
- split: train
path: sig_Latn/*_keep.jsonl
- config_name: ory_Latn
data_files:
- split: train
path: ory_Latn/*_keep.jsonl
- config_name: mxp_Latn
data_files:
- split: train
path: mxp_Latn/*_keep.jsonl
- config_name: lef_Latn
data_files:
- split: train
path: lef_Latn/*_keep.jsonl
- config_name: mlp_Latn
data_files:
- split: train
path: mlp_Latn/*_keep.jsonl
- config_name: soq_Latn
data_files:
- split: train
path: soq_Latn/*_keep.jsonl
- config_name: cre_Latn
data_files:
- split: train
path: cre_Latn/*_keep.jsonl
- config_name: aey_Latn
data_files:
- split: train
path: aey_Latn/*_keep.jsonl
- config_name: qvs_Latn
data_files:
- split: train
path: qvs_Latn/*_keep.jsonl
- config_name: sur_Latn
data_files:
- split: train
path: sur_Latn/*_keep.jsonl
- config_name: knc_Arab
data_files:
- split: train
path: knc_Arab/*_keep.jsonl
- config_name: suk_Latn
data_files:
- split: train
path: suk_Latn/*_keep.jsonl
- config_name: cax_Latn
data_files:
- split: train
path: cax_Latn/*_keep.jsonl
- config_name: alq_Latn
data_files:
- split: train
path: alq_Latn/*_keep.jsonl
- config_name: chw_Latn
data_files:
- split: train
path: chw_Latn/*_keep.jsonl
- config_name: dww_Latn
data_files:
- split: train
path: dww_Latn/*_keep.jsonl
- config_name: gog_Latn
data_files:
- split: train
path: gog_Latn/*_keep.jsonl
- config_name: viv_Latn
data_files:
- split: train
path: viv_Latn/*_keep.jsonl
- config_name: qvw_Latn
data_files:
- split: train
path: qvw_Latn/*_keep.jsonl
- config_name: ted_Latn
data_files:
- split: train
path: ted_Latn/*_keep.jsonl
- config_name: yuj_Latn
data_files:
- split: train
path: yuj_Latn/*_keep.jsonl
- config_name: ziw_Latn
data_files:
- split: train
path: ziw_Latn/*_keep.jsonl
- config_name: kto_Latn
data_files:
- split: train
path: kto_Latn/*_keep.jsonl
- config_name: mcq_Latn
data_files:
- split: train
path: mcq_Latn/*_keep.jsonl
- config_name: sim_Latn
data_files:
- split: train
path: sim_Latn/*_keep.jsonl
- config_name: koi_Latn
data_files:
- split: train
path: koi_Latn/*_keep.jsonl
- config_name: lue_Latn
data_files:
- split: train
path: lue_Latn/*_keep.jsonl
- config_name: cya_Latn
data_files:
- split: train
path: cya_Latn/*_keep.jsonl
- config_name: knk_Latn
data_files:
- split: train
path: knk_Latn/*_keep.jsonl
- config_name: tpt_Latn
data_files:
- split: train
path: tpt_Latn/*_keep.jsonl
- config_name: taj_Deva
data_files:
- split: train
path: taj_Deva/*_keep.jsonl
- config_name: blz_Latn
data_files:
- split: train
path: blz_Latn/*_keep.jsonl
- config_name: mil_Latn
data_files:
- split: train
path: mil_Latn/*_keep.jsonl
- config_name: kde_Latn
data_files:
- split: train
path: kde_Latn/*_keep.jsonl
- config_name: kpz_Latn
data_files:
- split: train
path: kpz_Latn/*_keep.jsonl
- config_name: hag_Latn
data_files:
- split: train
path: hag_Latn/*_keep.jsonl
- config_name: vun_Latn
data_files:
- split: train
path: vun_Latn/*_keep.jsonl
- config_name: kus_Latn
data_files:
- split: train
path: kus_Latn/*_keep.jsonl
- config_name: lid_Latn
data_files:
- split: train
path: lid_Latn/*_keep.jsonl
- config_name: tos_Latn
data_files:
- split: train
path: tos_Latn/*_keep.jsonl
- config_name: bfd_Latn
data_files:
- split: train
path: bfd_Latn/*_keep.jsonl
- config_name: chd_Latn
data_files:
- split: train
path: chd_Latn/*_keep.jsonl
- config_name: cle_Latn
data_files:
- split: train
path: cle_Latn/*_keep.jsonl
- config_name: yon_Latn
data_files:
- split: train
path: yon_Latn/*_keep.jsonl
- config_name: bgs_Latn
data_files:
- split: train
path: bgs_Latn/*_keep.jsonl
- config_name: lsm_Latn
data_files:
- split: train
path: lsm_Latn/*_keep.jsonl
- config_name: msb_Latn
data_files:
- split: train
path: msb_Latn/*_keep.jsonl
- config_name: ota_Arab
data_files:
- split: train
path: ota_Arab/*_keep.jsonl
- config_name: guj_Latn
data_files:
- split: train
path: guj_Latn/*_keep.jsonl
- config_name: mxq_Latn
data_files:
- split: train
path: mxq_Latn/*_keep.jsonl
- config_name: lfn_Cyrl
data_files:
- split: train
path: lfn_Cyrl/*_keep.jsonl
- config_name: myw_Latn
data_files:
- split: train
path: myw_Latn/*_keep.jsonl
- config_name: aso_Latn
data_files:
- split: train
path: aso_Latn/*_keep.jsonl
- config_name: esu_Latn
data_files:
- split: train
path: esu_Latn/*_keep.jsonl
- config_name: kdi_Latn
data_files:
- split: train
path: kdi_Latn/*_keep.jsonl
- config_name: bhp_Latn
data_files:
- split: train
path: bhp_Latn/*_keep.jsonl
- config_name: mfz_Latn
data_files:
- split: train
path: mfz_Latn/*_keep.jsonl
- config_name: sgw_Ethi
data_files:
- split: train
path: sgw_Ethi/*_keep.jsonl
- config_name: xsi_Latn
data_files:
- split: train
path: xsi_Latn/*_keep.jsonl
- config_name: gun_Latn
data_files:
- split: train
path: gun_Latn/*_keep.jsonl
- config_name: kxc_Ethi
data_files:
- split: train
path: kxc_Ethi/*_keep.jsonl
- config_name: mux_Latn
data_files:
- split: train
path: mux_Latn/*_keep.jsonl
- config_name: otw_Latn
data_files:
- split: train
path: otw_Latn/*_keep.jsonl
- config_name: cjp_Latn
data_files:
- split: train
path: cjp_Latn/*_keep.jsonl
- config_name: nyo_Latn
data_files:
- split: train
path: nyo_Latn/*_keep.jsonl
- config_name: zia_Latn
data_files:
- split: train
path: zia_Latn/*_keep.jsonl
- config_name: cco_Latn
data_files:
- split: train
path: cco_Latn/*_keep.jsonl
- config_name: ojb_Cans
data_files:
- split: train
path: ojb_Cans/*_keep.jsonl
- config_name: ktu_Latn
data_files:
- split: train
path: ktu_Latn/*_keep.jsonl
- config_name: cpy_Latn
data_files:
- split: train
path: cpy_Latn/*_keep.jsonl
- config_name: txu_Latn
data_files:
- split: train
path: txu_Latn/*_keep.jsonl
- config_name: bjr_Latn
data_files:
- split: train
path: bjr_Latn/*_keep.jsonl
- config_name: bud_Latn
data_files:
- split: train
path: bud_Latn/*_keep.jsonl
- config_name: ken_Latn
data_files:
- split: train
path: ken_Latn/*_keep.jsonl
- config_name: bzh_Latn
data_files:
- split: train
path: bzh_Latn/*_keep.jsonl
- config_name: ipi_Latn
data_files:
- split: train
path: ipi_Latn/*_keep.jsonl
- config_name: crx_Latn
data_files:
- split: train
path: crx_Latn/*_keep.jsonl
- config_name: cko_Latn
data_files:
- split: train
path: cko_Latn/*_keep.jsonl
- config_name: gnd_Latn
data_files:
- split: train
path: gnd_Latn/*_keep.jsonl
- config_name: kix_Latn
data_files:
- split: train
path: kix_Latn/*_keep.jsonl
- config_name: mmx_Latn
data_files:
- split: train
path: mmx_Latn/*_keep.jsonl
- config_name: esk_Latn
data_files:
- split: train
path: esk_Latn/*_keep.jsonl
- config_name: kud_Latn
data_files:
- split: train
path: kud_Latn/*_keep.jsonl
- config_name: toh_Latn
data_files:
- split: train
path: toh_Latn/*_keep.jsonl
- config_name: byx_Latn
data_files:
- split: train
path: byx_Latn/*_keep.jsonl
- config_name: mtp_Latn
data_files:
- split: train
path: mtp_Latn/*_keep.jsonl
- config_name: kpv_Latn
data_files:
- split: train
path: kpv_Latn/*_keep.jsonl
- config_name: kyz_Latn
data_files:
- split: train
path: kyz_Latn/*_keep.jsonl
- config_name: lmp_Latn
data_files:
- split: train
path: lmp_Latn/*_keep.jsonl
- config_name: tiy_Latn
data_files:
- split: train
path: tiy_Latn/*_keep.jsonl
- config_name: ccp_Latn
data_files:
- split: train
path: ccp_Latn/*_keep.jsonl
- config_name: sbl_Latn
data_files:
- split: train
path: sbl_Latn/*_keep.jsonl
- config_name: tcs_Latn
data_files:
- split: train
path: tcs_Latn/*_keep.jsonl
- config_name: usp_Latn
data_files:
- split: train
path: usp_Latn/*_keep.jsonl
- config_name: buk_Latn
data_files:
- split: train
path: buk_Latn/*_keep.jsonl
- config_name: hvn_Latn
data_files:
- split: train
path: hvn_Latn/*_keep.jsonl
- config_name: amm_Latn
data_files:
- split: train
path: amm_Latn/*_keep.jsonl
- config_name: kzj_Latn
data_files:
- split: train
path: kzj_Latn/*_keep.jsonl
- config_name: gng_Latn
data_files:
- split: train
path: gng_Latn/*_keep.jsonl
- config_name: ter_Latn
data_files:
- split: train
path: ter_Latn/*_keep.jsonl
- config_name: kij_Latn
data_files:
- split: train
path: kij_Latn/*_keep.jsonl
- config_name: ngl_Latn
data_files:
- split: train
path: ngl_Latn/*_keep.jsonl
- config_name: pab_Latn
data_files:
- split: train
path: pab_Latn/*_keep.jsonl
- config_name: pad_Latn
data_files:
- split: train
path: pad_Latn/*_keep.jsonl
- config_name: kca_Cyrl
data_files:
- split: train
path: kca_Cyrl/*_keep.jsonl
- config_name: meq_Latn
data_files:
- split: train
path: meq_Latn/*_keep.jsonl
- config_name: kas_Cyrl
data_files:
- split: train
path: kas_Cyrl/*_keep.jsonl
- config_name: niq_Latn
data_files:
- split: train
path: niq_Latn/*_keep.jsonl
- config_name: dah_Latn
data_files:
- split: train
path: dah_Latn/*_keep.jsonl
- config_name: lia_Latn
data_files:
- split: train
path: lia_Latn/*_keep.jsonl
- config_name: kss_Latn
data_files:
- split: train
path: kss_Latn/*_keep.jsonl
- config_name: bch_Latn
data_files:
- split: train
path: bch_Latn/*_keep.jsonl
- config_name: khz_Latn
data_files:
- split: train
path: khz_Latn/*_keep.jsonl
- config_name: nop_Latn
data_files:
- split: train
path: nop_Latn/*_keep.jsonl
- config_name: yle_Latn
data_files:
- split: train
path: yle_Latn/*_keep.jsonl
- config_name: ain_Latn
data_files:
- split: train
path: ain_Latn/*_keep.jsonl
- config_name: pem_Latn
data_files:
- split: train
path: pem_Latn/*_keep.jsonl
- config_name: yml_Latn
data_files:
- split: train
path: yml_Latn/*_keep.jsonl
- config_name: taq_Tfng
data_files:
- split: train
path: taq_Tfng/*_keep.jsonl
- config_name: gaw_Latn
data_files:
- split: train
path: gaw_Latn/*_keep.jsonl
- config_name: kze_Latn
data_files:
- split: train
path: kze_Latn/*_keep.jsonl
- config_name: tby_Latn
data_files:
- split: train
path: tby_Latn/*_keep.jsonl
- config_name: cso_Latn
data_files:
- split: train
path: cso_Latn/*_keep.jsonl
- config_name: ifb_Latn
data_files:
- split: train
path: ifb_Latn/*_keep.jsonl
- config_name: rki_Mymr
data_files:
- split: train
path: rki_Mymr/*_keep.jsonl
- config_name: aly_Latn
data_files:
- split: train
path: aly_Latn/*_keep.jsonl
- config_name: kmu_Latn
data_files:
- split: train
path: kmu_Latn/*_keep.jsonl
- config_name: mhl_Latn
data_files:
- split: train
path: mhl_Latn/*_keep.jsonl
- config_name: kmo_Latn
data_files:
- split: train
path: kmo_Latn/*_keep.jsonl
- config_name: mva_Latn
data_files:
- split: train
path: mva_Latn/*_keep.jsonl
- config_name: opm_Latn
data_files:
- split: train
path: opm_Latn/*_keep.jsonl
- config_name: rtm_Latn
data_files:
- split: train
path: rtm_Latn/*_keep.jsonl
- config_name: xtn_Latn
data_files:
- split: train
path: xtn_Latn/*_keep.jsonl
- config_name: dgr_Latn
data_files:
- split: train
path: dgr_Latn/*_keep.jsonl
- config_name: ksr_Latn
data_files:
- split: train
path: ksr_Latn/*_keep.jsonl
- config_name: snp_Latn
data_files:
- split: train
path: snp_Latn/*_keep.jsonl
- config_name: tvk_Latn
data_files:
- split: train
path: tvk_Latn/*_keep.jsonl
- config_name: zpv_Latn
data_files:
- split: train
path: zpv_Latn/*_keep.jsonl
- config_name: hrx_Latn
data_files:
- split: train
path: hrx_Latn/*_keep.jsonl
- config_name: kwj_Latn
data_files:
- split: train
path: kwj_Latn/*_keep.jsonl
- config_name: muh_Latn
data_files:
- split: train
path: muh_Latn/*_keep.jsonl
- config_name: kak_Latn
data_files:
- split: train
path: kak_Latn/*_keep.jsonl
- config_name: kpw_Latn
data_files:
- split: train
path: kpw_Latn/*_keep.jsonl
- config_name: akb_Latn
data_files:
- split: train
path: akb_Latn/*_keep.jsonl
- config_name: ybb_Latn
data_files:
- split: train
path: ybb_Latn/*_keep.jsonl
- config_name: kjs_Latn
data_files:
- split: train
path: kjs_Latn/*_keep.jsonl
- config_name: wuv_Latn
data_files:
- split: train
path: wuv_Latn/*_keep.jsonl
- config_name: laj_Latn
data_files:
- split: train
path: laj_Latn/*_keep.jsonl
- config_name: ogo_Latn
data_files:
- split: train
path: ogo_Latn/*_keep.jsonl
- config_name: nim_Latn
data_files:
- split: train
path: nim_Latn/*_keep.jsonl
- config_name: czt_Latn
data_files:
- split: train
path: czt_Latn/*_keep.jsonl
- config_name: aoz_Latn
data_files:
- split: train
path: aoz_Latn/*_keep.jsonl
- config_name: rro_Latn
data_files:
- split: train
path: rro_Latn/*_keep.jsonl
- config_name: yuw_Latn
data_files:
- split: train
path: yuw_Latn/*_keep.jsonl
- config_name: heg_Latn
data_files:
- split: train
path: heg_Latn/*_keep.jsonl
- config_name: mrw_Latn
data_files:
- split: train
path: mrw_Latn/*_keep.jsonl
- config_name: zac_Latn
data_files:
- split: train
path: zac_Latn/*_keep.jsonl
- config_name: kpf_Latn
data_files:
- split: train
path: kpf_Latn/*_keep.jsonl
- config_name: leu_Latn
data_files:
- split: train
path: leu_Latn/*_keep.jsonl
- config_name: nwi_Latn
data_files:
- split: train
path: nwi_Latn/*_keep.jsonl
- config_name: awx_Latn
data_files:
- split: train
path: awx_Latn/*_keep.jsonl
- config_name: gso_Latn
data_files:
- split: train
path: gso_Latn/*_keep.jsonl
- config_name: keo_Latn
data_files:
- split: train
path: keo_Latn/*_keep.jsonl
- config_name: tte_Latn
data_files:
- split: train
path: tte_Latn/*_keep.jsonl
- config_name: ceg_Latn
data_files:
- split: train
path: ceg_Latn/*_keep.jsonl
- config_name: nhy_Latn
data_files:
- split: train
path: nhy_Latn/*_keep.jsonl
- config_name: bhw_Latn
data_files:
- split: train
path: bhw_Latn/*_keep.jsonl
- config_name: gnw_Latn
data_files:
- split: train
path: gnw_Latn/*_keep.jsonl
- config_name: nin_Latn
data_files:
- split: train
path: nin_Latn/*_keep.jsonl
- config_name: xmv_Latn
data_files:
- split: train
path: xmv_Latn/*_keep.jsonl
- config_name: crm_Cans
data_files:
- split: train
path: crm_Cans/*_keep.jsonl
- config_name: mxv_Latn
data_files:
- split: train
path: mxv_Latn/*_keep.jsonl
- config_name: ubr_Latn
data_files:
- split: train
path: ubr_Latn/*_keep.jsonl
- config_name: gld_Cyrl
data_files:
- split: train
path: gld_Cyrl/*_keep.jsonl
- config_name: bzi_Thai
data_files:
- split: train
path: bzi_Thai/*_keep.jsonl
- config_name: ann_Latn
data_files:
- split: train
path: ann_Latn/*_keep.jsonl
- config_name: lac_Latn
data_files:
- split: train
path: lac_Latn/*_keep.jsonl
- config_name: mej_Latn
data_files:
- split: train
path: mej_Latn/*_keep.jsonl
- config_name: zpc_Latn
data_files:
- split: train
path: zpc_Latn/*_keep.jsonl
- config_name: dje_Latn
data_files:
- split: train
path: dje_Latn/*_keep.jsonl
- config_name: caq_Latn
data_files:
- split: train
path: caq_Latn/*_keep.jsonl
- config_name: guk_Ethi
data_files:
- split: train
path: guk_Ethi/*_keep.jsonl
- config_name: wos_Latn
data_files:
- split: train
path: wos_Latn/*_keep.jsonl
- config_name: msm_Latn
data_files:
- split: train
path: msm_Latn/*_keep.jsonl
- config_name: zaw_Latn
data_files:
- split: train
path: zaw_Latn/*_keep.jsonl
- config_name: cap_Latn
data_files:
- split: train
path: cap_Latn/*_keep.jsonl
- config_name: mox_Latn
data_files:
- split: train
path: mox_Latn/*_keep.jsonl
- config_name: zab_Latn
data_files:
- split: train
path: zab_Latn/*_keep.jsonl
- config_name: mca_Latn
data_files:
- split: train
path: mca_Latn/*_keep.jsonl
- config_name: wrs_Latn
data_files:
- split: train
path: wrs_Latn/*_keep.jsonl
- config_name: hae_Latn
data_files:
- split: train
path: hae_Latn/*_keep.jsonl
- config_name: nss_Latn
data_files:
- split: train
path: nss_Latn/*_keep.jsonl
- config_name: rav_Deva
data_files:
- split: train
path: rav_Deva/*_keep.jsonl
- config_name: uvh_Latn
data_files:
- split: train
path: uvh_Latn/*_keep.jsonl
- config_name: dga_Latn
data_files:
- split: train
path: dga_Latn/*_keep.jsonl
- config_name: ppo_Latn
data_files:
- split: train
path: ppo_Latn/*_keep.jsonl
- config_name: nog_Latn
data_files:
- split: train
path: nog_Latn/*_keep.jsonl
- config_name: arq_Arab
data_files:
- split: train
path: arq_Arab/*_keep.jsonl
- config_name: bim_Latn
data_files:
- split: train
path: bim_Latn/*_keep.jsonl
- config_name: lwg_Latn
data_files:
- split: train
path: lwg_Latn/*_keep.jsonl
- config_name: kms_Latn
data_files:
- split: train
path: kms_Latn/*_keep.jsonl
- config_name: kne_Latn
data_files:
- split: train
path: kne_Latn/*_keep.jsonl
- config_name: tkl_Latn
data_files:
- split: train
path: tkl_Latn/*_keep.jsonl
- config_name: ady_Latn
data_files:
- split: train
path: ady_Latn/*_keep.jsonl
- config_name: caf_Latn
data_files:
- split: train
path: caf_Latn/*_keep.jsonl
- config_name: zat_Latn
data_files:
- split: train
path: zat_Latn/*_keep.jsonl
- config_name: aba_Latn
data_files:
- split: train
path: aba_Latn/*_keep.jsonl
- config_name: chf_Latn
data_files:
- split: train
path: chf_Latn/*_keep.jsonl
- config_name: lew_Latn
data_files:
- split: train
path: lew_Latn/*_keep.jsonl
- config_name: npl_Latn
data_files:
- split: train
path: npl_Latn/*_keep.jsonl
- config_name: uvl_Latn
data_files:
- split: train
path: uvl_Latn/*_keep.jsonl
- config_name: cdo_Hani
data_files:
- split: train
path: cdo_Hani/*_keep.jsonl
- config_name: rmq_Latn
data_files:
- split: train
path: rmq_Latn/*_keep.jsonl
- config_name: mek_Latn
data_files:
- split: train
path: mek_Latn/*_keep.jsonl
- config_name: snc_Latn
data_files:
- split: train
path: snc_Latn/*_keep.jsonl
- config_name: nsn_Latn
data_files:
- split: train
path: nsn_Latn/*_keep.jsonl
- config_name: amn_Latn
data_files:
- split: train
path: amn_Latn/*_keep.jsonl
- config_name: abz_Latn
data_files:
- split: train
path: abz_Latn/*_keep.jsonl
- config_name: cek_Latn
data_files:
- split: train
path: cek_Latn/*_keep.jsonl
- config_name: ong_Latn
data_files:
- split: train
path: ong_Latn/*_keep.jsonl
- config_name: txq_Latn
data_files:
- split: train
path: txq_Latn/*_keep.jsonl
- config_name: bjp_Latn
data_files:
- split: train
path: bjp_Latn/*_keep.jsonl
- config_name: gvf_Latn
data_files:
- split: train
path: gvf_Latn/*_keep.jsonl
- config_name: crk_Cans
data_files:
- split: train
path: crk_Cans/*_keep.jsonl
- config_name: snf_Latn
data_files:
- split: train
path: snf_Latn/*_keep.jsonl
- config_name: nhg_Latn
data_files:
- split: train
path: nhg_Latn/*_keep.jsonl
- config_name: kqw_Latn
data_files:
- split: train
path: kqw_Latn/*_keep.jsonl
- config_name: mic_Latn
data_files:
- split: train
path: mic_Latn/*_keep.jsonl
- config_name: mie_Latn
data_files:
- split: train
path: mie_Latn/*_keep.jsonl
- config_name: nus_Latn
data_files:
- split: train
path: nus_Latn/*_keep.jsonl
- config_name: plu_Latn
data_files:
- split: train
path: plu_Latn/*_keep.jsonl
- config_name: idu_Latn
data_files:
- split: train
path: idu_Latn/*_keep.jsonl
- config_name: sll_Latn
data_files:
- split: train
path: sll_Latn/*_keep.jsonl
- config_name: thl_Deva
data_files:
- split: train
path: thl_Deva/*_keep.jsonl
- config_name: bef_Latn
data_files:
- split: train
path: bef_Latn/*_keep.jsonl
- config_name: kwd_Latn
data_files:
- split: train
path: kwd_Latn/*_keep.jsonl
- config_name: mqy_Latn
data_files:
- split: train
path: mqy_Latn/*_keep.jsonl
- config_name: caa_Latn
data_files:
- split: train
path: caa_Latn/*_keep.jsonl
- config_name: aia_Latn
data_files:
- split: train
path: aia_Latn/*_keep.jsonl
- config_name: wer_Latn
data_files:
- split: train
path: wer_Latn/*_keep.jsonl
- config_name: agm_Latn
data_files:
- split: train
path: agm_Latn/*_keep.jsonl
- config_name: mbl_Latn
data_files:
- split: train
path: mbl_Latn/*_keep.jsonl
- config_name: mbs_Latn
data_files:
- split: train
path: mbs_Latn/*_keep.jsonl
- config_name: adh_Latn
data_files:
- split: train
path: adh_Latn/*_keep.jsonl
- config_name: hub_Latn
data_files:
- split: train
path: hub_Latn/*_keep.jsonl
- config_name: mav_Latn
data_files:
- split: train
path: mav_Latn/*_keep.jsonl
- config_name: nab_Latn
data_files:
- split: train
path: nab_Latn/*_keep.jsonl
- config_name: bqp_Latn
data_files:
- split: train
path: bqp_Latn/*_keep.jsonl
- config_name: guo_Latn
data_files:
- split: train
path: guo_Latn/*_keep.jsonl
- config_name: auy_Latn
data_files:
- split: train
path: auy_Latn/*_keep.jsonl
- config_name: big_Latn
data_files:
- split: train
path: big_Latn/*_keep.jsonl
- config_name: nii_Latn
data_files:
- split: train
path: nii_Latn/*_keep.jsonl
- config_name: row_Latn
data_files:
- split: train
path: row_Latn/*_keep.jsonl
- config_name: kim_Cyrl
data_files:
- split: train
path: kim_Cyrl/*_keep.jsonl
- config_name: mti_Latn
data_files:
- split: train
path: mti_Latn/*_keep.jsonl
- config_name: otm_Latn
data_files:
- split: train
path: otm_Latn/*_keep.jsonl
- config_name: prg_Latn
data_files:
- split: train
path: prg_Latn/*_keep.jsonl
- config_name: icr_Latn
data_files:
- split: train
path: icr_Latn/*_keep.jsonl
- config_name: mcp_Latn
data_files:
- split: train
path: mcp_Latn/*_keep.jsonl
- config_name: tbo_Latn
data_files:
- split: train
path: tbo_Latn/*_keep.jsonl
- config_name: chy_Latn
data_files:
- split: train
path: chy_Latn/*_keep.jsonl
- config_name: oji_Latn
data_files:
- split: train
path: oji_Latn/*_keep.jsonl
- config_name: kpx_Latn
data_files:
- split: train
path: kpx_Latn/*_keep.jsonl
- config_name: lbb_Latn
data_files:
- split: train
path: lbb_Latn/*_keep.jsonl
- config_name: pao_Latn
data_files:
- split: train
path: pao_Latn/*_keep.jsonl
- config_name: xuo_Latn
data_files:
- split: train
path: xuo_Latn/*_keep.jsonl
- config_name: zgh_Tfng
data_files:
- split: train
path: zgh_Tfng/*_keep.jsonl
- config_name: wls_Latn
data_files:
- split: train
path: wls_Latn/*_keep.jsonl
- config_name: aca_Latn
data_files:
- split: train
path: aca_Latn/*_keep.jsonl
- config_name: apy_Latn
data_files:
- split: train
path: apy_Latn/*_keep.jsonl
- config_name: wnc_Latn
data_files:
- split: train
path: wnc_Latn/*_keep.jsonl
- config_name: ssg_Latn
data_files:
- split: train
path: ssg_Latn/*_keep.jsonl
- config_name: eve_Cyrl
data_files:
- split: train
path: eve_Cyrl/*_keep.jsonl
- config_name: mee_Latn
data_files:
- split: train
path: mee_Latn/*_keep.jsonl
- config_name: yut_Latn
data_files:
- split: train
path: yut_Latn/*_keep.jsonl
- config_name: chr_Latn
data_files:
- split: train
path: chr_Latn/*_keep.jsonl
- config_name: kxm_Thai
data_files:
- split: train
path: kxm_Thai/*_keep.jsonl
- config_name: mur_Latn
data_files:
- split: train
path: mur_Latn/*_keep.jsonl
- config_name: lad_Hebr
data_files:
- split: train
path: lad_Hebr/*_keep.jsonl
- config_name: mcd_Latn
data_files:
- split: train
path: mcd_Latn/*_keep.jsonl
- config_name: cto_Latn
data_files:
- split: train
path: cto_Latn/*_keep.jsonl
- config_name: mqj_Latn
data_files:
- split: train
path: mqj_Latn/*_keep.jsonl
- config_name: usa_Latn
data_files:
- split: train
path: usa_Latn/*_keep.jsonl
- config_name: emi_Latn
data_files:
- split: train
path: emi_Latn/*_keep.jsonl
- config_name: liv_Latn
data_files:
- split: train
path: liv_Latn/*_keep.jsonl
- config_name: guz_Latn
data_files:
- split: train
path: guz_Latn/*_keep.jsonl
- config_name: tgp_Latn
data_files:
- split: train
path: tgp_Latn/*_keep.jsonl
- config_name: ncl_Latn
data_files:
- split: train
path: ncl_Latn/*_keep.jsonl
- config_name: azg_Latn
data_files:
- split: train
path: azg_Latn/*_keep.jsonl
- config_name: kck_Latn
data_files:
- split: train
path: kck_Latn/*_keep.jsonl
- config_name: apb_Latn
data_files:
- split: train
path: apb_Latn/*_keep.jsonl
- config_name: kbm_Latn
data_files:
- split: train
path: kbm_Latn/*_keep.jsonl
- config_name: sgz_Latn
data_files:
- split: train
path: sgz_Latn/*_keep.jsonl
- config_name: wsk_Latn
data_files:
- split: train
path: wsk_Latn/*_keep.jsonl
- config_name: gbm_Deva
data_files:
- split: train
path: gbm_Deva/*_keep.jsonl
- config_name: csw_Latn
data_files:
- split: train
path: csw_Latn/*_keep.jsonl
- config_name: ctp_Latn
data_files:
- split: train
path: ctp_Latn/*_keep.jsonl
- config_name: eri_Latn
data_files:
- split: train
path: eri_Latn/*_keep.jsonl
- config_name: vid_Latn
data_files:
- split: train
path: vid_Latn/*_keep.jsonl
- config_name: bsq_Latn
data_files:
- split: train
path: bsq_Latn/*_keep.jsonl
- config_name: dak_Latn
data_files:
- split: train
path: dak_Latn/*_keep.jsonl
- config_name: tif_Latn
data_files:
- split: train
path: tif_Latn/*_keep.jsonl
- config_name: xsr_Deva
data_files:
- split: train
path: xsr_Deva/*_keep.jsonl
- config_name: mwp_Latn
data_files:
- split: train
path: mwp_Latn/*_keep.jsonl
- config_name: roo_Latn
data_files:
- split: train
path: roo_Latn/*_keep.jsonl
- config_name: wbp_Latn
data_files:
- split: train
path: wbp_Latn/*_keep.jsonl
- config_name: gdn_Latn
data_files:
- split: train
path: gdn_Latn/*_keep.jsonl
- config_name: tgo_Latn
data_files:
- split: train
path: tgo_Latn/*_keep.jsonl
- config_name: zpm_Latn
data_files:
- split: train
path: zpm_Latn/*_keep.jsonl
- config_name: mih_Latn
data_files:
- split: train
path: mih_Latn/*_keep.jsonl
- config_name: qvc_Latn
data_files:
- split: train
path: qvc_Latn/*_keep.jsonl
- config_name: tod_Latn
data_files:
- split: train
path: tod_Latn/*_keep.jsonl
- config_name: abn_Latn
data_files:
- split: train
path: abn_Latn/*_keep.jsonl
- config_name: cao_Latn
data_files:
- split: train
path: cao_Latn/*_keep.jsonl
- config_name: kbc_Latn
data_files:
- split: train
path: kbc_Latn/*_keep.jsonl
- config_name: kog_Latn
data_files:
- split: train
path: kog_Latn/*_keep.jsonl
- config_name: moh_Latn
data_files:
- split: train
path: moh_Latn/*_keep.jsonl
- config_name: leh_Latn
data_files:
- split: train
path: leh_Latn/*_keep.jsonl
- config_name: nsu_Latn
data_files:
- split: train
path: nsu_Latn/*_keep.jsonl
- config_name: okv_Latn
data_files:
- split: train
path: okv_Latn/*_keep.jsonl
- config_name: tih_Latn
data_files:
- split: train
path: tih_Latn/*_keep.jsonl
- config_name: zos_Latn
data_files:
- split: train
path: zos_Latn/*_keep.jsonl
- config_name: llg_Latn
data_files:
- split: train
path: llg_Latn/*_keep.jsonl
- config_name: gmv_Ethi
data_files:
- split: train
path: gmv_Ethi/*_keep.jsonl
- config_name: kyf_Latn
data_files:
- split: train
path: kyf_Latn/*_keep.jsonl
- config_name: mhx_Latn
data_files:
- split: train
path: mhx_Latn/*_keep.jsonl
- config_name: mvp_Latn
data_files:
- split: train
path: mvp_Latn/*_keep.jsonl
- config_name: naf_Latn
data_files:
- split: train
path: naf_Latn/*_keep.jsonl
- config_name: ino_Latn
data_files:
- split: train
path: ino_Latn/*_keep.jsonl
- config_name: dng_Cyrl
data_files:
- split: train
path: dng_Cyrl/*_keep.jsonl
- config_name: mnk_Latn
data_files:
- split: train
path: mnk_Latn/*_keep.jsonl
- config_name: oke_Latn
data_files:
- split: train
path: oke_Latn/*_keep.jsonl
- config_name: omw_Latn
data_files:
- split: train
path: omw_Latn/*_keep.jsonl
- config_name: tnk_Latn
data_files:
- split: train
path: tnk_Latn/*_keep.jsonl
- config_name: xtd_Latn
data_files:
- split: train
path: xtd_Latn/*_keep.jsonl
- config_name: agt_Latn
data_files:
- split: train
path: agt_Latn/*_keep.jsonl
- config_name: cbr_Latn
data_files:
- split: train
path: cbr_Latn/*_keep.jsonl
- config_name: eko_Latn
data_files:
- split: train
path: eko_Latn/*_keep.jsonl
- config_name: nhk_Latn
data_files:
- split: train
path: nhk_Latn/*_keep.jsonl
- config_name: hot_Latn
data_files:
- split: train
path: hot_Latn/*_keep.jsonl
- config_name: iou_Latn
data_files:
- split: train
path: iou_Latn/*_keep.jsonl
- config_name: mpm_Latn
data_files:
- split: train
path: mpm_Latn/*_keep.jsonl
- config_name: myx_Latn
data_files:
- split: train
path: myx_Latn/*_keep.jsonl
- config_name: ndh_Latn
data_files:
- split: train
path: ndh_Latn/*_keep.jsonl
- config_name: pbb_Latn
data_files:
- split: train
path: pbb_Latn/*_keep.jsonl
- config_name: rng_Latn
data_files:
- split: train
path: rng_Latn/*_keep.jsonl
- config_name: cri_Latn
data_files:
- split: train
path: cri_Latn/*_keep.jsonl
- config_name: bvd_Latn
data_files:
- split: train
path: bvd_Latn/*_keep.jsonl
- config_name: kmz_Latn
data_files:
- split: train
path: kmz_Latn/*_keep.jsonl
- config_name: cta_Latn
data_files:
- split: train
path: cta_Latn/*_keep.jsonl
- config_name: blw_Latn
data_files:
- split: train
path: blw_Latn/*_keep.jsonl
- config_name: clu_Latn
data_files:
- split: train
path: clu_Latn/*_keep.jsonl
- config_name: cof_Latn
data_files:
- split: train
path: cof_Latn/*_keep.jsonl
- config_name: kje_Latn
data_files:
- split: train
path: kje_Latn/*_keep.jsonl
- config_name: nmf_Latn
data_files:
- split: train
path: nmf_Latn/*_keep.jsonl
- config_name: kbh_Latn
data_files:
- split: train
path: kbh_Latn/*_keep.jsonl
- config_name: qxn_Latn
data_files:
- split: train
path: qxn_Latn/*_keep.jsonl
- config_name: ria_Latn
data_files:
- split: train
path: ria_Latn/*_keep.jsonl
- config_name: wed_Latn
data_files:
- split: train
path: wed_Latn/*_keep.jsonl
- config_name: zar_Latn
data_files:
- split: train
path: zar_Latn/*_keep.jsonl
- config_name: huv_Latn
data_files:
- split: train
path: huv_Latn/*_keep.jsonl
- config_name: lsi_Latn
data_files:
- split: train
path: lsi_Latn/*_keep.jsonl
- config_name: msk_Latn
data_files:
- split: train
path: msk_Latn/*_keep.jsonl
- config_name: nbq_Latn
data_files:
- split: train
path: nbq_Latn/*_keep.jsonl
- config_name: ati_Latn
data_files:
- split: train
path: ati_Latn/*_keep.jsonl
- config_name: lww_Latn
data_files:
- split: train
path: lww_Latn/*_keep.jsonl
- config_name: yva_Latn
data_files:
- split: train
path: yva_Latn/*_keep.jsonl
- config_name: amr_Latn
data_files:
- split: train
path: amr_Latn/*_keep.jsonl
- config_name: lmk_Latn
data_files:
- split: train
path: lmk_Latn/*_keep.jsonl
- config_name: tim_Latn
data_files:
- split: train
path: tim_Latn/*_keep.jsonl
- config_name: bbb_Latn
data_files:
- split: train
path: bbb_Latn/*_keep.jsonl
- config_name: bzd_Latn
data_files:
- split: train
path: bzd_Latn/*_keep.jsonl
- config_name: djr_Latn
data_files:
- split: train
path: djr_Latn/*_keep.jsonl
- config_name: kwf_Latn
data_files:
- split: train
path: kwf_Latn/*_keep.jsonl
- config_name: nuy_Latn
data_files:
- split: train
path: nuy_Latn/*_keep.jsonl
- config_name: poi_Latn
data_files:
- split: train
path: poi_Latn/*_keep.jsonl
- config_name: ptp_Latn
data_files:
- split: train
path: ptp_Latn/*_keep.jsonl
- config_name: atd_Latn
data_files:
- split: train
path: atd_Latn/*_keep.jsonl
- config_name: bps_Latn
data_files:
- split: train
path: bps_Latn/*_keep.jsonl
- config_name: lcm_Latn
data_files:
- split: train
path: lcm_Latn/*_keep.jsonl
- config_name: mbh_Latn
data_files:
- split: train
path: mbh_Latn/*_keep.jsonl
- config_name: nuj_Latn
data_files:
- split: train
path: nuj_Latn/*_keep.jsonl
- config_name: ayp_Arab
data_files:
- split: train
path: ayp_Arab/*_keep.jsonl
- config_name: cwe_Latn
data_files:
- split: train
path: cwe_Latn/*_keep.jsonl
- config_name: geb_Latn
data_files:
- split: train
path: geb_Latn/*_keep.jsonl
- config_name: aoi_Latn
data_files:
- split: train
path: aoi_Latn/*_keep.jsonl
- config_name: mto_Latn
data_files:
- split: train
path: mto_Latn/*_keep.jsonl
- config_name: cbi_Latn
data_files:
- split: train
path: cbi_Latn/*_keep.jsonl
- config_name: gvn_Latn
data_files:
- split: train
path: gvn_Latn/*_keep.jsonl
- config_name: kqf_Latn
data_files:
- split: train
path: kqf_Latn/*_keep.jsonl
- config_name: rkb_Latn
data_files:
- split: train
path: rkb_Latn/*_keep.jsonl
- config_name: xed_Latn
data_files:
- split: train
path: xed_Latn/*_keep.jsonl
- config_name: zas_Latn
data_files:
- split: train
path: zas_Latn/*_keep.jsonl
- config_name: apz_Latn
data_files:
- split: train
path: apz_Latn/*_keep.jsonl
- config_name: cpb_Latn
data_files:
- split: train
path: cpb_Latn/*_keep.jsonl
- config_name: ese_Latn
data_files:
- split: train
path: ese_Latn/*_keep.jsonl
- config_name: gdg_Latn
data_files:
- split: train
path: gdg_Latn/*_keep.jsonl
- config_name: iws_Latn
data_files:
- split: train
path: iws_Latn/*_keep.jsonl
- config_name: apu_Latn
data_files:
- split: train
path: apu_Latn/*_keep.jsonl
- config_name: cag_Latn
data_files:
- split: train
path: cag_Latn/*_keep.jsonl
- config_name: kdc_Latn
data_files:
- split: train
path: kdc_Latn/*_keep.jsonl
- config_name: nou_Latn
data_files:
- split: train
path: nou_Latn/*_keep.jsonl
- config_name: too_Latn
data_files:
- split: train
path: too_Latn/*_keep.jsonl
- config_name: xmm_Latn
data_files:
- split: train
path: xmm_Latn/*_keep.jsonl
- config_name: beq_Latn
data_files:
- split: train
path: beq_Latn/*_keep.jsonl
- config_name: hnn_Latn
data_files:
- split: train
path: hnn_Latn/*_keep.jsonl
- config_name: myk_Latn
data_files:
- split: train
path: myk_Latn/*_keep.jsonl
- config_name: sby_Latn
data_files:
- split: train
path: sby_Latn/*_keep.jsonl
- config_name: arl_Latn
data_files:
- split: train
path: arl_Latn/*_keep.jsonl
- config_name: ncu_Latn
data_files:
- split: train
path: ncu_Latn/*_keep.jsonl
- config_name: rml_Latn
data_files:
- split: train
path: rml_Latn/*_keep.jsonl
- config_name: gam_Latn
data_files:
- split: train
path: gam_Latn/*_keep.jsonl
- config_name: kvn_Latn
data_files:
- split: train
path: kvn_Latn/*_keep.jsonl
- config_name: mbc_Latn
data_files:
- split: train
path: mbc_Latn/*_keep.jsonl
- config_name: mfy_Latn
data_files:
- split: train
path: mfy_Latn/*_keep.jsonl
- config_name: urt_Latn
data_files:
- split: train
path: urt_Latn/*_keep.jsonl
- config_name: luc_Latn
data_files:
- split: train
path: luc_Latn/*_keep.jsonl
- config_name: mio_Latn
data_files:
- split: train
path: mio_Latn/*_keep.jsonl
- config_name: snn_Latn
data_files:
- split: train
path: snn_Latn/*_keep.jsonl
- config_name: tar_Latn
data_files:
- split: train
path: tar_Latn/*_keep.jsonl
- config_name: tku_Latn
data_files:
- split: train
path: tku_Latn/*_keep.jsonl
- config_name: upv_Latn
data_files:
- split: train
path: upv_Latn/*_keep.jsonl
- config_name: kjg_Laoo
data_files:
- split: train
path: kjg_Laoo/*_keep.jsonl
- config_name: btd_Latn
data_files:
- split: train
path: btd_Latn/*_keep.jsonl
- config_name: cut_Latn
data_files:
- split: train
path: cut_Latn/*_keep.jsonl
- config_name: mwn_Latn
data_files:
- split: train
path: mwn_Latn/*_keep.jsonl
- config_name: tue_Latn
data_files:
- split: train
path: tue_Latn/*_keep.jsonl
- config_name: adz_Latn
data_files:
- split: train
path: adz_Latn/*_keep.jsonl
- config_name: kyg_Latn
data_files:
- split: train
path: kyg_Latn/*_keep.jsonl
- config_name: rel_Latn
data_files:
- split: train
path: rel_Latn/*_keep.jsonl
- config_name: tew_Latn
data_files:
- split: train
path: tew_Latn/*_keep.jsonl
- config_name: ttc_Latn
data_files:
- split: train
path: ttc_Latn/*_keep.jsonl
- config_name: nkf_Latn
data_files:
- split: train
path: nkf_Latn/*_keep.jsonl
- config_name: yrb_Latn
data_files:
- split: train
path: yrb_Latn/*_keep.jsonl
- config_name: fub_Latn
data_files:
- split: train
path: fub_Latn/*_keep.jsonl
- config_name: nsm_Latn
data_files:
- split: train
path: nsm_Latn/*_keep.jsonl
- config_name: nyf_Latn
data_files:
- split: train
path: nyf_Latn/*_keep.jsonl
- config_name: ava_Latn
data_files:
- split: train
path: ava_Latn/*_keep.jsonl
- config_name: bkd_Latn
data_files:
- split: train
path: bkd_Latn/*_keep.jsonl
- config_name: cpc_Latn
data_files:
- split: train
path: cpc_Latn/*_keep.jsonl
- config_name: lif_Limb
data_files:
- split: train
path: lif_Limb/*_keep.jsonl
- config_name: mwc_Latn
data_files:
- split: train
path: mwc_Latn/*_keep.jsonl
- config_name: gan_Latn
data_files:
- split: train
path: gan_Latn/*_keep.jsonl
- config_name: apw_Latn
data_files:
- split: train
path: apw_Latn/*_keep.jsonl
- config_name: azz_Latn
data_files:
- split: train
path: azz_Latn/*_keep.jsonl
- config_name: zpo_Latn
data_files:
- split: train
path: zpo_Latn/*_keep.jsonl
- config_name: ace_Arab
data_files:
- split: train
path: ace_Arab/*_keep.jsonl
- config_name: faa_Latn
data_files:
- split: train
path: faa_Latn/*_keep.jsonl
- config_name: ood_Latn
data_files:
- split: train
path: ood_Latn/*_keep.jsonl
- config_name: toc_Latn
data_files:
- split: train
path: toc_Latn/*_keep.jsonl
- config_name: wmt_Latn
data_files:
- split: train
path: wmt_Latn/*_keep.jsonl
- config_name: ajg_Latn
data_files:
- split: train
path: ajg_Latn/*_keep.jsonl
- config_name: nko_Latn
data_files:
- split: train
path: nko_Latn/*_keep.jsonl
- config_name: ssd_Latn
data_files:
- split: train
path: ssd_Latn/*_keep.jsonl
- config_name: sxb_Latn
data_files:
- split: train
path: sxb_Latn/*_keep.jsonl
- config_name: zxx_Latn
data_files:
- split: train
path: zxx_Latn/*_keep.jsonl
- config_name: not_Latn
data_files:
- split: train
path: not_Latn/*_keep.jsonl
- config_name: amk_Latn
data_files:
- split: train
path: amk_Latn/*_keep.jsonl
- config_name: hch_Latn
data_files:
- split: train
path: hch_Latn/*_keep.jsonl
- config_name: kqc_Latn
data_files:
- split: train
path: kqc_Latn/*_keep.jsonl
- config_name: aui_Latn
data_files:
- split: train
path: aui_Latn/*_keep.jsonl
- config_name: llb_Latn
data_files:
- split: train
path: llb_Latn/*_keep.jsonl
- config_name: avt_Latn
data_files:
- split: train
path: avt_Latn/*_keep.jsonl
- config_name: kaq_Latn
data_files:
- split: train
path: kaq_Latn/*_keep.jsonl
- config_name: tna_Latn
data_files:
- split: train
path: tna_Latn/*_keep.jsonl
- config_name: zpt_Latn
data_files:
- split: train
path: zpt_Latn/*_keep.jsonl
- config_name: mnw_Latn
data_files:
- split: train
path: mnw_Latn/*_keep.jsonl
- config_name: bwd_Latn
data_files:
- split: train
path: bwd_Latn/*_keep.jsonl
- config_name: lif_Deva
data_files:
- split: train
path: lif_Deva/*_keep.jsonl
- config_name: stn_Latn
data_files:
- split: train
path: stn_Latn/*_keep.jsonl
- config_name: crk_Latn
data_files:
- split: train
path: crk_Latn/*_keep.jsonl
- config_name: kgk_Latn
data_files:
- split: train
path: kgk_Latn/*_keep.jsonl
- config_name: mck_Latn
data_files:
- split: train
path: mck_Latn/*_keep.jsonl
- config_name: bhg_Latn
data_files:
- split: train
path: bhg_Latn/*_keep.jsonl
- config_name: ifu_Latn
data_files:
- split: train
path: ifu_Latn/*_keep.jsonl
- config_name: kkl_Latn
data_files:
- split: train
path: kkl_Latn/*_keep.jsonl
- config_name: zao_Latn
data_files:
- split: train
path: zao_Latn/*_keep.jsonl
- config_name: huu_Latn
data_files:
- split: train
path: huu_Latn/*_keep.jsonl
- config_name: sil_Latn
data_files:
- split: train
path: sil_Latn/*_keep.jsonl
- config_name: trq_Latn
data_files:
- split: train
path: trq_Latn/*_keep.jsonl
- config_name: tuf_Latn
data_files:
- split: train
path: tuf_Latn/*_keep.jsonl
- config_name: wnu_Latn
data_files:
- split: train
path: wnu_Latn/*_keep.jsonl
- config_name: amf_Latn
data_files:
- split: train
path: amf_Latn/*_keep.jsonl
- config_name: fan_Latn
data_files:
- split: train
path: fan_Latn/*_keep.jsonl
- config_name: zdj_Latn
data_files:
- split: train
path: zdj_Latn/*_keep.jsonl
- config_name: bco_Latn
data_files:
- split: train
path: bco_Latn/*_keep.jsonl
- config_name: kdl_Latn
data_files:
- split: train
path: kdl_Latn/*_keep.jsonl
- config_name: lln_Latn
data_files:
- split: train
path: lln_Latn/*_keep.jsonl
- config_name: mit_Latn
data_files:
- split: train
path: mit_Latn/*_keep.jsonl
- config_name: yaq_Latn
data_files:
- split: train
path: yaq_Latn/*_keep.jsonl
- config_name: ake_Latn
data_files:
- split: train
path: ake_Latn/*_keep.jsonl
- config_name: ige_Latn
data_files:
- split: train
path: ige_Latn/*_keep.jsonl
- config_name: mna_Latn
data_files:
- split: train
path: mna_Latn/*_keep.jsonl
- config_name: nhx_Latn
data_files:
- split: train
path: nhx_Latn/*_keep.jsonl
- config_name: njn_Latn
data_files:
- split: train
path: njn_Latn/*_keep.jsonl
- config_name: wba_Latn
data_files:
- split: train
path: wba_Latn/*_keep.jsonl
- config_name: zaa_Latn
data_files:
- split: train
path: zaa_Latn/*_keep.jsonl
- config_name: lez_Latn
data_files:
- split: train
path: lez_Latn/*_keep.jsonl
- config_name: syb_Latn
data_files:
- split: train
path: syb_Latn/*_keep.jsonl
- config_name: qxo_Latn
data_files:
- split: train
path: qxo_Latn/*_keep.jsonl
- config_name: rgu_Latn
data_files:
- split: train
path: rgu_Latn/*_keep.jsonl
- config_name: wiu_Latn
data_files:
- split: train
path: wiu_Latn/*_keep.jsonl
- config_name: bla_Latn
data_files:
- split: train
path: bla_Latn/*_keep.jsonl
- config_name: etr_Latn
data_files:
- split: train
path: etr_Latn/*_keep.jsonl
- config_name: miy_Latn
data_files:
- split: train
path: miy_Latn/*_keep.jsonl
- config_name: mjc_Latn
data_files:
- split: train
path: mjc_Latn/*_keep.jsonl
- config_name: nif_Latn
data_files:
- split: train
path: nif_Latn/*_keep.jsonl
- config_name: pov_Latn
data_files:
- split: train
path: pov_Latn/*_keep.jsonl
- config_name: qxl_Latn
data_files:
- split: train
path: qxl_Latn/*_keep.jsonl
- config_name: sey_Latn
data_files:
- split: train
path: sey_Latn/*_keep.jsonl
- config_name: bxh_Latn
data_files:
- split: train
path: bxh_Latn/*_keep.jsonl
- config_name: ddg_Latn
data_files:
- split: train
path: ddg_Latn/*_keep.jsonl
- config_name: mks_Latn
data_files:
- split: train
path: mks_Latn/*_keep.jsonl
- config_name: nki_Latn
data_files:
- split: train
path: nki_Latn/*_keep.jsonl
- config_name: pot_Latn
data_files:
- split: train
path: pot_Latn/*_keep.jsonl
- config_name: prf_Latn
data_files:
- split: train
path: prf_Latn/*_keep.jsonl
- config_name: hra_Latn
data_files:
- split: train
path: hra_Latn/*_keep.jsonl
- config_name: law_Latn
data_files:
- split: train
path: law_Latn/*_keep.jsonl
- config_name: plw_Latn
data_files:
- split: train
path: plw_Latn/*_keep.jsonl
- config_name: zpi_Latn
data_files:
- split: train
path: zpi_Latn/*_keep.jsonl
- config_name: atg_Latn
data_files:
- split: train
path: atg_Latn/*_keep.jsonl
- config_name: daf_Latn
data_files:
- split: train
path: daf_Latn/*_keep.jsonl
- config_name: drg_Latn
data_files:
- split: train
path: drg_Latn/*_keep.jsonl
- config_name: dru_Latn
data_files:
- split: train
path: dru_Latn/*_keep.jsonl
- config_name: ngp_Latn
data_files:
- split: train
path: ngp_Latn/*_keep.jsonl
- config_name: boa_Latn
data_files:
- split: train
path: boa_Latn/*_keep.jsonl
- config_name: cbt_Latn
data_files:
- split: train
path: cbt_Latn/*_keep.jsonl
- config_name: dhm_Latn
data_files:
- split: train
path: dhm_Latn/*_keep.jsonl
- config_name: ikw_Latn
data_files:
- split: train
path: ikw_Latn/*_keep.jsonl
- config_name: kqe_Latn
data_files:
- split: train
path: kqe_Latn/*_keep.jsonl
- config_name: lbj_Tibt
data_files:
- split: train
path: lbj_Tibt/*_keep.jsonl
- config_name: loq_Latn
data_files:
- split: train
path: loq_Latn/*_keep.jsonl
- config_name: xsb_Latn
data_files:
- split: train
path: xsb_Latn/*_keep.jsonl
- config_name: yad_Latn
data_files:
- split: train
path: yad_Latn/*_keep.jsonl
- config_name: kyu_Latn
data_files:
- split: train
path: kyu_Latn/*_keep.jsonl
- config_name: mcb_Latn
data_files:
- split: train
path: mcb_Latn/*_keep.jsonl
- config_name: tks_Arab
data_files:
- split: train
path: tks_Arab/*_keep.jsonl
- config_name: cnl_Latn
data_files:
- split: train
path: cnl_Latn/*_keep.jsonl
- config_name: far_Latn
data_files:
- split: train
path: far_Latn/*_keep.jsonl
- config_name: miz_Latn
data_files:
- split: train
path: miz_Latn/*_keep.jsonl
- config_name: maj_Latn
data_files:
- split: train
path: maj_Latn/*_keep.jsonl
- config_name: mgm_Latn
data_files:
- split: train
path: mgm_Latn/*_keep.jsonl
- config_name: mib_Latn
data_files:
- split: train
path: mib_Latn/*_keep.jsonl
- config_name: mza_Latn
data_files:
- split: train
path: mza_Latn/*_keep.jsonl
- config_name: nhr_Latn
data_files:
- split: train
path: nhr_Latn/*_keep.jsonl
- config_name: smt_Latn
data_files:
- split: train
path: smt_Latn/*_keep.jsonl
- config_name: ish_Latn
data_files:
- split: train
path: ish_Latn/*_keep.jsonl
- config_name: mlu_Latn
data_files:
- split: train
path: mlu_Latn/*_keep.jsonl
- config_name: yka_Latn
data_files:
- split: train
path: yka_Latn/*_keep.jsonl
- config_name: anv_Latn
data_files:
- split: train
path: anv_Latn/*_keep.jsonl
- config_name: ign_Latn
data_files:
- split: train
path: ign_Latn/*_keep.jsonl
- config_name: mir_Latn
data_files:
- split: train
path: mir_Latn/*_keep.jsonl
- config_name: tav_Latn
data_files:
- split: train
path: tav_Latn/*_keep.jsonl
- config_name: gyr_Latn
data_files:
- split: train
path: gyr_Latn/*_keep.jsonl
- config_name: klt_Latn
data_files:
- split: train
path: klt_Latn/*_keep.jsonl
- config_name: apn_Latn
data_files:
- split: train
path: apn_Latn/*_keep.jsonl
- config_name: wbm_Latn
data_files:
- split: train
path: wbm_Latn/*_keep.jsonl
- config_name: cjk_Latn
data_files:
- split: train
path: cjk_Latn/*_keep.jsonl
- config_name: dln_Latn
data_files:
- split: train
path: dln_Latn/*_keep.jsonl
- config_name: mcf_Latn
data_files:
- split: train
path: mcf_Latn/*_keep.jsonl
- config_name: twx_Latn
data_files:
- split: train
path: twx_Latn/*_keep.jsonl
- config_name: ldi_Latn
data_files:
- split: train
path: ldi_Latn/*_keep.jsonl
- config_name: poy_Latn
data_files:
- split: train
path: poy_Latn/*_keep.jsonl
- config_name: yaa_Latn
data_files:
- split: train
path: yaa_Latn/*_keep.jsonl
- config_name: zae_Latn
data_files:
- split: train
path: zae_Latn/*_keep.jsonl
- config_name: bdq_Latn
data_files:
- split: train
path: bdq_Latn/*_keep.jsonl
- config_name: con_Latn
data_files:
- split: train
path: con_Latn/*_keep.jsonl
- config_name: dnw_Latn
data_files:
- split: train
path: dnw_Latn/*_keep.jsonl
- config_name: gah_Latn
data_files:
- split: train
path: gah_Latn/*_keep.jsonl
- config_name: lcp_Thai
data_files:
- split: train
path: lcp_Thai/*_keep.jsonl
- config_name: mbj_Latn
data_files:
- split: train
path: mbj_Latn/*_keep.jsonl
- config_name: xog_Latn
data_files:
- split: train
path: xog_Latn/*_keep.jsonl
- config_name: yan_Latn
data_files:
- split: train
path: yan_Latn/*_keep.jsonl
- config_name: sdq_Latn
data_files:
- split: train
path: sdq_Latn/*_keep.jsonl
- config_name: sgh_Cyrl
data_files:
- split: train
path: sgh_Cyrl/*_keep.jsonl
- config_name: gwi_Latn
data_files:
- split: train
path: gwi_Latn/*_keep.jsonl
- config_name: lbk_Latn
data_files:
- split: train
path: lbk_Latn/*_keep.jsonl
- config_name: ses_Latn
data_files:
- split: train
path: ses_Latn/*_keep.jsonl
- config_name: ycn_Latn
data_files:
- split: train
path: ycn_Latn/*_keep.jsonl
- config_name: kau_Latn
data_files:
- split: train
path: kau_Latn/*_keep.jsonl
- config_name: hix_Latn
data_files:
- split: train
path: hix_Latn/*_keep.jsonl
- config_name: mig_Latn
data_files:
- split: train
path: mig_Latn/*_keep.jsonl
- config_name: mlh_Latn
data_files:
- split: train
path: mlh_Latn/*_keep.jsonl
- config_name: aer_Latn
data_files:
- split: train
path: aer_Latn/*_keep.jsonl
- config_name: amx_Latn
data_files:
- split: train
path: amx_Latn/*_keep.jsonl
- config_name: auc_Latn
data_files:
- split: train
path: auc_Latn/*_keep.jsonl
- config_name: cly_Latn
data_files:
- split: train
path: cly_Latn/*_keep.jsonl
- config_name: dsh_Latn
data_files:
- split: train
path: dsh_Latn/*_keep.jsonl
- config_name: gnb_Latn
data_files:
- split: train
path: gnb_Latn/*_keep.jsonl
- config_name: neb_Latn
data_files:
- split: train
path: neb_Latn/*_keep.jsonl
- config_name: ttt_Cyrl
data_files:
- split: train
path: ttt_Cyrl/*_keep.jsonl
- config_name: jic_Latn
data_files:
- split: train
path: jic_Latn/*_keep.jsonl
- config_name: xtm_Latn
data_files:
- split: train
path: xtm_Latn/*_keep.jsonl
- config_name: tll_Latn
data_files:
- split: train
path: tll_Latn/*_keep.jsonl
- config_name: apt_Latn
data_files:
- split: train
path: apt_Latn/*_keep.jsonl
- config_name: kgr_Latn
data_files:
- split: train
path: kgr_Latn/*_keep.jsonl
- config_name: pio_Latn
data_files:
- split: train
path: pio_Latn/*_keep.jsonl
- config_name: pmf_Latn
data_files:
- split: train
path: pmf_Latn/*_keep.jsonl
- config_name: yrk_Latn
data_files:
- split: train
path: yrk_Latn/*_keep.jsonl
- config_name: akh_Latn
data_files:
- split: train
path: akh_Latn/*_keep.jsonl
- config_name: box_Latn
data_files:
- split: train
path: box_Latn/*_keep.jsonl
- config_name: cub_Latn
data_files:
- split: train
path: cub_Latn/*_keep.jsonl
- config_name: cux_Latn
data_files:
- split: train
path: cux_Latn/*_keep.jsonl
- config_name: mrq_Latn
data_files:
- split: train
path: mrq_Latn/*_keep.jsonl
- config_name: inh_Latn
data_files:
- split: train
path: inh_Latn/*_keep.jsonl
- config_name: bsp_Latn
data_files:
- split: train
path: bsp_Latn/*_keep.jsonl
- config_name: for_Latn
data_files:
- split: train
path: for_Latn/*_keep.jsonl
- config_name: kny_Latn
data_files:
- split: train
path: kny_Latn/*_keep.jsonl
- config_name: uri_Latn
data_files:
- split: train
path: uri_Latn/*_keep.jsonl
- config_name: zsr_Latn
data_files:
- split: train
path: zsr_Latn/*_keep.jsonl
- config_name: pce_Mymr
data_files:
- split: train
path: pce_Mymr/*_keep.jsonl
- config_name: cme_Latn
data_files:
- split: train
path: cme_Latn/*_keep.jsonl
- config_name: cot_Latn
data_files:
- split: train
path: cot_Latn/*_keep.jsonl
- config_name: kxw_Latn
data_files:
- split: train
path: kxw_Latn/*_keep.jsonl
- config_name: omb_Latn
data_files:
- split: train
path: omb_Latn/*_keep.jsonl
- config_name: srq_Latn
data_files:
- split: train
path: srq_Latn/*_keep.jsonl
- config_name: vag_Latn
data_files:
- split: train
path: vag_Latn/*_keep.jsonl
- config_name: cpu_Latn
data_files:
- split: train
path: cpu_Latn/*_keep.jsonl
- config_name: kmy_Latn
data_files:
- split: train
path: kmy_Latn/*_keep.jsonl
- config_name: ksc_Latn
data_files:
- split: train
path: ksc_Latn/*_keep.jsonl
- config_name: lai_Latn
data_files:
- split: train
path: lai_Latn/*_keep.jsonl
- config_name: nnp_Latn
data_files:
- split: train
path: nnp_Latn/*_keep.jsonl
- config_name: ruf_Latn
data_files:
- split: train
path: ruf_Latn/*_keep.jsonl
- config_name: spm_Latn
data_files:
- split: train
path: spm_Latn/*_keep.jsonl
- config_name: tro_Latn
data_files:
- split: train
path: tro_Latn/*_keep.jsonl
- config_name: ttj_Latn
data_files:
- split: train
path: ttj_Latn/*_keep.jsonl
- config_name: waj_Latn
data_files:
- split: train
path: waj_Latn/*_keep.jsonl
- config_name: raj_Deva
data_files:
- split: train
path: raj_Deva/*_keep.jsonl
- config_name: ian_Latn
data_files:
- split: train
path: ian_Latn/*_keep.jsonl
- config_name: ivb_Latn
data_files:
- split: train
path: ivb_Latn/*_keep.jsonl
- config_name: kmm_Latn
data_files:
- split: train
path: kmm_Latn/*_keep.jsonl
- config_name: kzf_Latn
data_files:
- split: train
path: kzf_Latn/*_keep.jsonl
- config_name: maw_Latn
data_files:
- split: train
path: maw_Latn/*_keep.jsonl
- config_name: tuv_Latn
data_files:
- split: train
path: tuv_Latn/*_keep.jsonl
- config_name: whk_Latn
data_files:
- split: train
path: whk_Latn/*_keep.jsonl
- config_name: zsm_Arab
data_files:
- split: train
path: zsm_Arab/*_keep.jsonl
- config_name: aph_Deva
data_files:
- split: train
path: aph_Deva/*_keep.jsonl
- config_name: anm_Latn
data_files:
- split: train
path: anm_Latn/*_keep.jsonl
- config_name: mny_Latn
data_files:
- split: train
path: mny_Latn/*_keep.jsonl
- config_name: bug_Bugi
data_files:
- split: train
path: bug_Bugi/*_keep.jsonl
- config_name: trp_Latn
data_files:
- split: train
path: trp_Latn/*_keep.jsonl
- config_name: are_Latn
data_files:
- split: train
path: are_Latn/*_keep.jsonl
- config_name: esi_Latn
data_files:
- split: train
path: esi_Latn/*_keep.jsonl
- config_name: mhi_Latn
data_files:
- split: train
path: mhi_Latn/*_keep.jsonl
- config_name: obo_Latn
data_files:
- split: train
path: obo_Latn/*_keep.jsonl
- config_name: bwu_Latn
data_files:
- split: train
path: bwu_Latn/*_keep.jsonl
- config_name: lwo_Latn
data_files:
- split: train
path: lwo_Latn/*_keep.jsonl
- config_name: mrg_Latn
data_files:
- split: train
path: mrg_Latn/*_keep.jsonl
- config_name: old_Latn
data_files:
- split: train
path: old_Latn/*_keep.jsonl
- config_name: zav_Latn
data_files:
- split: train
path: zav_Latn/*_keep.jsonl
- config_name: snk_Latn
data_files:
- split: train
path: snk_Latn/*_keep.jsonl
- config_name: daa_Latn
data_files:
- split: train
path: daa_Latn/*_keep.jsonl
- config_name: fuq_Latn
data_files:
- split: train
path: fuq_Latn/*_keep.jsonl
- config_name: lud_Latn
data_files:
- split: train
path: lud_Latn/*_keep.jsonl
- config_name: nlg_Latn
data_files:
- split: train
path: nlg_Latn/*_keep.jsonl
- config_name: sck_Deva
data_files:
- split: train
path: sck_Deva/*_keep.jsonl
- config_name: tbl_Latn
data_files:
- split: train
path: tbl_Latn/*_keep.jsonl
- config_name: khb_Talu
data_files:
- split: train
path: khb_Talu/*_keep.jsonl
- config_name: tpu_Khmr
data_files:
- split: train
path: tpu_Khmr/*_keep.jsonl
- config_name: fue_Latn
data_files:
- split: train
path: fue_Latn/*_keep.jsonl
- config_name: tmd_Latn
data_files:
- split: train
path: tmd_Latn/*_keep.jsonl
- config_name: whg_Latn
data_files:
- split: train
path: whg_Latn/*_keep.jsonl
- config_name: blt_Latn
data_files:
- split: train
path: blt_Latn/*_keep.jsonl
- config_name: gqr_Latn
data_files:
- split: train
path: gqr_Latn/*_keep.jsonl
- config_name: kub_Latn
data_files:
- split: train
path: kub_Latn/*_keep.jsonl
- config_name: ots_Latn
data_files:
- split: train
path: ots_Latn/*_keep.jsonl
- config_name: tpp_Latn
data_files:
- split: train
path: tpp_Latn/*_keep.jsonl
- config_name: var_Latn
data_files:
- split: train
path: var_Latn/*_keep.jsonl
- config_name: hbb_Latn
data_files:
- split: train
path: hbb_Latn/*_keep.jsonl
- config_name: krr_Khmr
data_files:
- split: train
path: krr_Khmr/*_keep.jsonl
- config_name: zxx_Zyyy
data_files:
- split: train
path: zxx_Zyyy/*_keep.jsonl
- config_name: alj_Latn
data_files:
- split: train
path: alj_Latn/*_keep.jsonl
- config_name: ebk_Latn
data_files:
- split: train
path: ebk_Latn/*_keep.jsonl
- config_name: goa_Latn
data_files:
- split: train
path: goa_Latn/*_keep.jsonl
- config_name: hav_Latn
data_files:
- split: train
path: hav_Latn/*_keep.jsonl
- config_name: kmk_Latn
data_files:
- split: train
path: kmk_Latn/*_keep.jsonl
- config_name: kqo_Latn
data_files:
- split: train
path: kqo_Latn/*_keep.jsonl
- config_name: muy_Latn
data_files:
- split: train
path: muy_Latn/*_keep.jsonl
- config_name: njb_Latn
data_files:
- split: train
path: njb_Latn/*_keep.jsonl
- config_name: snw_Latn
data_files:
- split: train
path: snw_Latn/*_keep.jsonl
- config_name: wib_Latn
data_files:
- split: train
path: wib_Latn/*_keep.jsonl
- config_name: shn_Latn
data_files:
- split: train
path: shn_Latn/*_keep.jsonl
- config_name: tkr_Latn
data_files:
- split: train
path: tkr_Latn/*_keep.jsonl
- config_name: ksp_Latn
data_files:
- split: train
path: ksp_Latn/*_keep.jsonl
- config_name: lip_Latn
data_files:
- split: train
path: lip_Latn/*_keep.jsonl
- config_name: mog_Latn
data_files:
- split: train
path: mog_Latn/*_keep.jsonl
- config_name: nla_Latn
data_files:
- split: train
path: nla_Latn/*_keep.jsonl
- config_name: pss_Latn
data_files:
- split: train
path: pss_Latn/*_keep.jsonl
- config_name: zad_Latn
data_files:
- split: train
path: zad_Latn/*_keep.jsonl
- config_name: awi_Latn
data_files:
- split: train
path: awi_Latn/*_keep.jsonl
- config_name: frd_Latn
data_files:
- split: train
path: frd_Latn/*_keep.jsonl
- config_name: kei_Latn
data_files:
- split: train
path: kei_Latn/*_keep.jsonl
- config_name: kmd_Latn
data_files:
- split: train
path: kmd_Latn/*_keep.jsonl
- config_name: mim_Latn
data_files:
- split: train
path: mim_Latn/*_keep.jsonl
- config_name: got_Latn
data_files:
- split: train
path: got_Latn/*_keep.jsonl
- config_name: lbe_Latn
data_files:
- split: train
path: lbe_Latn/*_keep.jsonl
- config_name: adi_Latn
data_files:
- split: train
path: adi_Latn/*_keep.jsonl
- config_name: any_Latn
data_files:
- split: train
path: any_Latn/*_keep.jsonl
- config_name: arp_Latn
data_files:
- split: train
path: arp_Latn/*_keep.jsonl
- config_name: biu_Latn
data_files:
- split: train
path: biu_Latn/*_keep.jsonl
- config_name: bvc_Latn
data_files:
- split: train
path: bvc_Latn/*_keep.jsonl
- config_name: ghs_Latn
data_files:
- split: train
path: ghs_Latn/*_keep.jsonl
- config_name: ttq_Latn
data_files:
- split: train
path: ttq_Latn/*_keep.jsonl
- config_name: vmk_Latn
data_files:
- split: train
path: vmk_Latn/*_keep.jsonl
- config_name: zpj_Latn
data_files:
- split: train
path: zpj_Latn/*_keep.jsonl
- config_name: crl_Cans
data_files:
- split: train
path: crl_Cans/*_keep.jsonl
- config_name: dtb_Latn
data_files:
- split: train
path: dtb_Latn/*_keep.jsonl
- config_name: dug_Latn
data_files:
- split: train
path: dug_Latn/*_keep.jsonl
- config_name: kam_Latn
data_files:
- split: train
path: kam_Latn/*_keep.jsonl
- config_name: nmo_Latn
data_files:
- split: train
path: nmo_Latn/*_keep.jsonl
- config_name: qva_Latn
data_files:
- split: train
path: qva_Latn/*_keep.jsonl
- config_name: rhg_Latn
data_files:
- split: train
path: rhg_Latn/*_keep.jsonl
- config_name: taw_Latn
data_files:
- split: train
path: taw_Latn/*_keep.jsonl
- config_name: tui_Latn
data_files:
- split: train
path: tui_Latn/*_keep.jsonl
- config_name: uth_Latn
data_files:
- split: train
path: uth_Latn/*_keep.jsonl
- config_name: wap_Latn
data_files:
- split: train
path: wap_Latn/*_keep.jsonl
- config_name: zpg_Latn
data_files:
- split: train
path: zpg_Latn/*_keep.jsonl
- config_name: brb_Khmr
data_files:
- split: train
path: brb_Khmr/*_keep.jsonl
- config_name: dty_Latn
data_files:
- split: train
path: dty_Latn/*_keep.jsonl
- config_name: syr_Latn
data_files:
- split: train
path: syr_Latn/*_keep.jsonl
- config_name: bih_Arab
data_files:
- split: train
path: bih_Arab/*_keep.jsonl
- config_name: bqj_Latn
data_files:
- split: train
path: bqj_Latn/*_keep.jsonl
- config_name: cjv_Latn
data_files:
- split: train
path: cjv_Latn/*_keep.jsonl
- config_name: cnw_Latn
data_files:
- split: train
path: cnw_Latn/*_keep.jsonl
- config_name: kgf_Latn
data_files:
- split: train
path: kgf_Latn/*_keep.jsonl
- config_name: mfh_Latn
data_files:
- split: train
path: mfh_Latn/*_keep.jsonl
- config_name: mns_Cyrl
data_files:
- split: train
path: mns_Cyrl/*_keep.jsonl
- config_name: nnw_Latn
data_files:
- split: train
path: nnw_Latn/*_keep.jsonl
- config_name: ntp_Latn
data_files:
- split: train
path: ntp_Latn/*_keep.jsonl
- config_name: oto_Latn
data_files:
- split: train
path: oto_Latn/*_keep.jsonl
- config_name: dks_Latn
data_files:
- split: train
path: dks_Latn/*_keep.jsonl
- config_name: mpt_Latn
data_files:
- split: train
path: mpt_Latn/*_keep.jsonl
- config_name: nmw_Latn
data_files:
- split: train
path: nmw_Latn/*_keep.jsonl
- config_name: sba_Latn
data_files:
- split: train
path: sba_Latn/*_keep.jsonl
- config_name: sfw_Latn
data_files:
- split: train
path: sfw_Latn/*_keep.jsonl
- config_name: shu_Arab
data_files:
- split: train
path: shu_Arab/*_keep.jsonl
- config_name: ury_Latn
data_files:
- split: train
path: ury_Latn/*_keep.jsonl
- config_name: yrl_Latn
data_files:
- split: train
path: yrl_Latn/*_keep.jsonl
- config_name: arc_Latn
data_files:
- split: train
path: arc_Latn/*_keep.jsonl
- config_name: cmo_Khmr
data_files:
- split: train
path: cmo_Khmr/*_keep.jsonl
- config_name: xkg_Latn
data_files:
- split: train
path: xkg_Latn/*_keep.jsonl
- config_name: iqw_Latn
data_files:
- split: train
path: iqw_Latn/*_keep.jsonl
- config_name: kpj_Latn
data_files:
- split: train
path: kpj_Latn/*_keep.jsonl
- config_name: nfa_Latn
data_files:
- split: train
path: nfa_Latn/*_keep.jsonl
- config_name: nph_Latn
data_files:
- split: train
path: nph_Latn/*_keep.jsonl
- config_name: nre_Latn
data_files:
- split: train
path: nre_Latn/*_keep.jsonl
- config_name: wsg_Telu
data_files:
- split: train
path: wsg_Telu/*_keep.jsonl
- config_name: xsu_Latn
data_files:
- split: train
path: xsu_Latn/*_keep.jsonl
- config_name: nut_Latn
data_files:
- split: train
path: nut_Latn/*_keep.jsonl
- config_name: bmk_Latn
data_files:
- split: train
path: bmk_Latn/*_keep.jsonl
- config_name: cdf_Latn
data_files:
- split: train
path: cdf_Latn/*_keep.jsonl
- config_name: enl_Latn
data_files:
- split: train
path: enl_Latn/*_keep.jsonl
- config_name: fuv_Arab
data_files:
- split: train
path: fuv_Arab/*_keep.jsonl
- config_name: hop_Latn
data_files:
- split: train
path: hop_Latn/*_keep.jsonl
- config_name: ife_Latn
data_files:
- split: train
path: ife_Latn/*_keep.jsonl
- config_name: lnd_Latn
data_files:
- split: train
path: lnd_Latn/*_keep.jsonl
- config_name: mrv_Latn
data_files:
- split: train
path: mrv_Latn/*_keep.jsonl
- config_name: nzm_Latn
data_files:
- split: train
path: nzm_Latn/*_keep.jsonl
- config_name: tem_Latn
data_files:
- split: train
path: tem_Latn/*_keep.jsonl
- config_name: dwr_Ethi
data_files:
- split: train
path: dwr_Ethi/*_keep.jsonl
- config_name: gwc_Arab
data_files:
- split: train
path: gwc_Arab/*_keep.jsonl
- config_name: udi_Cyrl
data_files:
- split: train
path: udi_Cyrl/*_keep.jsonl
- config_name: zha_Hani
data_files:
- split: train
path: zha_Hani/*_keep.jsonl
- config_name: ajz_Latn
data_files:
- split: train
path: ajz_Latn/*_keep.jsonl
- config_name: kru_Deva
data_files:
- split: train
path: kru_Deva/*_keep.jsonl
- config_name: qvo_Latn
data_files:
- split: train
path: qvo_Latn/*_keep.jsonl
- config_name: sld_Latn
data_files:
- split: train
path: sld_Latn/*_keep.jsonl
- config_name: yup_Latn
data_files:
- split: train
path: yup_Latn/*_keep.jsonl
- config_name: anp_Latn
data_files:
- split: train
path: anp_Latn/*_keep.jsonl
- config_name: kwu_Latn
data_files:
- split: train
path: kwu_Latn/*_keep.jsonl
- config_name: szy_Hani
data_files:
- split: train
path: szy_Hani/*_keep.jsonl
- config_name: tdg_Deva
data_files:
- split: train
path: tdg_Deva/*_keep.jsonl
- config_name: ade_Latn
data_files:
- split: train
path: ade_Latn/*_keep.jsonl
- config_name: bcw_Latn
data_files:
- split: train
path: bcw_Latn/*_keep.jsonl
- config_name: bgz_Latn
data_files:
- split: train
path: bgz_Latn/*_keep.jsonl
- config_name: cul_Latn
data_files:
- split: train
path: cul_Latn/*_keep.jsonl
- config_name: fmu_Deva
data_files:
- split: train
path: fmu_Deva/*_keep.jsonl
- config_name: gvc_Latn
data_files:
- split: train
path: gvc_Latn/*_keep.jsonl
- config_name: ktj_Latn
data_files:
- split: train
path: ktj_Latn/*_keep.jsonl
- config_name: lis_Lisu
data_files:
- split: train
path: lis_Lisu/*_keep.jsonl
- config_name: mev_Latn
data_files:
- split: train
path: mev_Latn/*_keep.jsonl
- config_name: msc_Latn
data_files:
- split: train
path: msc_Latn/*_keep.jsonl
- config_name: mta_Latn
data_files:
- split: train
path: mta_Latn/*_keep.jsonl
- config_name: nbc_Latn
data_files:
- split: train
path: nbc_Latn/*_keep.jsonl
- config_name: ncq_Laoo
data_files:
- split: train
path: ncq_Laoo/*_keep.jsonl
- config_name: way_Latn
data_files:
- split: train
path: way_Latn/*_keep.jsonl
- config_name: wlv_Latn
data_files:
- split: train
path: wlv_Latn/*_keep.jsonl
- config_name: wlx_Latn
data_files:
- split: train
path: wlx_Latn/*_keep.jsonl
- config_name: jmx_Latn
data_files:
- split: train
path: jmx_Latn/*_keep.jsonl
- config_name: pli_Latn
data_files:
- split: train
path: pli_Latn/*_keep.jsonl
- config_name: tkr_Cyrl
data_files:
- split: train
path: tkr_Cyrl/*_keep.jsonl
- config_name: wni_Latn
data_files:
- split: train
path: wni_Latn/*_keep.jsonl
- config_name: aim_Latn
data_files:
- split: train
path: aim_Latn/*_keep.jsonl
- config_name: byv_Latn
data_files:
- split: train
path: byv_Latn/*_keep.jsonl
- config_name: cgg_Latn
data_files:
- split: train
path: cgg_Latn/*_keep.jsonl
- config_name: fud_Latn
data_files:
- split: train
path: fud_Latn/*_keep.jsonl
- config_name: jmc_Latn
data_files:
- split: train
path: jmc_Latn/*_keep.jsonl
- config_name: ker_Latn
data_files:
- split: train
path: ker_Latn/*_keep.jsonl
- config_name: kia_Latn
data_files:
- split: train
path: kia_Latn/*_keep.jsonl
- config_name: lom_Latn
data_files:
- split: train
path: lom_Latn/*_keep.jsonl
- config_name: ojb_Latn
data_files:
- split: train
path: ojb_Latn/*_keep.jsonl
- config_name: tnc_Latn
data_files:
- split: train
path: tnc_Latn/*_keep.jsonl
- config_name: bwx_Latn
data_files:
- split: train
path: bwx_Latn/*_keep.jsonl
- config_name: cre_Cans
data_files:
- split: train
path: cre_Cans/*_keep.jsonl
- config_name: skr_Latn
data_files:
- split: train
path: skr_Latn/*_keep.jsonl
- config_name: xal_Latn
data_files:
- split: train
path: xal_Latn/*_keep.jsonl
- config_name: gjn_Latn
data_files:
- split: train
path: gjn_Latn/*_keep.jsonl
- config_name: guq_Latn
data_files:
- split: train
path: guq_Latn/*_keep.jsonl
- config_name: lap_Latn
data_files:
- split: train
path: lap_Latn/*_keep.jsonl
- config_name: mip_Latn
data_files:
- split: train
path: mip_Latn/*_keep.jsonl
- config_name: mkl_Latn
data_files:
- split: train
path: mkl_Latn/*_keep.jsonl
- config_name: mtg_Latn
data_files:
- split: train
path: mtg_Latn/*_keep.jsonl
- config_name: mzk_Latn
data_files:
- split: train
path: mzk_Latn/*_keep.jsonl
- config_name: njm_Latn
data_files:
- split: train
path: njm_Latn/*_keep.jsonl
- config_name: nse_Latn
data_files:
- split: train
path: nse_Latn/*_keep.jsonl
- config_name: pne_Latn
data_files:
- split: train
path: pne_Latn/*_keep.jsonl
- config_name: pww_Thai
data_files:
- split: train
path: pww_Thai/*_keep.jsonl
- config_name: zam_Latn
data_files:
- split: train
path: zam_Latn/*_keep.jsonl
- config_name: zca_Latn
data_files:
- split: train
path: zca_Latn/*_keep.jsonl
- config_name: the_Deva
data_files:
- split: train
path: the_Deva/*_keep.jsonl
- config_name: dis_Latn
data_files:
- split: train
path: dis_Latn/*_keep.jsonl
- config_name: lgl_Latn
data_files:
- split: train
path: lgl_Latn/*_keep.jsonl
- config_name: mbd_Latn
data_files:
- split: train
path: mbd_Latn/*_keep.jsonl
- config_name: mda_Latn
data_files:
- split: train
path: mda_Latn/*_keep.jsonl
- config_name: mhy_Latn
data_files:
- split: train
path: mhy_Latn/*_keep.jsonl
- config_name: nst_Latn
data_files:
- split: train
path: nst_Latn/*_keep.jsonl
- config_name: pkb_Latn
data_files:
- split: train
path: pkb_Latn/*_keep.jsonl
- config_name: pmq_Latn
data_files:
- split: train
path: pmq_Latn/*_keep.jsonl
- config_name: poe_Latn
data_files:
- split: train
path: poe_Latn/*_keep.jsonl
- config_name: swk_Latn
data_files:
- split: train
path: swk_Latn/*_keep.jsonl
- config_name: trs_Latn
data_files:
- split: train
path: trs_Latn/*_keep.jsonl
- config_name: vot_Latn
data_files:
- split: train
path: vot_Latn/*_keep.jsonl
- config_name: yam_Latn
data_files:
- split: train
path: yam_Latn/*_keep.jsonl
- config_name: zpq_Latn
data_files:
- split: train
path: zpq_Latn/*_keep.jsonl
- config_name: atq_Latn
data_files:
- split: train
path: atq_Latn/*_keep.jsonl
- config_name: bav_Latn
data_files:
- split: train
path: bav_Latn/*_keep.jsonl
- config_name: cho_Latn
data_files:
- split: train
path: cho_Latn/*_keep.jsonl
- config_name: chq_Latn
data_files:
- split: train
path: chq_Latn/*_keep.jsonl
- config_name: cjo_Latn
data_files:
- split: train
path: cjo_Latn/*_keep.jsonl
- config_name: due_Latn
data_files:
- split: train
path: due_Latn/*_keep.jsonl
- config_name: duo_Latn
data_files:
- split: train
path: duo_Latn/*_keep.jsonl
- config_name: etu_Latn
data_files:
- split: train
path: etu_Latn/*_keep.jsonl
- config_name: ifa_Latn
data_files:
- split: train
path: ifa_Latn/*_keep.jsonl
- config_name: kby_Latn
data_files:
- split: train
path: kby_Latn/*_keep.jsonl
- config_name: khq_Latn
data_files:
- split: train
path: khq_Latn/*_keep.jsonl
- config_name: kno_Latn
data_files:
- split: train
path: kno_Latn/*_keep.jsonl
- config_name: lem_Latn
data_files:
- split: train
path: lem_Latn/*_keep.jsonl
- config_name: mcn_Latn
data_files:
- split: train
path: mcn_Latn/*_keep.jsonl
- config_name: mkz_Latn
data_files:
- split: train
path: mkz_Latn/*_keep.jsonl
- config_name: mnb_Latn
data_files:
- split: train
path: mnb_Latn/*_keep.jsonl
- config_name: myb_Latn
data_files:
- split: train
path: myb_Latn/*_keep.jsonl
- config_name: niy_Latn
data_files:
- split: train
path: niy_Latn/*_keep.jsonl
- config_name: pxm_Latn
data_files:
- split: train
path: pxm_Latn/*_keep.jsonl
- config_name: rub_Latn
data_files:
- split: train
path: rub_Latn/*_keep.jsonl
- config_name: sju_Latn
data_files:
- split: train
path: sju_Latn/*_keep.jsonl
- config_name: tmc_Latn
data_files:
- split: train
path: tmc_Latn/*_keep.jsonl
- config_name: tpm_Latn
data_files:
- split: train
path: tpm_Latn/*_keep.jsonl
- config_name: wob_Latn
data_files:
- split: train
path: wob_Latn/*_keep.jsonl
- config_name: nxa_Latn
data_files:
- split: train
path: nxa_Latn/*_keep.jsonl
- config_name: pnt_Latn
data_files:
- split: train
path: pnt_Latn/*_keep.jsonl
- config_name: sdk_Latn
data_files:
- split: train
path: sdk_Latn/*_keep.jsonl
- config_name: tcy_Latn
data_files:
- split: train
path: tcy_Latn/*_keep.jsonl
- config_name: bhz_Latn
data_files:
- split: train
path: bhz_Latn/*_keep.jsonl
- config_name: bth_Latn
data_files:
- split: train
path: bth_Latn/*_keep.jsonl
- config_name: crj_Cans
data_files:
- split: train
path: crj_Cans/*_keep.jsonl
- config_name: eka_Latn
data_files:
- split: train
path: eka_Latn/*_keep.jsonl
- config_name: khy_Latn
data_files:
- split: train
path: khy_Latn/*_keep.jsonl
- config_name: led_Latn
data_files:
- split: train
path: led_Latn/*_keep.jsonl
- config_name: met_Latn
data_files:
- split: train
path: met_Latn/*_keep.jsonl
- config_name: mgo_Latn
data_files:
- split: train
path: mgo_Latn/*_keep.jsonl
- config_name: mus_Latn
data_files:
- split: train
path: mus_Latn/*_keep.jsonl
- config_name: mzm_Latn
data_files:
- split: train
path: mzm_Latn/*_keep.jsonl
- config_name: naw_Latn
data_files:
- split: train
path: naw_Latn/*_keep.jsonl
- config_name: sri_Latn
data_files:
- split: train
path: sri_Latn/*_keep.jsonl
- config_name: tbk_Latn
data_files:
- split: train
path: tbk_Latn/*_keep.jsonl
- config_name: xbr_Latn
data_files:
- split: train
path: xbr_Latn/*_keep.jsonl
- config_name: awa_Latn
data_files:
- split: train
path: awa_Latn/*_keep.jsonl
- config_name: tnl_Latn
data_files:
- split: train
path: tnl_Latn/*_keep.jsonl
- config_name: tnn_Latn
data_files:
- split: train
path: tnn_Latn/*_keep.jsonl
- config_name: awb_Latn
data_files:
- split: train
path: awb_Latn/*_keep.jsonl
- config_name: bsc_Latn
data_files:
- split: train
path: bsc_Latn/*_keep.jsonl
- config_name: iri_Latn
data_files:
- split: train
path: iri_Latn/*_keep.jsonl
- config_name: kpq_Latn
data_files:
- split: train
path: kpq_Latn/*_keep.jsonl
- config_name: ktm_Latn
data_files:
- split: train
path: ktm_Latn/*_keep.jsonl
- config_name: loe_Latn
data_files:
- split: train
path: loe_Latn/*_keep.jsonl
- config_name: lun_Latn
data_files:
- split: train
path: lun_Latn/*_keep.jsonl
- config_name: mfg_Latn
data_files:
- split: train
path: mfg_Latn/*_keep.jsonl
- config_name: moa_Latn
data_files:
- split: train
path: moa_Latn/*_keep.jsonl
- config_name: ndj_Latn
data_files:
- split: train
path: ndj_Latn/*_keep.jsonl
- config_name: otn_Latn
data_files:
- split: train
path: otn_Latn/*_keep.jsonl
- config_name: sjo_Mong
data_files:
- split: train
path: sjo_Mong/*_keep.jsonl
- config_name: wat_Latn
data_files:
- split: train
path: wat_Latn/*_keep.jsonl
- config_name: bkm_Latn
data_files:
- split: train
path: bkm_Latn/*_keep.jsonl
- config_name: tuz_Latn
data_files:
- split: train
path: tuz_Latn/*_keep.jsonl
- config_name: ybi_Deva
data_files:
- split: train
path: ybi_Deva/*_keep.jsonl
- config_name: gbr_Latn
data_files:
- split: train
path: gbr_Latn/*_keep.jsonl
- config_name: gej_Latn
data_files:
- split: train
path: gej_Latn/*_keep.jsonl
- config_name: giz_Latn
data_files:
- split: train
path: giz_Latn/*_keep.jsonl
- config_name: gna_Latn
data_files:
- split: train
path: gna_Latn/*_keep.jsonl
- config_name: gwr_Latn
data_files:
- split: train
path: gwr_Latn/*_keep.jsonl
- config_name: kex_Deva
data_files:
- split: train
path: kex_Deva/*_keep.jsonl
- config_name: kle_Deva
data_files:
- split: train
path: kle_Deva/*_keep.jsonl
- config_name: knx_Latn
data_files:
- split: train
path: knx_Latn/*_keep.jsonl
- config_name: ktz_Latn
data_files:
- split: train
path: ktz_Latn/*_keep.jsonl
- config_name: kvj_Latn
data_files:
- split: train
path: kvj_Latn/*_keep.jsonl
- config_name: lob_Latn
data_files:
- split: train
path: lob_Latn/*_keep.jsonl
- config_name: mhw_Latn
data_files:
- split: train
path: mhw_Latn/*_keep.jsonl
- config_name: mph_Latn
data_files:
- split: train
path: mph_Latn/*_keep.jsonl
- config_name: pmx_Latn
data_files:
- split: train
path: pmx_Latn/*_keep.jsonl
- config_name: qus_Latn
data_files:
- split: train
path: qus_Latn/*_keep.jsonl
- config_name: tcc_Latn
data_files:
- split: train
path: tcc_Latn/*_keep.jsonl
- config_name: ang_Runr
data_files:
- split: train
path: ang_Runr/*_keep.jsonl
- config_name: azn_Latn
data_files:
- split: train
path: azn_Latn/*_keep.jsonl
- config_name: bra_Deva
data_files:
- split: train
path: bra_Deva/*_keep.jsonl
- config_name: gal_Latn
data_files:
- split: train
path: gal_Latn/*_keep.jsonl
- config_name: jra_Khmr
data_files:
- split: train
path: jra_Khmr/*_keep.jsonl
- config_name: nlv_Latn
data_files:
- split: train
path: nlv_Latn/*_keep.jsonl
- config_name: rki_Latn
data_files:
- split: train
path: rki_Latn/*_keep.jsonl
- config_name: stk_Latn
data_files:
- split: train
path: stk_Latn/*_keep.jsonl
- config_name: yux_Cyrl
data_files:
- split: train
path: yux_Cyrl/*_keep.jsonl
- config_name: dip_Latn
data_files:
- split: train
path: dip_Latn/*_keep.jsonl
- config_name: ivv_Latn
data_files:
- split: train
path: ivv_Latn/*_keep.jsonl
- config_name: kdj_Latn
data_files:
- split: train
path: kdj_Latn/*_keep.jsonl
- config_name: kff_Telu
data_files:
- split: train
path: kff_Telu/*_keep.jsonl
- config_name: kyu_Kali
data_files:
- split: train
path: kyu_Kali/*_keep.jsonl
- config_name: mnf_Latn
data_files:
- split: train
path: mnf_Latn/*_keep.jsonl
- config_name: moc_Latn
data_files:
- split: train
path: moc_Latn/*_keep.jsonl
- config_name: mpg_Latn
data_files:
- split: train
path: mpg_Latn/*_keep.jsonl
- config_name: mua_Latn
data_files:
- split: train
path: mua_Latn/*_keep.jsonl
- config_name: nma_Latn
data_files:
- split: train
path: nma_Latn/*_keep.jsonl
- config_name: rim_Latn
data_files:
- split: train
path: rim_Latn/*_keep.jsonl
- config_name: rjs_Deva
data_files:
- split: train
path: rjs_Deva/*_keep.jsonl
- config_name: rnl_Latn
data_files:
- split: train
path: rnl_Latn/*_keep.jsonl
- config_name: ssx_Latn
data_files:
- split: train
path: ssx_Latn/*_keep.jsonl
- config_name: tig_Ethi
data_files:
- split: train
path: tig_Ethi/*_keep.jsonl
- config_name: urw_Latn
data_files:
- split: train
path: urw_Latn/*_keep.jsonl
- config_name: idt_Latn
data_files:
- split: train
path: idt_Latn/*_keep.jsonl
- config_name: mrn_Latn
data_files:
- split: train
path: mrn_Latn/*_keep.jsonl
- config_name: oki_Latn
data_files:
- split: train
path: oki_Latn/*_keep.jsonl
- config_name: bbk_Latn
data_files:
- split: train
path: bbk_Latn/*_keep.jsonl
- config_name: bea_Latn
data_files:
- split: train
path: bea_Latn/*_keep.jsonl
- config_name: bkq_Latn
data_files:
- split: train
path: bkq_Latn/*_keep.jsonl
- config_name: btt_Latn
data_files:
- split: train
path: btt_Latn/*_keep.jsonl
- config_name: cou_Latn
data_files:
- split: train
path: cou_Latn/*_keep.jsonl
- config_name: ddn_Latn
data_files:
- split: train
path: ddn_Latn/*_keep.jsonl
- config_name: did_Latn
data_files:
- split: train
path: did_Latn/*_keep.jsonl
- config_name: diu_Latn
data_files:
- split: train
path: diu_Latn/*_keep.jsonl
- config_name: ilb_Latn
data_files:
- split: train
path: ilb_Latn/*_keep.jsonl
- config_name: ksj_Latn
data_files:
- split: train
path: ksj_Latn/*_keep.jsonl
- config_name: lhi_Latn
data_files:
- split: train
path: lhi_Latn/*_keep.jsonl
- config_name: ndi_Latn
data_files:
- split: train
path: ndi_Latn/*_keep.jsonl
- config_name: ngb_Latn
data_files:
- split: train
path: ngb_Latn/*_keep.jsonl
- config_name: njz_Latn
data_files:
- split: train
path: njz_Latn/*_keep.jsonl
- config_name: nmz_Latn
data_files:
- split: train
path: nmz_Latn/*_keep.jsonl
- config_name: npo_Latn
data_files:
- split: train
path: npo_Latn/*_keep.jsonl
- config_name: nwb_Latn
data_files:
- split: train
path: nwb_Latn/*_keep.jsonl
- config_name: pbi_Latn
data_files:
- split: train
path: pbi_Latn/*_keep.jsonl
- config_name: sbd_Latn
data_files:
- split: train
path: sbd_Latn/*_keep.jsonl
- config_name: soe_Latn
data_files:
- split: train
path: soe_Latn/*_keep.jsonl
- config_name: tpw_Latn
data_files:
- split: train
path: tpw_Latn/*_keep.jsonl
- config_name: twb_Latn
data_files:
- split: train
path: twb_Latn/*_keep.jsonl
- config_name: bfn_Latn
data_files:
- split: train
path: bfn_Latn/*_keep.jsonl
- config_name: boz_Latn
data_files:
- split: train
path: boz_Latn/*_keep.jsonl
- config_name: lwl_Thai
data_files:
- split: train
path: lwl_Thai/*_keep.jsonl
- config_name: psp_Latn
data_files:
- split: train
path: psp_Latn/*_keep.jsonl
- config_name: sgd_Latn
data_files:
- split: train
path: sgd_Latn/*_keep.jsonl
- config_name: ymp_Latn
data_files:
- split: train
path: ymp_Latn/*_keep.jsonl
- config_name: bkl_Latn
data_files:
- split: train
path: bkl_Latn/*_keep.jsonl
- config_name: bkv_Latn
data_files:
- split: train
path: bkv_Latn/*_keep.jsonl
- config_name: csk_Latn
data_files:
- split: train
path: csk_Latn/*_keep.jsonl
- config_name: enx_Latn
data_files:
- split: train
path: enx_Latn/*_keep.jsonl
- config_name: ess_Latn
data_files:
- split: train
path: ess_Latn/*_keep.jsonl
- config_name: gof_Ethi
data_files:
- split: train
path: gof_Ethi/*_keep.jsonl
- config_name: gud_Latn
data_files:
- split: train
path: gud_Latn/*_keep.jsonl
- config_name: guu_Latn
data_files:
- split: train
path: guu_Latn/*_keep.jsonl
- config_name: kdh_Latn
data_files:
- split: train
path: kdh_Latn/*_keep.jsonl
- config_name: krx_Latn
data_files:
- split: train
path: krx_Latn/*_keep.jsonl
- config_name: kzn_Latn
data_files:
- split: train
path: kzn_Latn/*_keep.jsonl
- config_name: maf_Latn
data_files:
- split: train
path: maf_Latn/*_keep.jsonl
- config_name: max_Latn
data_files:
- split: train
path: max_Latn/*_keep.jsonl
- config_name: mgc_Latn
data_files:
- split: train
path: mgc_Latn/*_keep.jsonl
- config_name: mvn_Latn
data_files:
- split: train
path: mvn_Latn/*_keep.jsonl
- config_name: mwm_Latn
data_files:
- split: train
path: mwm_Latn/*_keep.jsonl
- config_name: nbe_Latn
data_files:
- split: train
path: nbe_Latn/*_keep.jsonl
- config_name: nlc_Latn
data_files:
- split: train
path: nlc_Latn/*_keep.jsonl
- config_name: nxd_Latn
data_files:
- split: train
path: nxd_Latn/*_keep.jsonl
- config_name: pse_Latn
data_files:
- split: train
path: pse_Latn/*_keep.jsonl
- config_name: sbs_Latn
data_files:
- split: train
path: sbs_Latn/*_keep.jsonl
- config_name: tlb_Latn
data_files:
- split: train
path: tlb_Latn/*_keep.jsonl
- config_name: wwa_Latn
data_files:
- split: train
path: wwa_Latn/*_keep.jsonl
- config_name: yim_Latn
data_files:
- split: train
path: yim_Latn/*_keep.jsonl
- config_name: bkx_Latn
data_files:
- split: train
path: bkx_Latn/*_keep.jsonl
- config_name: bze_Latn
data_files:
- split: train
path: bze_Latn/*_keep.jsonl
- config_name: chu_Latn
data_files:
- split: train
path: chu_Latn/*_keep.jsonl
- config_name: kbq_Latn
data_files:
- split: train
path: kbq_Latn/*_keep.jsonl
- config_name: kpy_Armn
data_files:
- split: train
path: kpy_Armn/*_keep.jsonl
- config_name: mry_Latn
data_files:
- split: train
path: mry_Latn/*_keep.jsonl
- config_name: tkd_Latn
data_files:
- split: train
path: tkd_Latn/*_keep.jsonl
- config_name: ybh_Deva
data_files:
- split: train
path: ybh_Deva/*_keep.jsonl
- config_name: abi_Latn
data_files:
- split: train
path: abi_Latn/*_keep.jsonl
- config_name: ayo_Latn
data_files:
- split: train
path: ayo_Latn/*_keep.jsonl
- config_name: bex_Latn
data_files:
- split: train
path: bex_Latn/*_keep.jsonl
- config_name: bom_Latn
data_files:
- split: train
path: bom_Latn/*_keep.jsonl
- config_name: bov_Latn
data_files:
- split: train
path: bov_Latn/*_keep.jsonl
- config_name: dhg_Latn
data_files:
- split: train
path: dhg_Latn/*_keep.jsonl
- config_name: jaa_Latn
data_files:
- split: train
path: jaa_Latn/*_keep.jsonl
- config_name: kqs_Latn
data_files:
- split: train
path: kqs_Latn/*_keep.jsonl
- config_name: kqy_Ethi
data_files:
- split: train
path: kqy_Ethi/*_keep.jsonl
- config_name: ksb_Latn
data_files:
- split: train
path: ksb_Latn/*_keep.jsonl
- config_name: ldn_Latn
data_files:
- split: train
path: ldn_Latn/*_keep.jsonl
- config_name: log_Latn
data_files:
- split: train
path: log_Latn/*_keep.jsonl
- config_name: lol_Latn
data_files:
- split: train
path: lol_Latn/*_keep.jsonl
- config_name: mfk_Latn
data_files:
- split: train
path: mfk_Latn/*_keep.jsonl
- config_name: mtj_Latn
data_files:
- split: train
path: mtj_Latn/*_keep.jsonl
- config_name: ndz_Latn
data_files:
- split: train
path: ndz_Latn/*_keep.jsonl
- config_name: ngc_Latn
data_files:
- split: train
path: ngc_Latn/*_keep.jsonl
- config_name: nio_Cyrl
data_files:
- split: train
path: nio_Cyrl/*_keep.jsonl
- config_name: nri_Latn
data_files:
- split: train
path: nri_Latn/*_keep.jsonl
- config_name: nuz_Latn
data_files:
- split: train
path: nuz_Latn/*_keep.jsonl
- config_name: pps_Latn
data_files:
- split: train
path: pps_Latn/*_keep.jsonl
- config_name: shk_Latn
data_files:
- split: train
path: shk_Latn/*_keep.jsonl
- config_name: tlj_Latn
data_files:
- split: train
path: tlj_Latn/*_keep.jsonl
- config_name: vut_Latn
data_files:
- split: train
path: vut_Latn/*_keep.jsonl
- config_name: xnn_Latn
data_files:
- split: train
path: xnn_Latn/*_keep.jsonl
- config_name: bcc_Arab
data_files:
- split: train
path: bcc_Arab/*_keep.jsonl
- config_name: cuh_Latn
data_files:
- split: train
path: cuh_Latn/*_keep.jsonl
- config_name: emp_Cyrl
data_files:
- split: train
path: emp_Cyrl/*_keep.jsonl
- config_name: hro_Latn
data_files:
- split: train
path: hro_Latn/*_keep.jsonl
- config_name: nco_Latn
data_files:
- split: train
path: nco_Latn/*_keep.jsonl
- config_name: nxl_Latn
data_files:
- split: train
path: nxl_Latn/*_keep.jsonl
- config_name: pex_Latn
data_files:
- split: train
path: pex_Latn/*_keep.jsonl
- config_name: tnp_Latn
data_files:
- split: train
path: tnp_Latn/*_keep.jsonl
- config_name: trv_Hani
data_files:
- split: train
path: trv_Hani/*_keep.jsonl
- config_name: vif_Latn
data_files:
- split: train
path: vif_Latn/*_keep.jsonl
- config_name: aha_Latn
data_files:
- split: train
path: aha_Latn/*_keep.jsonl
- config_name: avn_Latn
data_files:
- split: train
path: avn_Latn/*_keep.jsonl
- config_name: bmv_Latn
data_files:
- split: train
path: bmv_Latn/*_keep.jsonl
- config_name: chj_Latn
data_files:
- split: train
path: chj_Latn/*_keep.jsonl
- config_name: crt_Latn
data_files:
- split: train
path: crt_Latn/*_keep.jsonl
- config_name: dos_Latn
data_files:
- split: train
path: dos_Latn/*_keep.jsonl
- config_name: eto_Latn
data_files:
- split: train
path: eto_Latn/*_keep.jsonl
- config_name: gya_Latn
data_files:
- split: train
path: gya_Latn/*_keep.jsonl
- config_name: hlt_Latn
data_files:
- split: train
path: hlt_Latn/*_keep.jsonl
- config_name: jun_Orya
data_files:
- split: train
path: jun_Orya/*_keep.jsonl
- config_name: ktb_Ethi
data_files:
- split: train
path: ktb_Ethi/*_keep.jsonl
- config_name: lgm_Latn
data_files:
- split: train
path: lgm_Latn/*_keep.jsonl
- config_name: mnx_Latn
data_files:
- split: train
path: mnx_Latn/*_keep.jsonl
- config_name: mor_Latn
data_files:
- split: train
path: mor_Latn/*_keep.jsonl
- config_name: mug_Latn
data_files:
- split: train
path: mug_Latn/*_keep.jsonl
- config_name: nng_Latn
data_files:
- split: train
path: nng_Latn/*_keep.jsonl
- config_name: nnh_Latn
data_files:
- split: train
path: nnh_Latn/*_keep.jsonl
- config_name: nsa_Latn
data_files:
- split: train
path: nsa_Latn/*_keep.jsonl
- config_name: nyk_Latn
data_files:
- split: train
path: nyk_Latn/*_keep.jsonl
- config_name: otd_Latn
data_files:
- split: train
path: otd_Latn/*_keep.jsonl
- config_name: oym_Latn
data_files:
- split: train
path: oym_Latn/*_keep.jsonl
- config_name: phm_Latn
data_files:
- split: train
path: phm_Latn/*_keep.jsonl
- config_name: prq_Latn
data_files:
- split: train
path: prq_Latn/*_keep.jsonl
- config_name: saj_Latn
data_files:
- split: train
path: saj_Latn/*_keep.jsonl
- config_name: szb_Latn
data_files:
- split: train
path: szb_Latn/*_keep.jsonl
- config_name: tap_Latn
data_files:
- split: train
path: tap_Latn/*_keep.jsonl
- config_name: tnr_Latn
data_files:
- split: train
path: tnr_Latn/*_keep.jsonl
- config_name: tzl_Latn
data_files:
- split: train
path: tzl_Latn/*_keep.jsonl
- config_name: wew_Latn
data_files:
- split: train
path: wew_Latn/*_keep.jsonl
- config_name: yas_Latn
data_files:
- split: train
path: yas_Latn/*_keep.jsonl
- config_name: yuz_Latn
data_files:
- split: train
path: yuz_Latn/*_keep.jsonl
- config_name: avk_Cyrl
data_files:
- split: train
path: avk_Cyrl/*_keep.jsonl
- config_name: bat_Cyrl
data_files:
- split: train
path: bat_Cyrl/*_keep.jsonl
- config_name: baw_Latn
data_files:
- split: train
path: baw_Latn/*_keep.jsonl
- config_name: bwt_Latn
data_files:
- split: train
path: bwt_Latn/*_keep.jsonl
- config_name: bxa_Latn
data_files:
- split: train
path: bxa_Latn/*_keep.jsonl
- config_name: cjs_Latn
data_files:
- split: train
path: cjs_Latn/*_keep.jsonl
- config_name: csw_Cans
data_files:
- split: train
path: csw_Cans/*_keep.jsonl
- config_name: fli_Latn
data_files:
- split: train
path: fli_Latn/*_keep.jsonl
- config_name: kqr_Latn
data_files:
- split: train
path: kqr_Latn/*_keep.jsonl
- config_name: lan_Latn
data_files:
- split: train
path: lan_Latn/*_keep.jsonl
- config_name: lgr_Latn
data_files:
- split: train
path: lgr_Latn/*_keep.jsonl
- config_name: lhm_Deva
data_files:
- split: train
path: lhm_Deva/*_keep.jsonl
- config_name: mlk_Latn
data_files:
- split: train
path: mlk_Latn/*_keep.jsonl
- config_name: mve_Arab
data_files:
- split: train
path: mve_Arab/*_keep.jsonl
- config_name: nqo_Latn
data_files:
- split: train
path: nqo_Latn/*_keep.jsonl
- config_name: odk_Arab
data_files:
- split: train
path: odk_Arab/*_keep.jsonl
- config_name: olo_Cyrl
data_files:
- split: train
path: olo_Cyrl/*_keep.jsonl
- config_name: otq_Arab
data_files:
- split: train
path: otq_Arab/*_keep.jsonl
- config_name: pea_Latn
data_files:
- split: train
path: pea_Latn/*_keep.jsonl
- config_name: saq_Latn
data_files:
- split: train
path: saq_Latn/*_keep.jsonl
- config_name: sea_Latn
data_files:
- split: train
path: sea_Latn/*_keep.jsonl
- config_name: sps_Latn
data_files:
- split: train
path: sps_Latn/*_keep.jsonl
- config_name: tio_Latn
data_files:
- split: train
path: tio_Latn/*_keep.jsonl
- config_name: yin_Latn
data_files:
- split: train
path: yin_Latn/*_keep.jsonl
- config_name: adl_Latn
data_files:
- split: train
path: adl_Latn/*_keep.jsonl
- config_name: agw_Latn
data_files:
- split: train
path: agw_Latn/*_keep.jsonl
- config_name: ald_Latn
data_files:
- split: train
path: ald_Latn/*_keep.jsonl
- config_name: asg_Latn
data_files:
- split: train
path: asg_Latn/*_keep.jsonl
- config_name: bnj_Latn
data_files:
- split: train
path: bnj_Latn/*_keep.jsonl
- config_name: brx_Latn
data_files:
- split: train
path: brx_Latn/*_keep.jsonl
- config_name: bwi_Latn
data_files:
- split: train
path: bwi_Latn/*_keep.jsonl
- config_name: bwq_Latn
data_files:
- split: train
path: bwq_Latn/*_keep.jsonl
- config_name: dbq_Latn
data_files:
- split: train
path: dbq_Latn/*_keep.jsonl
- config_name: dow_Latn
data_files:
- split: train
path: dow_Latn/*_keep.jsonl
- config_name: fad_Latn
data_files:
- split: train
path: fad_Latn/*_keep.jsonl
- config_name: hoc_Latn
data_files:
- split: train
path: hoc_Latn/*_keep.jsonl
- config_name: kql_Latn
data_files:
- split: train
path: kql_Latn/*_keep.jsonl
- config_name: ksf_Latn
data_files:
- split: train
path: ksf_Latn/*_keep.jsonl
- config_name: kyu_Mymr
data_files:
- split: train
path: kyu_Mymr/*_keep.jsonl
- config_name: lea_Latn
data_files:
- split: train
path: lea_Latn/*_keep.jsonl
- config_name: mse_Latn
data_files:
- split: train
path: mse_Latn/*_keep.jsonl
- config_name: mzl_Latn
data_files:
- split: train
path: mzl_Latn/*_keep.jsonl
- config_name: nhd_Latn
data_files:
- split: train
path: nhd_Latn/*_keep.jsonl
- config_name: nnl_Latn
data_files:
- split: train
path: nnl_Latn/*_keep.jsonl
- config_name: nwx_Deva
data_files:
- split: train
path: nwx_Deva/*_keep.jsonl
- config_name: pbc_Latn
data_files:
- split: train
path: pbc_Latn/*_keep.jsonl
- config_name: pny_Latn
data_files:
- split: train
path: pny_Latn/*_keep.jsonl
- config_name: tul_Latn
data_files:
- split: train
path: tul_Latn/*_keep.jsonl
- config_name: aaa_Latn
data_files:
- split: train
path: aaa_Latn/*_keep.jsonl
- config_name: abc_Latn
data_files:
- split: train
path: abc_Latn/*_keep.jsonl
- config_name: agq_Latn
data_files:
- split: train
path: agq_Latn/*_keep.jsonl
- config_name: ags_Latn
data_files:
- split: train
path: ags_Latn/*_keep.jsonl
- config_name: azo_Latn
data_files:
- split: train
path: azo_Latn/*_keep.jsonl
- config_name: bag_Latn
data_files:
- split: train
path: bag_Latn/*_keep.jsonl
- config_name: bax_Latn
data_files:
- split: train
path: bax_Latn/*_keep.jsonl
- config_name: bce_Latn
data_files:
- split: train
path: bce_Latn/*_keep.jsonl
- config_name: bfm_Latn
data_files:
- split: train
path: bfm_Latn/*_keep.jsonl
- config_name: bgf_Latn
data_files:
- split: train
path: bgf_Latn/*_keep.jsonl
- config_name: bhs_Latn
data_files:
- split: train
path: bhs_Latn/*_keep.jsonl
- config_name: bkc_Latn
data_files:
- split: train
path: bkc_Latn/*_keep.jsonl
- config_name: bkh_Latn
data_files:
- split: train
path: bkh_Latn/*_keep.jsonl
- config_name: blk_Latn
data_files:
- split: train
path: blk_Latn/*_keep.jsonl
- config_name: bob_Latn
data_files:
- split: train
path: bob_Latn/*_keep.jsonl
- config_name: bqm_Latn
data_files:
- split: train
path: bqm_Latn/*_keep.jsonl
- config_name: bri_Latn
data_files:
- split: train
path: bri_Latn/*_keep.jsonl
- config_name: brv_Laoo
data_files:
- split: train
path: brv_Laoo/*_keep.jsonl
- config_name: buo_Latn
data_files:
- split: train
path: buo_Latn/*_keep.jsonl
- config_name: bya_Latn
data_files:
- split: train
path: bya_Latn/*_keep.jsonl
- config_name: chp_Cans
data_files:
- split: train
path: chp_Cans/*_keep.jsonl
- config_name: cim_Latn
data_files:
- split: train
path: cim_Latn/*_keep.jsonl
- config_name: clo_Latn
data_files:
- split: train
path: clo_Latn/*_keep.jsonl
- config_name: cuv_Latn
data_files:
- split: train
path: cuv_Latn/*_keep.jsonl
- config_name: dag_Arab
data_files:
- split: train
path: dag_Arab/*_keep.jsonl
- config_name: dmg_Latn
data_files:
- split: train
path: dmg_Latn/*_keep.jsonl
- config_name: dtr_Latn
data_files:
- split: train
path: dtr_Latn/*_keep.jsonl
- config_name: eee_Thai
data_files:
- split: train
path: eee_Thai/*_keep.jsonl
- config_name: ekm_Latn
data_files:
- split: train
path: ekm_Latn/*_keep.jsonl
- config_name: enc_Latn
data_files:
- split: train
path: enc_Latn/*_keep.jsonl
- config_name: fiu_Cyrl
data_files:
- split: train
path: fiu_Cyrl/*_keep.jsonl
- config_name: gbj_Orya
data_files:
- split: train
path: gbj_Orya/*_keep.jsonl
- config_name: gou_Latn
data_files:
- split: train
path: gou_Latn/*_keep.jsonl
- config_name: hao_Latn
data_files:
- split: train
path: hao_Latn/*_keep.jsonl
- config_name: hna_Latn
data_files:
- split: train
path: hna_Latn/*_keep.jsonl
- config_name: hre_Latn
data_files:
- split: train
path: hre_Latn/*_keep.jsonl
- config_name: isu_Latn
data_files:
- split: train
path: isu_Latn/*_keep.jsonl
- config_name: jgo_Latn
data_files:
- split: train
path: jgo_Latn/*_keep.jsonl
- config_name: kbx_Latn
data_files:
- split: train
path: kbx_Latn/*_keep.jsonl
- config_name: kvt_Latn
data_files:
- split: train
path: kvt_Latn/*_keep.jsonl
- config_name: kxp_Arab
data_files:
- split: train
path: kxp_Arab/*_keep.jsonl
- config_name: lfa_Latn
data_files:
- split: train
path: lfa_Latn/*_keep.jsonl
- config_name: lkb_Latn
data_files:
- split: train
path: lkb_Latn/*_keep.jsonl
- config_name: lns_Latn
data_files:
- split: train
path: lns_Latn/*_keep.jsonl
- config_name: loh_Latn
data_files:
- split: train
path: loh_Latn/*_keep.jsonl
- config_name: lrc_Latn
data_files:
- split: train
path: lrc_Latn/*_keep.jsonl
- config_name: lts_Latn
data_files:
- split: train
path: lts_Latn/*_keep.jsonl
- config_name: luy_Latn
data_files:
- split: train
path: luy_Latn/*_keep.jsonl
- config_name: mdr_Latn
data_files:
- split: train
path: mdr_Latn/*_keep.jsonl
- config_name: mfj_Latn
data_files:
- split: train
path: mfj_Latn/*_keep.jsonl
- config_name: mgg_Latn
data_files:
- split: train
path: mgg_Latn/*_keep.jsonl
- config_name: mgq_Latn
data_files:
- split: train
path: mgq_Latn/*_keep.jsonl
- config_name: mle_Latn
data_files:
- split: train
path: mle_Latn/*_keep.jsonl
- config_name: mlw_Latn
data_files:
- split: train
path: mlw_Latn/*_keep.jsonl
- config_name: mmu_Latn
data_files:
- split: train
path: mmu_Latn/*_keep.jsonl
- config_name: mne_Latn
data_files:
- split: train
path: mne_Latn/*_keep.jsonl
- config_name: muv_Mlym
data_files:
- split: train
path: muv_Mlym/*_keep.jsonl
- config_name: mxu_Latn
data_files:
- split: train
path: mxu_Latn/*_keep.jsonl
- config_name: nge_Latn
data_files:
- split: train
path: nge_Latn/*_keep.jsonl
- config_name: ngn_Latn
data_files:
- split: train
path: ngn_Latn/*_keep.jsonl
- config_name: njy_Latn
data_files:
- split: train
path: njy_Latn/*_keep.jsonl
- config_name: nsk_Cans
data_files:
- split: train
path: nsk_Cans/*_keep.jsonl
- config_name: nsk_Latn
data_files:
- split: train
path: nsk_Latn/*_keep.jsonl
- config_name: nwe_Latn
data_files:
- split: train
path: nwe_Latn/*_keep.jsonl
- config_name: nza_Latn
data_files:
- split: train
path: nza_Latn/*_keep.jsonl
- config_name: pcg_Mlym
data_files:
- split: train
path: pcg_Mlym/*_keep.jsonl
- config_name: pdu_Latn
data_files:
- split: train
path: pdu_Latn/*_keep.jsonl
- config_name: pnz_Latn
data_files:
- split: train
path: pnz_Latn/*_keep.jsonl
- config_name: pwg_Latn
data_files:
- split: train
path: pwg_Latn/*_keep.jsonl
- config_name: qaa_Latn
data_files:
- split: train
path: qaa_Latn/*_keep.jsonl
- config_name: shi_Arab
data_files:
- split: train
path: shi_Arab/*_keep.jsonl
- config_name: smn_Cyrl
data_files:
- split: train
path: smn_Cyrl/*_keep.jsonl
- config_name: snk_Arab
data_files:
- split: train
path: snk_Arab/*_keep.jsonl
- config_name: snl_Latn
data_files:
- split: train
path: snl_Latn/*_keep.jsonl
- config_name: sox_Latn
data_files:
- split: train
path: sox_Latn/*_keep.jsonl
- config_name: ssn_Latn
data_files:
- split: train
path: ssn_Latn/*_keep.jsonl
- config_name: syw_Deva
data_files:
- split: train
path: syw_Deva/*_keep.jsonl
- config_name: tay_Hani
data_files:
- split: train
path: tay_Hani/*_keep.jsonl
- config_name: tbj_Latn
data_files:
- split: train
path: tbj_Latn/*_keep.jsonl
- config_name: tdb_Deva
data_files:
- split: train
path: tdb_Deva/*_keep.jsonl
- config_name: thy_Thai
data_files:
- split: train
path: thy_Thai/*_keep.jsonl
- config_name: tnt_Latn
data_files:
- split: train
path: tnt_Latn/*_keep.jsonl
- config_name: tom_Latn
data_files:
- split: train
path: tom_Latn/*_keep.jsonl
- config_name: tpl_Latn
data_files:
- split: train
path: tpl_Latn/*_keep.jsonl
- config_name: tsb_Latn
data_files:
- split: train
path: tsb_Latn/*_keep.jsonl
- config_name: tvs_Latn
data_files:
- split: train
path: tvs_Latn/*_keep.jsonl
- config_name: udg_Mlym
data_files:
- split: train
path: udg_Mlym/*_keep.jsonl
- config_name: unr_Orya
data_files:
- split: train
path: unr_Orya/*_keep.jsonl
- config_name: wbr_Deva
data_files:
- split: train
path: wbr_Deva/*_keep.jsonl
- config_name: wms_Latn
data_files:
- split: train
path: wms_Latn/*_keep.jsonl
- config_name: wnk_Latn
data_files:
- split: train
path: wnk_Latn/*_keep.jsonl
- config_name: wtk_Latn
data_files:
- split: train
path: wtk_Latn/*_keep.jsonl
- config_name: xmd_Latn
data_files:
- split: train
path: xmd_Latn/*_keep.jsonl
- config_name: xmg_Latn
data_files:
- split: train
path: xmg_Latn/*_keep.jsonl
- config_name: xty_Latn
data_files:
- split: train
path: xty_Latn/*_keep.jsonl
- config_name: yav_Latn
data_files:
- split: train
path: yav_Latn/*_keep.jsonl
- config_name: yea_Mlym
data_files:
- split: train
path: yea_Mlym/*_keep.jsonl
- config_name: zuh_Latn
data_files:
- split: train
path: zuh_Latn/*_keep.jsonl
language:
- eng
- rus
- cmn
- deu
- spa
- fra
- jpn
- ita
- por
- pol
- nld
- ind
- tur
- vie
- fas
- kor
- swe
- hun
- ell
- ukr
- tha
- aze
- arb
- slv
- cat
- fin
- hbs
- ces
- fil
- guj
- nob
- mal
- bul
- kan
- hin
- tam
- ara
- kaz
- sqi
- pan
- est
- srp
- heb
- isl
- mlt
- mkd
- bos
- lit
- kat
- hrv
- ben
- glg
- ceb
- ron
- msa
- hye
- tgk
- mar
- bel
- nep
- urd
- slk
- mon
- swa
- dan
- eus
- azj
- sin
- lat
- als
- tel
- afr
- ekk
- ltz
- zsm
- som
- cym
- und
- kir
- nor
- uzb
- mya
- epo
- hau
- ary
- gle
- lvs
- nno
- und
- ory
- amh
- tat
- khm
- und
- und
- und
- arz
- pus
- lav
- hbs
- und
- war
- und
- mlg
- uzn
- hat
- zul
- kur
- tgl
- uzb
- div
- und
- fry
- sna
- und
- fao
- sun
- und
- und
- snd
- cos
- bak
- ckb
- asm
- und
- jpn
- ast
- ibo
- mri
- und
- ars
- und
- anp
- khk
- und
- lao
- ori
- und
- yid
- hmn
- jav
- nya
- und
- und
- und
- tir
- uig
- wln
- und
- yor
- swh
- gla
- xho
- uzn
- bre
- sot
- che
- nan
- tel
- bew
- glk
- smo
- orm
- zho
- haw
- pnb
- und
- oci
- chv
- kin
- und
- srp
- und
- und
- pcm
- kaa
- lug
- pbt
- aze
- gsw
- min
- tuk
- lim
- kmr
- und
- roh
- und
- und
- vec
- san
- und
- gmh
- nds
- sco
- und
- und
- azb
- tsn
- rus
- und
- mon
- und
- tgk
- sah
- pri
- pap
- plt
- lmo
- bod
- und
- yue
- und
- und
- und
- bar
- und
- hif
- und
- arg
- pms
- kat
- nrm
- vol
- aeb
- und
- und
- ido
- pam
- kal
- und
- und
- und
- lus
- und
- und
- und
- new
- sme
- und
- und
- nso
- lld
- ina
- scn
- und
- und
- und
- hyw
- abk
- bew
- und
- und
- bpy
- tgk
- lin
- oss
- que
- szl
- sdh
- mww
- mhr
- und
- und
- und
- rmy
- diq
- srd
- und
- und
- ydd
- und
- run
- und
- hsb
- und
- wol
- und
- urd
- und
- kiu
- ilo
- und
- cak
- und
- acm
- und
- und
- grc
- xmf
- vls
- und
- und
- tso
- crh
- nav
- mwl
- und
- tat
- grn
- mui
- hin
- mvf
- udm
- mzn
- und
- und
- und
- bcl
- und
- und
- ban
- und
- afb
- und
- cnh
- und
- cbk
- kaa
- tyv
- kur
- ful
- und
- zea
- frp
- frr
- pfl
- lij
- apc
- mai
- und
- und
- pon
- bak
- eml
- uzs
- nde
- hil
- mam
- gom
- dag
- nap
- bjn
- und
- hbo
- tet
- inh
- ven
- gaz
- tpi
- und
- ajp
- sat
- wuu
- bam
- vep
- glv
- tzo
- bik
- und
- acr
- twi
- myv
- ewe
- poh
- und
- ile
- rue
- und
- und
- ace
- kha
- und
- und
- und
- und
- ksh
- hui
- gor
- krc
- lfn
- tls
- fur
- und
- gag
- chk
- hmo
- kek
- fon
- ssw
- quh
- meu
- csb
- ksd
- ext
- rop
- und
- aln
- kab
- shn
- tbz
- knv
- bqc
- und
- und
- npi
- rug
- und
- kom
- kpg
- kbd
- dzo
- wal
- zlm
- und
- mzh
- pan
- hac
- und
- tum
- sgs
- fij
- tuc
- und
- und
- jbo
- iba
- pcd
- kjh
- ixl
- und
- und
- und
- ndo
- suz
- und
- und
- mfe
- bxr
- ava
- und
- bho
- brh
- und
- ctd
- aym
- ady
- nbl
- gom
- hus
- gug
- und
- und
- bat
- guj
- hne
- srn
- ctu
- san
- ton
- bug
- aoj
- jam
- und
- lez
- xav
- luo
- ltg
- bih
- uig
- mrj
- avk
- kos
- kbp
- alt
- cfm
- goh
- lad
- und
- cor
- quc
- und
- kpv
- und
- aka
- und
- hak
- rmn
- pls
- sag
- min
- und
- rcf
- som
- gym
- und
- kik
- pag
- arn
- und
- ngu
- dsb
- tca
- mni
- naq
- pdc
- mps
- und
- olo
- fro
- fit
- crh
- cdo
- kac
- mdf
- mad
- cab
- srm
- toj
- zai
- guc
- smn
- ote
- nch
- und
- prs
- tuk
- bis
- cuk
- bzj
- map
- djk
- ncj
- und
- mco
- shp
- und
- ang
- seh
- ikk
- qvi
- yom
- top
- awa
- ike
- mau
- kum
- mos
- tly
- und
- ayr
- tdt
- yap
- evn
- tah
- xal
- szy
- quw
- stq
- tlh
- otq
- und
- nov
- skr
- yua
- raw
- und
- fiu
- enm
- bbc
- und
- non
- und
- ber
- kea
- uig
- bua
- kua
- zza
- csy
- mnw
- dtp
- nah
- yao
- azj
- hau
- nog
- abt
- meo
- ame
- mbf
- chm
- arb
- kas
- tam
- crs
- tcy
- lbe
- cni
- ben
- rom
- zom
- blk
- efi
- ada
- und
- koi
- und
- bbj
- trv
- ach
- mgh
- kas
- gcr
- krl
- iso
- lki
- atj
- cmo
- chu
- gur
- mni
- iku
- sma
- lrc
- gos
- dty
- nzi
- btx
- brx
- bts
- nyu
- ckt
- guw
- pck
- quy
- gcf
- ksw
- nia
- bal
- nhe
- hwc
- mup
- dyu
- ami
- tay
- gaa
- loz
- abq
- pnt
- snd
- ium
- abs
- cmr
- shi
- tzm
- gil
- ewo
- kon
- sms
- ape
- tzh
- bci
- acd
- tab
- fuv
- swc
- tcz
- und
- sgc
- aaz
- tly
- tvl
- zha
- pib
- pli
- bru
- quz
- cha
- aeu
- bjn
- nqo
- mer
- acf
- rmc
- msi
- smj
- kng
- dhv
- doi
- zap
- hla
- cac
- pdt
- mbt
- taq
- bgp
- tyz
- aau
- mah
- nak
- kmb
- mjw
- fuf
- bba
- ron
- amu
- alz
- cop
- rar
- emp
- arc
- nyn
- rut
- lub
- tzj
- fat
- lzh
- aak
- nnb
- lhu
- bjv
- bum
- maq
- roa
- bvr
- kcg
- tok
- yrk
- khs
- rup
- tuk
- qub
- jac
- hmr
- sid
- chr
- amp
- qug
- mas
- fkv
- mal
- bem
- cav
- her
- kwn
- ify
- asm
- gpe
- zpa
- agu
- ppk
- tiv
- npi
- ahk
- pis
- hns
- sus
- hak
- bon
- sja
- kas
- mar
- kri
- dik
- wrk
- got
- dar
- gui
- mak
- jiv
- kjb
- maz
- bsn
- dov
- cok
- rwo
- mag
- hnj
- krj
- teo
- enq
- aom
- ffm
- din
- ubu
- pwn
- gum
- sny
- twu
- med
- acn
- rme
- cbs
- abx
- ndc
- ibb
- pih
- zty
- rad
- aby
- zne
- cui
- lua
- yby
- bno
- niu
- agd
- nij
- agg
- yre
- fip
- qve
- tsg
- skg
- adj
- mfq
- agx
- kmg
- umb
- cnk
- nhw
- maa
- kwy
- jra
- syr
- inb
- pau
- xsm
- vap
- alp
- des
- syl
- kmr
- mkn
- gan
- qwh
- jvn
- bib
- ckt
- mpx
- bin
- klv
- xon
- jbu
- bdh
- knj
- ptu
- mmn
- mwq
- sop
- cgc
- rmy
- dop
- mgr
- rmo
- tbc
- kyq
- zpu
- kkc
- acu
- ata
- gag
- kan
- mif
- sus
- tsz
- mbi
- imo
- fuh
- zyp
- apr
- att
- bus
- kqp
- koo
- bas
- urh
- bbr
- msy
- mdy
- nas
- lee
- mcu
- mrj
- dig
- izr
- nfr
- hto
- mzw
- qxh
- chz
- guh
- ded
- bvz
- kkj
- gux
- ikt
- bnp
- bgt
- quf
- dwr
- nod
- yal
- bmr
- ljp
- xla
- zyb
- gof
- niv
- nrf
- npy
- ura
- dob
- gfk
- gdr
- sdc
- pjt
- spp
- irk
- cpa
- knc
- gvl
- agr
- njo
- xrb
- kao
- zpz
- bgr
- rnd
- ntu
- aai
- gai
- dgi
- kez
- dad
- kmh
- aii
- ifk
- aji
- noa
- bhl
- ztq
- rmn
- gde
- dnj
- men
- miq
- mdf
- isd
- lgg
- ncx
- gub
- itv
- tac
- cce
- kwi
- nhu
- snd
- sml
- pah
- sas
- cwt
- spy
- enb
- fai
- cas
- sue
- yss
- ozm
- bku
- biv
- akp
- dts
- kdr
- fal
- mmo
- byr
- cbv
- cbu
- eza
- kgp
- sda
- gbo
- mfi
- bdd
- boj
- blh
- ibg
- agn
- vmy
- sua
- gul
- rap
- bmh
- sxn
- dyi
- tdx
- car
- heh
- dgc
- sgb
- bpr
- gbi
- avu
- udu
- yli
- cjs
- gnn
- nhi
- urk
- kpy
- qup
- qvh
- pir
- tsc
- bxr
- thk
- grt
- kqn
- hay
- mqb
- tbg
- bss
- trn
- jae
- myy
- ipk
- bao
- qvz
- dua
- mwv
- nct
- kyc
- tuo
- tee
- mop
- mxt
- qvm
- gkn
- lam
- ntr
- sbe
- dyo
- lex
- smk
- coe
- kpe
- mni
- xbi
- atb
- tcf
- bfo
- cbc
- swg
- niv
- cnt
- izz
- knf
- mpp
- kki
- nho
- sat
- kbr
- bmu
- hig
- qxr
- orv
- pma
- zpl
- kpr
- sig
- ory
- mxp
- lef
- mlp
- soq
- cre
- aey
- qvs
- sur
- knc
- suk
- cax
- alq
- chw
- dww
- gog
- viv
- qvw
- ted
- yuj
- ziw
- kto
- mcq
- sim
- koi
- lue
- cya
- knk
- tpt
- taj
- blz
- mil
- kde
- kpz
- hag
- vun
- kus
- lid
- tos
- bfd
- chd
- cle
- yon
- bgs
- lsm
- msb
- ota
- guj
- mxq
- lfn
- myw
- aso
- esu
- kdi
- bhp
- mfz
- sgw
- xsi
- gun
- kxc
- mux
- otw
- cjp
- nyo
- zia
- cco
- ojb
- ktu
- cpy
- txu
- bjr
- bud
- ken
- bzh
- ipi
- crx
- cko
- gnd
- kix
- mmx
- esk
- kud
- toh
- byx
- mtp
- kpv
- kyz
- lmp
- tiy
- ccp
- sbl
- tcs
- usp
- buk
- hvn
- amm
- kzj
- gng
- ter
- kij
- ngl
- pab
- pad
- kca
- meq
- kas
- niq
- dah
- lia
- kss
- bch
- khz
- nop
- yle
- ain
- pem
- yml
- taq
- gaw
- kze
- tby
- cso
- ifb
- rki
- aly
- kmu
- mhl
- kmo
- mva
- opm
- rtm
- xtn
- dgr
- ksr
- snp
- tvk
- zpv
- hrx
- kwj
- muh
- kak
- kpw
- akb
- ybb
- kjs
- wuv
- laj
- ogo
- nim
- czt
- aoz
- rro
- yuw
- heg
- mrw
- zac
- kpf
- leu
- nwi
- awx
- gso
- keo
- tte
- ceg
- nhy
- bhw
- gnw
- nin
- xmv
- crm
- mxv
- ubr
- gld
- bzi
- ann
- lac
- mej
- zpc
- dje
- caq
- guk
- wos
- msm
- zaw
- cap
- mox
- zab
- mca
- wrs
- hae
- nss
- rav
- uvh
- dga
- ppo
- nog
- arq
- bim
- lwg
- kms
- kne
- tkl
- ady
- caf
- zat
- aba
- chf
- lew
- npl
- uvl
- cdo
- rmq
- mek
- snc
- nsn
- amn
- abz
- cek
- ong
- txq
- bjp
- gvf
- crk
- snf
- nhg
- kqw
- mic
- mie
- nus
- plu
- idu
- sll
- thl
- bef
- kwd
- mqy
- caa
- aia
- wer
- agm
- mbl
- mbs
- adh
- hub
- mav
- nab
- bqp
- guo
- auy
- big
- nii
- row
- kim
- mti
- otm
- prg
- icr
- mcp
- tbo
- chy
- oji
- kpx
- lbb
- pao
- xuo
- zgh
- wls
- aca
- apy
- wnc
- ssg
- eve
- mee
- yut
- chr
- kxm
- mur
- lad
- mcd
- cto
- mqj
- usa
- emi
- liv
- guz
- tgp
- ncl
- azg
- kck
- apb
- kbm
- sgz
- wsk
- gbm
- csw
- ctp
- eri
- vid
- bsq
- dak
- tif
- xsr
- mwp
- roo
- wbp
- gdn
- tgo
- zpm
- mih
- qvc
- tod
- abn
- cao
- kbc
- kog
- moh
- leh
- nsu
- okv
- tih
- zos
- llg
- gmv
- kyf
- mhx
- mvp
- naf
- ino
- dng
- mnk
- oke
- omw
- tnk
- xtd
- agt
- cbr
- eko
- nhk
- hot
- iou
- mpm
- myx
- ndh
- pbb
- rng
- cri
- bvd
- kmz
- cta
- blw
- clu
- cof
- kje
- nmf
- kbh
- qxn
- ria
- wed
- zar
- huv
- lsi
- msk
- nbq
- ati
- lww
- yva
- amr
- lmk
- tim
- bbb
- bzd
- djr
- kwf
- nuy
- poi
- ptp
- atd
- bps
- lcm
- mbh
- nuj
- ayp
- cwe
- geb
- aoi
- mto
- cbi
- gvn
- kqf
- rkb
- xed
- zas
- apz
- cpb
- ese
- gdg
- iws
- apu
- cag
- kdc
- nou
- too
- xmm
- beq
- hnn
- myk
- sby
- arl
- ncu
- rml
- gam
- kvn
- mbc
- mfy
- urt
- luc
- mio
- snn
- tar
- tku
- upv
- kjg
- btd
- cut
- mwn
- tue
- adz
- kyg
- rel
- tew
- ttc
- nkf
- yrb
- fub
- nsm
- nyf
- ava
- bkd
- cpc
- lif
- mwc
- gan
- apw
- azz
- zpo
- ace
- faa
- ood
- toc
- wmt
- ajg
- nko
- ssd
- sxb
- zxx
- not
- amk
- hch
- kqc
- aui
- llb
- avt
- kaq
- tna
- zpt
- mnw
- bwd
- lif
- stn
- crk
- kgk
- mck
- bhg
- ifu
- kkl
- zao
- huu
- sil
- trq
- tuf
- wnu
- amf
- fan
- zdj
- bco
- kdl
- lln
- mit
- yaq
- ake
- ige
- mna
- nhx
- njn
- wba
- zaa
- lez
- syb
- qxo
- rgu
- wiu
- bla
- etr
- miy
- mjc
- nif
- pov
- qxl
- sey
- bxh
- ddg
- mks
- nki
- pot
- prf
- hra
- law
- plw
- zpi
- atg
- daf
- drg
- dru
- ngp
- boa
- cbt
- dhm
- ikw
- kqe
- lbj
- loq
- xsb
- yad
- kyu
- mcb
- tks
- cnl
- far
- miz
- maj
- mgm
- mib
- mza
- nhr
- smt
- ish
- mlu
- yka
- anv
- ign
- mir
- tav
- gyr
- klt
- apn
- wbm
- cjk
- dln
- mcf
- twx
- ldi
- poy
- yaa
- zae
- bdq
- con
- dnw
- gah
- lcp
- mbj
- xog
- yan
- sdq
- sgh
- gwi
- lbk
- ses
- ycn
- kau
- hix
- mig
- mlh
- aer
- amx
- auc
- cly
- dsh
- gnb
- neb
- ttt
- jic
- xtm
- tll
- apt
- kgr
- pio
- pmf
- yrk
- akh
- box
- cub
- cux
- mrq
- inh
- bsp
- for
- kny
- uri
- zsr
- pce
- cme
- cot
- kxw
- omb
- srq
- vag
- cpu
- kmy
- ksc
- lai
- nnp
- ruf
- spm
- tro
- ttj
- waj
- raj
- ian
- ivb
- kmm
- kzf
- maw
- tuv
- whk
- zsm
- aph
- anm
- mny
- bug
- trp
- are
- esi
- mhi
- obo
- bwu
- lwo
- mrg
- old
- zav
- snk
- daa
- fuq
- lud
- nlg
- sck
- tbl
- khb
- tpu
- fue
- tmd
- whg
- blt
- gqr
- kub
- ots
- tpp
- var
- hbb
- krr
- zxx
- alj
- ebk
- goa
- hav
- kmk
- kqo
- muy
- njb
- snw
- wib
- shn
- tkr
- ksp
- lip
- mog
- nla
- pss
- zad
- awi
- frd
- kei
- kmd
- mim
- got
- lbe
- adi
- any
- arp
- biu
- bvc
- ghs
- ttq
- vmk
- zpj
- crl
- dtb
- dug
- kam
- nmo
- qva
- rhg
- taw
- tui
- uth
- wap
- zpg
- brb
- dty
- syr
- bih
- bqj
- cjv
- cnw
- kgf
- mfh
- mns
- nnw
- ntp
- oto
- dks
- mpt
- nmw
- sba
- sfw
- shu
- ury
- yrl
- arc
- cmo
- xkg
- iqw
- kpj
- nfa
- nph
- nre
- wsg
- xsu
- nut
- bmk
- cdf
- enl
- fuv
- hop
- ife
- lnd
- mrv
- nzm
- tem
- dwr
- gwc
- udi
- zha
- ajz
- kru
- qvo
- sld
- yup
- anp
- kwu
- szy
- tdg
- ade
- bcw
- bgz
- cul
- fmu
- gvc
- ktj
- lis
- mev
- msc
- mta
- nbc
- ncq
- way
- wlv
- wlx
- jmx
- pli
- tkr
- wni
- aim
- byv
- cgg
- fud
- jmc
- ker
- kia
- lom
- ojb
- tnc
- bwx
- cre
- skr
- xal
- gjn
- guq
- lap
- mip
- mkl
- mtg
- mzk
- njm
- nse
- pne
- pww
- zam
- zca
- the
- dis
- lgl
- mbd
- mda
- mhy
- nst
- pkb
- pmq
- poe
- swk
- trs
- vot
- yam
- zpq
- atq
- bav
- cho
- chq
- cjo
- due
- duo
- etu
- ifa
- kby
- khq
- kno
- lem
- mcn
- mkz
- mnb
- myb
- niy
- pxm
- rub
- sju
- tmc
- tpm
- wob
- nxa
- pnt
- sdk
- tcy
- bhz
- bth
- crj
- eka
- khy
- led
- met
- mgo
- mus
- mzm
- naw
- sri
- tbk
- xbr
- awa
- tnl
- tnn
- awb
- bsc
- iri
- kpq
- ktm
- loe
- lun
- mfg
- moa
- ndj
- otn
- sjo
- wat
- bkm
- tuz
- ybi
- gbr
- gej
- giz
- gna
- gwr
- kex
- kle
- knx
- ktz
- kvj
- lob
- mhw
- mph
- pmx
- qus
- tcc
- ang
- azn
- bra
- gal
- jra
- nlv
- rki
- stk
- yux
- dip
- ivv
- kdj
- kff
- kyu
- mnf
- moc
- mpg
- mua
- nma
- rim
- rjs
- rnl
- ssx
- tig
- urw
- idt
- mrn
- oki
- bbk
- bea
- bkq
- btt
- cou
- ddn
- did
- diu
- ilb
- ksj
- lhi
- ndi
- ngb
- njz
- nmz
- npo
- nwb
- pbi
- sbd
- soe
- tpw
- twb
- bfn
- boz
- lwl
- psp
- sgd
- ymp
- bkl
- bkv
- csk
- enx
- ess
- gof
- gud
- guu
- kdh
- krx
- kzn
- maf
- max
- mgc
- mvn
- mwm
- nbe
- nlc
- nxd
- pse
- sbs
- tlb
- wwa
- yim
- bkx
- bze
- chu
- kbq
- kpy
- mry
- tkd
- ybh
- abi
- ayo
- bex
- bom
- bov
- dhg
- jaa
- kqs
- kqy
- ksb
- ldn
- log
- lol
- mfk
- mtj
- ndz
- ngc
- nio
- nri
- nuz
- pps
- shk
- tlj
- vut
- xnn
- bcc
- cuh
- emp
- hro
- nco
- nxl
- pex
- tnp
- trv
- vif
- aha
- avn
- bmv
- chj
- crt
- dos
- eto
- gya
- hlt
- jun
- ktb
- lgm
- mnx
- mor
- mug
- nng
- nnh
- nsa
- nyk
- otd
- oym
- phm
- prq
- saj
- szb
- tap
- tnr
- tzl
- wew
- yas
- yuz
- avk
- bat
- baw
- bwt
- bxa
- cjs
- csw
- fli
- kqr
- lan
- lgr
- lhm
- mlk
- mve
- nqo
- odk
- olo
- otq
- pea
- saq
- sea
- sps
- tio
- yin
- adl
- agw
- ald
- asg
- bnj
- brx
- bwi
- bwq
- dbq
- dow
- fad
- hoc
- kql
- ksf
- kyu
- lea
- mse
- mzl
- nhd
- nnl
- nwx
- pbc
- pny
- tul
- aaa
- abc
- agq
- ags
- azo
- bag
- bax
- bce
- bfm
- bgf
- bhs
- bkc
- bkh
- blk
- bob
- bqm
- bri
- brv
- buo
- bya
- chp
- cim
- clo
- cuv
- dag
- dmg
- dtr
- eee
- ekm
- enc
- fiu
- gbj
- gou
- hao
- hna
- hre
- isu
- jgo
- kbx
- kvt
- kxp
- lfa
- lkb
- lns
- loh
- lrc
- lts
- luy
- mdr
- mfj
- mgg
- mgq
- mle
- mlw
- mmu
- mne
- muv
- mxu
- nge
- ngn
- njy
- nsk
- nsk
- nwe
- nza
- pcg
- pdu
- pnz
- pwg
- qaa
- shi
- smn
- snk
- snl
- sox
- ssn
- syw
- tay
- tbj
- tdb
- thy
- tnt
- tom
- tpl
- tsb
- tvs
- udg
- unr
- wbr
- wms
- wnk
- wtk
- xmd
- xmg
- xty
- yav
- yea
- zuh
multilinguality:
- multilingual
pinned: true
tags:
- multilingual
---
## Dataset Summary
DCAD-2000 is a large-scale multilingual corpus built using
newly extracted Common Crawl data (CC-MAIN-2024-46) and existing multilingual datasets. It includes over 2,282 languages, 46.72TB of data, and 8.63 billion documents, spanning 155 high- and medium-resource languages and 159 writing scripts. We propose reframing data cleaning as an anomaly detection task. This dynamic filtering approach significantly enhances data quality by identifying and removing noisy or anomalous content.
+ Paper: [A Multilingual Dataset across 2000+ Languages with Data Cleaning as Anomaly Detection](https://www.arxiv.org/abs/2502.11546)
+ Github: [https://github.com/yl-shen/DCAD-2000](https://github.com/yl-shen/DCAD-2000)
+ Dataset (HuggingFace): [openbmb/DCAD-2000](https://huggingface.co/datasets/openbmb/DCAD-2000)
## Dataset Overview
Comparison of multilingual datasets constructed from Common Crawl (CC) and our constructed DCAD-2000, focusing on the latest CC version used, the total number of languages supported, distribution across resource categories (high, medium, low, very low), and training readiness. The CC version marked with **bold** indicates an inferred version due to the lack of explicit specification in the original paper. The "Training-Ready" column indicates whether the dataset is ready for training LLMs without requiring further data cleaning.
| **Dataset** | **CC Version** | **#Langs (total)** | **#Langs (high)** | **#Langs (medium)** | **#Langs (low)** | **#Langs (very low)** | **Training-Ready** |
|---------------------------|--------------------|--------------------|-------------------|---------------------|------------------|-----------------------|--------------------|
| mC4 (Raffel et al., 2020) | CC-MAIN-2020-34 | 101 | 0 | 43 | 52 | 6 | ✘ |
| OSCAR 23.01 (Abadji et al., 2022) | CC-MAIN-2022-49 | 153 | 6 | 42 | 25 | 80 | ✘ |
| Glot500 (Imani et al., 2023) | **CC-MAIN-2020-34** | 511 | 0 | 108 | 79 | 324 | ✘ |
| CulturaX (Nguyen et al., 2024) | **CC-MAIN-2022-49** | 167 | 11 | 47 | 27 | 82 | ✘ |
| Madlad-400 (Kudugunta et al., 2024) | CC-MAIN-2022-33 | 419 | 7 | 46 | 39 | 327 | ✘ |
| MaLA (Ji et al., 2024) | **CC-MAIN-2022-49** | 939 | 1 | 125 | 78 | 735 | ✘ |
| Glotcc (Kargaran et al., 2024) | CC-MAIN-2023-50 | 1331 | 0 | 10 | 52 | 1269 | ✘ |
| HPLT-v1.2 (de Gilbert et al., 2024) | **CC-MAIN-2022-40** | 191 | 12 | 53 | 38 | 88 | ✘ |
| Fineweb-2 (Penedo et al., 2024) | CC-MAIN-2024-18 | 1915 | 10 | 62 | 49 | 1794 | ✘ |
| **DCAD-2000** | CC-MAIN-2024-46 | 2282 | 13 | 142 | 124 | 2003 | ✓ |
## Dataset Creation
+ **Data Collection:** DCAD-2000 integrates data from four main sources: MaLA, Fineweb, Fineweb-2, and newly extracted Common Crawl data.
+ **Data Cleaning as Anomaly Detection:** Traditional data cleaning methods rely on fixed thresholds for document-level features, making them less adaptable to the diversity of multilingual data. To address this, we propose a novel framework that formulates data cleaning as an anomaly detection task, which involves the feature extraction and anomaly detection.
  - **Feature Extraction:** For each document, we consider the following eight features: (1) Number of Words; (2) Character Repetition Ratio; (3) Word Repetition Ratio; (4) Special Characters Ratio; (5) Stopwords Ratio; (6) Flagged Words Ratio; (7) Language Identification (LID) Score; (8) Perplexity Score.
- **Anomaly Detection:** We evaluate several classical anomaly detection algorithms including (1) Isolation Forest; (2) One Class SVM; (3) Local Outlier Factor and (4) K-Means.
- Visualization<br>

## Data Statistics
+ [Geographical Distribution](https://github.com/yl-shen/DCAD-2000/blob/master/statistics/geo_group.tsv)
+ [Script Distribution](https://github.com/yl-shen/DCAD-2000/blob/master/statistics/script_group_detail.tsv)
+ [Statistical Details](https://github.com/yl-shen/DCAD-2000/blob/master/statistics/statistic_detail.tsv)
+ [language list](https://github.com/yl-shen/DCAD-2000/blob/master/statistics/statistic_lang_num.tsv)
+ [language mapping](https://github.com/yl-shen/DCAD-2000/blob/master/lang_mapping/all_mapping.csv)
## Usage (Dataset)
```
from datasets import load_dataset
data = load_dataset("openbmb/DCAD-2000")
```
You can also specify the language you want:
```
from datasets import load_dataset
data = load_dataset("openbmb/DCAD-2000", name="eng_Latn", split="train")
```
## Citation Information
```
@article{shen2025dcad,
title={DCAD-2000: A Multilingual Dataset across 2000+ Languages with Data Cleaning as Anomaly Detection},
author={Shen, Yingli and Lai, Wen and Wang, Shuo and Zhang, Xueren and Luo, Kangyang and Fraser, Alexander and Sun, Maosong},
journal={arXiv preprint arXiv:2502.11546},
year={2025}
}
```
## Acknowledgements
We introduce DCAD-2000, a large-scale multilingual dataset designed to address the increasing demand for high-quality and diverse training data for multilingual LLMs.
This work is done by researchers at [Tsinghua NLP group](https://nlp.csai.tsinghua.edu.cn) in collaboration with partners from [TUM](https://wenlai-lavine.github.io/) and [Modelbest Inc.](https://huggingface.co/openbmb)
## Contact Information
Yingli Shen ([email protected])
Wen Lai ([email protected]) |
stanfordnlp/sst2 | stanfordnlp | "2024-01-04T16:31:07Z" | 15,748 | 114 | [
"task_categories:text-classification",
"task_ids:sentiment-classification",
"annotations_creators:crowdsourced",
"language_creators:found",
"multilinguality:monolingual",
"source_datasets:original",
"language:en",
"license:unknown",
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"text-classification"
] | "2022-06-13T14:01:47Z" | ---
annotations_creators:
- crowdsourced
language_creators:
- found
language:
- en
license:
- unknown
multilinguality:
- monolingual
size_categories:
- 10K<n<100K
source_datasets:
- original
task_categories:
- text-classification
task_ids:
- sentiment-classification
paperswithcode_id: sst
pretty_name: Stanford Sentiment Treebank v2
dataset_info:
features:
- name: idx
dtype: int32
- name: sentence
dtype: string
- name: label
dtype:
class_label:
names:
'0': negative
'1': positive
splits:
- name: train
num_bytes: 4681603
num_examples: 67349
- name: validation
num_bytes: 106252
num_examples: 872
- name: test
num_bytes: 216640
num_examples: 1821
download_size: 3331058
dataset_size: 5004495
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
# Dataset Card for [Dataset Name]
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** https://nlp.stanford.edu/sentiment/
- **Repository:**
- **Paper:** [Recursive Deep Models for Semantic Compositionality Over a Sentiment Treebank](https://www.aclweb.org/anthology/D13-1170/)
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
The Stanford Sentiment Treebank is a corpus with fully labeled parse trees that allows for a complete analysis of the
compositional effects of sentiment in language. The corpus is based on the dataset introduced by Pang and Lee (2005)
and consists of 11,855 single sentences extracted from movie reviews. It was parsed with the Stanford parser and
includes a total of 215,154 unique phrases from those parse trees, each annotated by 3 human judges.
Binary classification experiments on full sentences (negative or somewhat negative vs somewhat positive or positive
with neutral sentences discarded) refer to the dataset as SST-2 or SST binary.
### Supported Tasks and Leaderboards
- `sentiment-classification`
### Languages
The text in the dataset is in English (`en`).
## Dataset Structure
### Data Instances
```
{'idx': 0,
'sentence': 'hide new secretions from the parental units ',
'label': 0}
```
### Data Fields
- `idx`: Monotonically increasing index ID.
- `sentence`: Complete sentence expressing an opinion about a film.
- `label`: Sentiment of the opinion, either "negative" (0) or "positive" (1). The test set labels are hidden (-1).
### Data Splits
| | train | validation | test |
|--------------------|---------:|-----------:|-----:|
| Number of examples | 67349 | 872 | 1821 |
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
Rotten Tomatoes reviewers.
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
Unknown.
### Citation Information
```bibtex
@inproceedings{socher-etal-2013-recursive,
title = "Recursive Deep Models for Semantic Compositionality Over a Sentiment Treebank",
author = "Socher, Richard and
Perelygin, Alex and
Wu, Jean and
Chuang, Jason and
Manning, Christopher D. and
Ng, Andrew and
Potts, Christopher",
booktitle = "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing",
month = oct,
year = "2013",
address = "Seattle, Washington, USA",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/D13-1170",
pages = "1631--1642",
}
```
### Contributions
Thanks to [@albertvillanova](https://github.com/albertvillanova) for adding this dataset. |
bigcode/bigcodebench-hard | bigcode | "2025-02-23T16:42:46Z" | 15,726 | 3 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-09-14T14:50:33Z" | ---
dataset_info:
features:
- name: task_id
dtype: string
- name: complete_prompt
dtype: string
- name: instruct_prompt
dtype: string
- name: canonical_solution
dtype: string
- name: code_prompt
dtype: string
- name: test
dtype: string
- name: entry_point
dtype: string
- name: doc_struct
dtype: string
- name: libs
dtype: string
- name: q_idx
dtype: int64
- name: question
dtype: string
- name: score
dtype: float64
- name: _id
dtype: string
splits:
- name: v0.1.0_hf
num_bytes: 1271624
num_examples: 148
- name: v0.1.1
num_bytes: 1271607
num_examples: 148
- name: v0.1.2
num_bytes: 1271812
num_examples: 148
- name: v0.1.3
num_bytes: 1271812
num_examples: 148
- name: v0.1.4
num_bytes: 1272012
num_examples: 148
download_size: 2758366
dataset_size: 6358867
configs:
- config_name: default
data_files:
- split: v0.1.0_hf
path: data/v0.1.0_hf-*
- split: v0.1.1
path: data/v0.1.1-*
- split: v0.1.2
path: data/v0.1.2-*
- split: v0.1.3
path: data/v0.1.3-*
- split: v0.1.4
path: data/v0.1.4-*
---
|
andstor/methods2test | andstor | "2023-12-23T03:01:51Z" | 15,658 | 0 | [
"task_categories:text-generation",
"language:en",
"license:mit",
"size_categories:10M<n<100M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"arxiv:2203.12776",
"region:us",
"unit test",
"java",
"code"
] | [
"text-generation"
] | "2023-12-07T13:37:44Z" | ---
language:
- en
license: mit
task_categories:
- text-generation
configs:
- config_name: fm
data_files:
- split: train
path: data/fm/train-*
- split: test
path: data/fm/test-*
- split: validation
path: data/fm/validation-*
- config_name: fm_indented
data_files:
- split: train
path: data/fm_indented/train-*
- split: test
path: data/fm_indented/test-*
- split: validation
path: data/fm_indented/validation-*
- config_name: fm+t
data_files:
- split: train
path: data/fm+t/train-*
- split: test
path: data/fm+t/test-*
- split: validation
path: data/fm+t/validation-*
- config_name: fm+fc
data_files:
- split: train
path: data/fm+fc/train-*
- split: test
path: data/fm+fc/test-*
- split: validation
path: data/fm+fc/validation-*
- config_name: fm+fc+t+tc
data_files:
- split: train
path: data/fm+fc+t+tc/train-*
- split: test
path: data/fm+fc+t+tc/test-*
- split: validation
path: data/fm+fc+t+tc/validation-*
- config_name: fm+fc+c
data_files:
- split: train
path: data/fm+fc+c/train-*
- split: test
path: data/fm+fc+c/test-*
- split: validation
path: data/fm+fc+c/validation-*
- config_name: fm+fc+c+t+tc
data_files:
- split: train
path: data/fm+fc+c+t+tc/train-*
- split: test
path: data/fm+fc+c+t+tc/test-*
- split: validation
path: data/fm+fc+c+t+tc/validation-*
- config_name: fm+fc+c+m
data_files:
- split: train
path: data/fm+fc+c+m/train-*
- split: test
path: data/fm+fc+c+m/test-*
- split: validation
path: data/fm+fc+c+m/validation-*
- config_name: fm+fc+c+m+t+tc
data_files:
- split: train
path: data/fm+fc+c+m+t+tc/train-*
- split: test
path: data/fm+fc+c+m+t+tc/test-*
- split: validation
path: data/fm+fc+c+m+t+tc/validation-*
- config_name: fm+fc+c+m+f
data_files:
- split: train
path: data/fm+fc+c+m+f/train-*
- split: test
path: data/fm+fc+c+m+f/test-*
- split: validation
path: data/fm+fc+c+m+f/validation-*
- config_name: fm+fc+c+m+f+t+tc
data_files:
- split: train
path: data/fm+fc+c+m+f+t+tc/train-*
- split: test
path: data/fm+fc+c+m+f+t+tc/test-*
- split: validation
path: data/fm+fc+c+m+f+t+tc/validation-*
- config_name: t
data_files:
- split: train
path: data/t/train-*
- split: test
path: data/t/test-*
- split: validation
path: data/t/validation-*
- config_name: t_indented
data_files:
- split: train
path: data/t_indented/train-*
- split: test
path: data/t_indented/test-*
- split: validation
path: data/t_indented/validation-*
- config_name: t+tc
data_files:
- split: train
path: data/t+tc/train-*
- split: test
path: data/t+tc/test-*
- split: validation
path: data/t+tc/validation-*
dataset_info:
- config_name: fm
features:
- name: id
dtype: string
- name: text
dtype: string
splits:
- name: train
num_bytes: 440444124
num_examples: 624022
- name: test
num_bytes: 59407291
num_examples: 78388
- name: validation
num_bytes: 57170315
num_examples: 78534
download_size: 99172217
dataset_size: 557021730
- config_name: fm+fc
features:
- name: id
dtype: string
- name: text
dtype: string
splits:
- name: train
num_bytes: 506130678
num_examples: 624022
- name: test
num_bytes: 68407490
num_examples: 78388
- name: validation
num_bytes: 65318956
num_examples: 78534
download_size: 109141139
dataset_size: 639857124
- config_name: fm+fc+c
features:
- name: id
dtype: string
- name: text
dtype: string
splits:
- name: train
num_bytes: 569209100
num_examples: 624022
- name: test
num_bytes: 75552573
num_examples: 78388
- name: validation
num_bytes: 73101169
num_examples: 78534
download_size: 117996353
dataset_size: 717862842
- config_name: fm+fc+c+m
features:
- name: id
dtype: string
- name: text
dtype: string
splits:
- name: train
num_bytes: 1354004338
num_examples: 624022
- name: test
num_bytes: 187724929
num_examples: 78388
- name: validation
num_bytes: 184349299
num_examples: 78534
download_size: 222922572
dataset_size: 1726078566
- config_name: fm+fc+c+m+f
features:
- name: id
dtype: string
- name: text
dtype: string
splits:
- name: train
num_bytes: 1476073209
num_examples: 624022
- name: test
num_bytes: 201686811
num_examples: 78388
- name: validation
num_bytes: 201259950
num_examples: 78534
download_size: 240405885
dataset_size: 1879019970
- config_name: fm+fc+c+m+f+t+tc
features:
- name: id
dtype: string
- name: source
dtype: string
- name: target
dtype: string
splits:
- name: train
num_bytes: 2019918359
num_examples: 624022
- name: test
num_bytes: 269021331
num_examples: 78388
- name: validation
num_bytes: 272958781
num_examples: 78534
download_size: 371500476
dataset_size: 2561898471
- config_name: fm+fc+c+m+t+tc
features:
- name: id
dtype: string
- name: source
dtype: string
- name: target
dtype: string
splits:
- name: train
num_bytes: 1897682665
num_examples: 624022
- name: test
num_bytes: 255053799
num_examples: 78388
- name: validation
num_bytes: 256030595
num_examples: 78534
download_size: 360175965
dataset_size: 2408767059
- config_name: fm+fc+c+t+tc
features:
- name: id
dtype: string
- name: source
dtype: string
- name: target
dtype: string
splits:
- name: train
num_bytes: 1109827485
num_examples: 624022
- name: test
num_bytes: 142558255
num_examples: 78388
- name: validation
num_bytes: 144523616
num_examples: 78534
download_size: 251861137
dataset_size: 1396909356
- config_name: fm+fc+t+tc
features:
- name: id
dtype: string
- name: source
dtype: string
- name: target
dtype: string
splits:
- name: train
num_bytes: 1046592848
num_examples: 624022
- name: test
num_bytes: 135403379
num_examples: 78388
- name: validation
num_bytes: 136729952
num_examples: 78534
download_size: 243052074
dataset_size: 1318726179
- config_name: fm+t
features:
- name: id
dtype: string
- name: source
dtype: string
- name: target
dtype: string
splits:
- name: train
num_bytes: 868034154
num_examples: 624022
- name: test
num_bytes: 114371187
num_examples: 78388
- name: validation
num_bytes: 112688219
num_examples: 78534
download_size: 217267853
dataset_size: 1095093560
- config_name: fm_indented
features:
- name: id
dtype: string
- name: text
dtype: string
splits:
- name: train
num_bytes: 473170158
num_examples: 624022
- name: test
num_bytes: 64280367
num_examples: 78388
- name: validation
num_bytes: 61093848
num_examples: 78534
download_size: 103174190
dataset_size: 598544373
- config_name: t
features:
- name: id
dtype: string
- name: source
dtype: string
- name: target
dtype: string
splits:
- name: train
num_bytes: 380207303
num_examples: 624022
- name: test
num_bytes: 47993188
num_examples: 78388
- name: validation
num_bytes: 49808813
num_examples: 78534
download_size: 113820250
dataset_size: 478009304
- config_name: t+tc
features:
- name: id
dtype: string
- name: source
dtype: string
- name: target
dtype: string
splits:
- name: train
num_bytes: 550955294
num_examples: 624022
- name: test
num_bytes: 68323462
num_examples: 78388
- name: validation
num_bytes: 72740770
num_examples: 78534
download_size: 136767271
dataset_size: 692019526
- config_name: t_indented
features:
- name: id
dtype: string
- name: source
dtype: string
- name: target
dtype: string
splits:
- name: train
num_bytes: 405853738
num_examples: 624022
- name: test
num_bytes: 51457514
num_examples: 78388
- name: validation
num_bytes: 52970428
num_examples: 78534
download_size: 117732776
dataset_size: 510281680
tags:
- unit test
- java
- code
---
## Dataset Description
Microsoft created the methods2test dataset, consisting of Java Junit test cases with its corresponding focal methods.
It contains 780k pairs of JUnit test cases and focal methods which were extracted from a total of 91K
Java open source project hosted on GitHub.
This is an assembled version of the methods2test dataset. It provides convenient access to the different context levels based on the raw source code (e.g. newlines are preserved). The test cases and associated classes are also made available.
The mapping between test case and focal methods are based heuristics rules and Java developer's best practice.
More information could be found here:
- [methods2test Github repo](https://github.com/microsoft/methods2test)
- [Methods2Test: A dataset of focal methods mapped to test cases](https://arxiv.org/pdf/2203.12776.pdf)
## Dataset Schema
```
t: <TEST_CASE>
t_tc: <TEST_CASE> <TEST_CLASS_NAME>
fm: <FOCAL_METHOD>
fm_fc: <FOCAL_CLASS_NAME> <FOCAL_METHOD>
fm_fc_c: <FOCAL_CLASS_NAME> <FOCAL_METHOD> <CONSTRUCTORS>
fm_fc_c_m: <FOCAL_CLASS_NAME> <FOCAL_METHOD> <CONSTRUCTORS> <METHOD_SIGNATURES>
fm_fc_c_m_f: <FOCAL_CLASS_NAME> <FOCAL_METHOD> <CONSTRUCTORS> <METHOD_SIGNATURES> <FIELDS>
```
## Focal Context
- fm: this representation incorporates exclusively the source
code of the focal method. Intuitively, this contains the most
important information for generating accurate test cases for
the given method.
- fm+fc: this representation adds the focal class name, which
can provide meaningful semantic information to the model.
- fm+fc+c: this representation adds the signatures of the constructor methods of the focal class. The idea behind this
augmentation is that the test case may require instantiating
an object of the focal class in order to properly test the focal
method.
- fm+fc+c+m: this representation adds the signatures of the
other public methods in the focal class. The rationale which
motivated this inclusion is that the test case may need to
invoke other auxiliary methods within the class (e.g., getters,
setters) to set up or tear down the testing environment.
- fm+fc+c+m+f : this representation adds the public fields of
the focal class. The motivation is that test cases may need to
inspect the status of the public fields to properly test a focal
method.

The different levels of focal contexts are the following:
```
T: test case
T_TC: test case + test class name
FM: focal method
FM_FC: focal method + focal class name
FM_FC_C: focal method + focal class name + constructor signatures
FM_FC_C_M: focal method + focal class name + constructor signatures + public method signatures
FM_FC_C_M_F: focal method + focal class name + constructor signatures + public method signatures + public fields
```
## Limitations
The original authors validated the heuristics by inspecting a
statistically significant sample (confidence level of 95% within 10%
margin of error) of 97 samples from the training set. Two authors
independently evaluated the sample, then met to discuss the disagreements. They found that 90.72% of the samples had a correct
link between the test case and the corresponding focal method.
## Contribution
All thanks to the original authors.
|
zh-plus/tiny-imagenet | zh-plus | "2022-07-12T09:04:30Z" | 15,447 | 72 | [
"task_categories:image-classification",
"task_ids:multi-class-image-classification",
"annotations_creators:crowdsourced",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"source_datasets:extended|imagenet-1k",
"language:en",
"size_categories:100K<n<1M",
"format:parquet",
"modality:image",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"image-classification"
] | "2022-07-01T03:33:16Z" | ---
annotations_creators:
- crowdsourced
extra_gated_prompt: "By clicking on \u201CAccess repository\u201D below, you also\
\ agree to ImageNet Terms of Access:\n[RESEARCHER_FULLNAME] (the \"Researcher\"\
) has requested permission to use the ImageNet database (the \"Database\") at Princeton\
\ University and Stanford University. In exchange for such permission, Researcher\
\ hereby agrees to the following terms and conditions:\n1. Researcher shall use\
\ the Database only for non-commercial research and educational purposes.\n2. Princeton\
\ University, Stanford University and Hugging Face make no representations or warranties\
\ regarding the Database, including but not limited to warranties of non-infringement\
\ or fitness for a particular purpose.\n3. Researcher accepts full responsibility\
\ for his or her use of the Database and shall defend and indemnify the ImageNet\
\ team, Princeton University, Stanford University and Hugging Face, including their\
\ employees, Trustees, officers and agents, against any and all claims arising from\
\ Researcher's use of the Database, including but not limited to Researcher's use\
\ of any copies of copyrighted images that he or she may create from the Database.\n\
4. Researcher may provide research associates and colleagues with access to the\
\ Database provided that they first agree to be bound by these terms and conditions.\n\
5. Princeton University, Stanford University and Hugging Face reserve the right\
\ to terminate Researcher's access to the Database at any time.\n6. If Researcher\
\ is employed by a for-profit, commercial entity, Researcher's employer shall also\
\ be bound by these terms and conditions, and Researcher hereby represents that\
\ he or she is fully authorized to enter into this agreement on behalf of such employer.\n\
7. The law of the State of New Jersey shall apply to all disputes under this agreement."
language:
- en
language_creators:
- crowdsourced
license: []
multilinguality:
- monolingual
paperswithcode_id: imagenet
pretty_name: Tiny-ImageNet
size_categories:
- 100K<n<1M
source_datasets:
- extended|imagenet-1k
task_categories:
- image-classification
task_ids:
- multi-class-image-classification
---
# Dataset Card for tiny-imagenet
## Dataset Description
- **Homepage:** https://www.kaggle.com/c/tiny-imagenet
- **Repository:** [Needs More Information]
- **Paper:** http://cs231n.stanford.edu/reports/2017/pdfs/930.pdf
- **Leaderboard:** https://paperswithcode.com/sota/image-classification-on-tiny-imagenet-1
### Dataset Summary
Tiny ImageNet contains 100000 images of 200 classes (500 for each class) downsized to 64×64 colored images. Each class has 500 training images, 50 validation images, and 50 test images.
### Languages
The class labels in the dataset are in English.
## Dataset Structure
### Data Instances
```json
{
  'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=64x64 at 0x1A800E8E190>,
'label': 15
}
```
### Data Fields
- image: A PIL.Image.Image object containing the image. Note that when accessing the image column: dataset[0]["image"] the image file is automatically decoded. Decoding of a large number of image files might take a significant amount of time. Thus it is important to first query the sample index before the "image" column, i.e. dataset[0]["image"] should always be preferred over dataset["image"][0].
- label: an int classification label. -1 for test set as the labels are missing. Check `classes.py` for the map of numbers & labels.
### Data Splits
| | Train | Valid |
| ------------ | ------ | ----- |
| # of samples | 100000 | 10000 |
## Usage
### Example
#### Load Dataset
```python
def example_usage():
tiny_imagenet = load_dataset('Maysee/tiny-imagenet', split='train')
print(tiny_imagenet[0])
if __name__ == '__main__':
example_usage()
``` |
Multimodal-Fatima/SNLI-VE_train | Multimodal-Fatima | "2023-02-07T23:21:35Z" | 15,368 | 1 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2023-02-07T00:32:32Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: filename
dtype: string
- name: premise
dtype: string
- name: hypothesis
dtype: string
- name: label
dtype:
class_label:
names:
'0': entailment
'1': neutral
'2': contradiction
- name: id
dtype: int64
- name: id_image
dtype: int64
- name: clip_tags_ViT_L_14
sequence: string
- name: blip_caption
dtype: string
- name: LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14
sequence: string
splits:
- name: train
num_bytes: 73634118251.385
num_examples: 529527
download_size: 27853612384
dataset_size: 73634118251.385
---
# Dataset Card for "SNLI-VE_train"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
Avelina/smollm-corpus-cleaned | Avelina | "2025-02-26T23:03:34Z" | 15,329 | 0 | [
"task_categories:text-generation",
"language:en",
"license:odc-by",
"size_categories:100M<n<1B",
"format:json",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"region:us"
] | [
"text-generation"
] | "2025-02-26T20:55:01Z" | ---
license: odc-by
dataset_info:
- config_name: default
features:
- name: text
dtype: string
configs:
- config_name: default
data_files:
- split: train
path: data*/train-*
task_categories:
- text-generation
language:
- en
size_categories:
- 100M<n<1B
---
# SmolLM-Corpus: Now shuffled and sharded (and Cleaned)!
This is a version of the SmolLM-Corpus where the 3 subsets have been interleaved, shuffled and sharded as 23698 `jsonl.zst` files for easy streaming!
The dataset is comprised of the `cosmopedia-v2` and `fineweb-edu-dedup` subsets from the original [SmolLM-Corpus repo](https://huggingface.co/datasets/HuggingFaceTB/smollm-corpus), with the `python-edu` subset being pulled from my [python-edu-cleaned repo](https://huggingface.co/datasets/Avelina/python-edu-cleaned).
## Dataset Structure
The dataset is split into 24 subdirectories, with the first 23 containing 1000 shards and the 24th containing the final 698. The repository is structured as follows:
```
data00/
├── train-00000-of-23698.jsonl.zst
├── ...
└── train-00999-of-23698.jsonl.zst
data01/
├── train-01000-of-23698.jsonl.zst
├── ...
└── train-01999-of-23698.jsonl.zst
...
data22/
├── train-22000-of-23698.jsonl.zst
├── ...
└── train-22999-of-23698.jsonl.zst
data23/
├── train-23000-of-23698.jsonl.zst
├── ...
└── train-23697-of-23698.jsonl.zst
```
In general, you can obtain the exact download URL for all shards using the following python function:
```py
def get_url_from_shard( index: int ) -> str:
if index >= 23_698:
raise ValueError( f'Shard index must be less than 23,698 but received {index}' )
group = index // 1000
return f'https://huggingface.co/datasets/Avelina/smollm-corpus/resolve/main/data{group:02d}/train-{index:05d}-of-23698.jsonl.zst'
```
## Generation Code
Here is the code which was used to generate the shuffled shards. Note the use of non-contiguous interleaving in an attempt to uniformly pull documents from across entire subsets to loosely decouple shard index from original document position.
Please make sure you `pip install zstandard`!!!
```py
import tqdm
import datasets
from datasets import load_dataset
# Output directory and file format. Note that the file extension enforces zst compression is used.
OUTPUT_FMT = '/YOUR/FILE/PATH/HERE/data/train-{index:05d}-of-{num_shards:05d}.jsonl.zst'
# Total number of shards giving approximately 10,000 documents per shard
OUTPUT_NUM_SHARDS = 23698
# Grab the three datasets
ds_python = load_dataset( 'Avelina/python-edu-cleaned' )
ds_cosmo = load_dataset( 'HuggingFaceTB/smollm-corpus', 'cosmopedia-v2' )
ds_edu = load_dataset( 'HuggingFaceTB/smollm-corpus', 'fineweb-edu-dedup' )
# Retain only the text columns and the train splits
ds_python = ds_python.select_columns( 'text' )[ 'train' ]
ds_cosmo = ds_cosmo.select_columns( 'text' )[ 'train' ]
ds_edu = ds_edu.select_columns( 'text' )[ 'train' ]
# Iterate over all shards with a nice progbar
for index in tqdm.tqdm( range( OUTPUT_NUM_SHARDS ) ):
# Get non-contiguous in-memory sub-shards for the three datasets
curr_python = ds_python.shard( num_shards=OUTPUT_NUM_SHARDS, index=index, contiguous=False, keep_in_memory=True )
curr_cosmo = ds_cosmo.shard( num_shards=OUTPUT_NUM_SHARDS, index=index, contiguous=False, keep_in_memory=True )
curr_edu = ds_edu.shard( num_shards=OUTPUT_NUM_SHARDS, index=index, contiguous=False, keep_in_memory=True )
# Concatenate the sub-shards
curr_shard = datasets.concatenate_datasets( [ curr_python, curr_cosmo, curr_edu ] )
# Deterministically shuffle using the current shard index for reproducibility
curr_shard = curr_shard.shuffle( seed=index, keep_in_memory=True )
# Dump the shards to .jsonl.zst
curr_shard.to_json( OUTPUT_FMT.format( index=index, num_shards=OUTPUT_NUM_SHARDS ) )
```
## In-Memory Decompression
Zstandard was chosen as it enables trivial in-memory decompression to minimise the storage impact of the dataset. Here is some example code which creates a python generator that yields each json line from a compressed shard stored at `file_name`, and a second function which creates a python generator that parses and yields the compressed shard.
```py
import json
from json import JSONDecodeError
import zstandard
def read_lines_zst( file_name ):
# Open the file for reading in binary mode
with open( file_name, 'rb' ) as file_handle:
# Initialise an empty buffer
buffer = ''
# Create a reader for the opened file
reader = zstandard.ZstdDecompressor( max_window_size=2**31 ).stream_reader( file_handle )
while True:
# Read a chunk of up to 128MB
chunk = reader.read( 2**27 ).decode()
# If chunk is empty we've reached the end of the file and can break out
if not chunk:
break
# Combine any prior buffer with the current chunk and split by newline
lines = ( buffer + chunk ).split( '\n' )
# Yield the full lines so far
for line in lines[ : -1 ]:
yield line
# The last 'line' is incomplete, so place in buffer for next chunk
buffer = lines[ -1 ]
# Always remember to close your reader!
reader.close()
def parse_jsonl_zst( file_name ):
    """Yield the 'text' field of every well-formed JSON line in the compressed shard."""
    # Walk the decompressed lines of the shard, tracking the index for error reports
    for i, raw_line in enumerate( read_lines_zst( file_name ) ):
        try:
            # Parse the line into a python dict, then pull out its text field
            record = json.loads( raw_line )
            yield record[ 'text' ]
        except ( KeyError, JSONDecodeError ):
            # KeyError: the 'text' field is absent from the dict
            # JSONDecodeError: the line is malformed JSON
            print( f'JSON error @ shard={file_name}, line={i}' )
```
Of course you *could* use HuggingFace's in-built streaming mechanics to handle things for you, but in my experience that approach is less reliable, doesn't handle `JSONDecodeError`s if there are malformed lines, can cause memory leaks, and has forced sharding behaviour when used inside a multi-worker PyTorch `DataLoader` which I've not yet found a way to disable! |
fixie-ai/librispeech_asr | fixie-ai | "2024-08-05T18:38:33Z" | 15,326 | 3 | [
"language:en",
"size_categories:100K<n<1M",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-07-19T02:34:30Z" | ---
language:
- en
dataset_info:
- config_name: clean
features:
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: text
dtype: string
- name: speaker_id
dtype: int64
- name: chapter_id
dtype: int64
- name: id
dtype: string
- name: continuation
dtype: string
splits:
- name: test
num_bytes: 623948478.48
num_examples: 2620
- name: validation
num_bytes: 622190064.956
num_examples: 2703
- name: train.360
num_bytes: 41953890926.124
num_examples: 104014
- name: train.100
num_bytes: 11606313661.774
num_examples: 28539
download_size: 53886816833
dataset_size: 54806343131.334
- config_name: other
features:
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: text
dtype: string
- name: speaker_id
dtype: int64
- name: chapter_id
dtype: int64
- name: id
dtype: string
- name: continuation
dtype: string
splits:
- name: train.500
num_bytes: 57330687390.808
num_examples: 148688
- name: validation
num_bytes: 591511495.496
num_examples: 2864
- name: test
num_bytes: 616939198.113
num_examples: 2939
download_size: 57019309170
dataset_size: 58539138084.417
configs:
- config_name: clean
data_files:
- split: test
path: clean/test-*
- split: validation
path: clean/validation-*
- split: train.360
path: clean/train.360-*
- split: train.100
path: clean/train.100-*
- config_name: other
data_files:
- split: train.500
path: other/train.500-*
- split: validation
path: other/validation-*
- split: test
path: other/test-*
---
|
parrotzone/sdxl-1.0 | parrotzone | "2023-09-20T12:27:51Z" | 15,274 | 12 | [
"license:openrail++",
"size_categories:1K<n<10K",
"format:imagefolder",
"modality:image",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2023-07-31T07:18:18Z" | ---
license: openrail++
---
# check [sdxl.parrotzone.art](https://sdxl.parrotzone.art) for easy viewing ⋆。°✩
---
## all images were made with SDXL 1.0 + the 0.9 VAE
- steps: 20
- cfg scale: 7
- no refiner
- random seeds
|
emozilla/Long-Data-Collections-Pretrain-Without-Books | emozilla | "2023-10-09T14:32:21Z" | 15,251 | 1 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2023-10-07T05:31:23Z" | ---
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
dataset_info:
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 236565210292
num_examples: 9383848
download_size: 25749677954
dataset_size: 236565210292
---
# Dataset Card for "Long-Data-Collections-Pretrain-Without-Books"
Paraquet version of the pretrain split of [togethercomputer/Long-Data-Collections](https://huggingface.co/datasets/togethercomputer/Long-Data-Collections) WITHOUT books
Statistics (in # of characters): `total_len: 236088622215, average_len: 25159.041601590307`
|
Helsinki-NLP/opus_infopankki | Helsinki-NLP | "2024-02-22T15:10:55Z" | 15,247 | 4 | [
"task_categories:translation",
"annotations_creators:found",
"language_creators:found",
"multilinguality:multilingual",
"source_datasets:original",
"language:ar",
"language:en",
"language:es",
"language:et",
"language:fa",
"language:fi",
"language:fr",
"language:ru",
"language:so",
"language:sv",
"language:tr",
"language:zh",
"license:cc-by-4.0",
"size_categories:1M<n<10M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"translation"
] | "2022-03-02T23:29:22Z" | ---
annotations_creators:
- found
language_creators:
- found
language:
- ar
- en
- es
- et
- fa
- fi
- fr
- ru
- so
- sv
- tr
- zh
license: cc-by-4.0
multilinguality:
- multilingual
size_categories:
- 10K<n<100K
source_datasets:
- original
task_categories:
- translation
task_ids: []
pretty_name: OpusInfopankki
config_names:
- ar-en
- ar-es
- ar-et
- ar-fa
- ar-fi
- ar-fr
- ar-ru
- ar-so
- ar-sv
- ar-tr
- ar-zh
- en-es
- en-et
- en-fa
- en-fi
- en-fr
- en-ru
- en-so
- en-sv
- en-tr
- en-zh
- es-et
- es-fa
- es-fi
- es-fr
- es-ru
- es-so
- es-sv
- es-tr
- es-zh
- et-fa
- et-fi
- et-fr
- et-ru
- et-so
- et-sv
- et-tr
- et-zh
- fa-fi
- fa-fr
- fa-ru
- fa-so
- fa-sv
- fa-tr
- fa-zh
- fi-fr
- fi-ru
- fi-so
- fi-sv
- fi-tr
- fi-zh
- fr-ru
- fr-so
- fr-sv
- fr-tr
- fr-zh
- ru-so
- ru-sv
- ru-tr
- ru-zh
- so-sv
- so-tr
- so-zh
- sv-tr
- sv-zh
- tr-zh
dataset_info:
- config_name: ar-en
features:
- name: translation
dtype:
translation:
languages:
- ar
- en
splits:
- name: train
num_bytes: 10133337
num_examples: 50769
download_size: 2775475
dataset_size: 10133337
- config_name: ar-es
features:
- name: translation
dtype:
translation:
languages:
- ar
- es
splits:
- name: train
num_bytes: 8665355
num_examples: 40514
download_size: 2366264
dataset_size: 8665355
- config_name: ar-et
features:
- name: translation
dtype:
translation:
languages:
- ar
- et
splits:
- name: train
num_bytes: 9087555
num_examples: 46573
download_size: 2475165
dataset_size: 9087555
- config_name: ar-fa
features:
- name: translation
dtype:
translation:
languages:
- ar
- fa
splits:
- name: train
num_bytes: 12220196
num_examples: 47007
download_size: 3017006
dataset_size: 12220196
- config_name: ar-fi
features:
- name: translation
dtype:
translation:
languages:
- ar
- fi
splits:
- name: train
num_bytes: 9524265
num_examples: 49608
download_size: 2704144
dataset_size: 9524265
- config_name: ar-fr
features:
- name: translation
dtype:
translation:
languages:
- ar
- fr
splits:
- name: train
num_bytes: 8877629
num_examples: 41061
download_size: 2434048
dataset_size: 8877629
- config_name: ar-ru
features:
- name: translation
dtype:
translation:
languages:
- ar
- ru
splits:
- name: train
num_bytes: 13648194
num_examples: 50286
download_size: 3393441
dataset_size: 13648194
- config_name: ar-so
features:
- name: translation
dtype:
translation:
languages:
- ar
- so
splits:
- name: train
num_bytes: 9555548
num_examples: 44736
download_size: 2614055
dataset_size: 9555548
- config_name: ar-sv
features:
- name: translation
dtype:
translation:
languages:
- ar
- sv
splits:
- name: train
num_bytes: 8585135
num_examples: 43085
download_size: 2312217
dataset_size: 8585135
- config_name: ar-tr
features:
- name: translation
dtype:
translation:
languages:
- ar
- tr
splits:
- name: train
num_bytes: 8691077
num_examples: 41710
download_size: 2417172
dataset_size: 8691077
- config_name: ar-zh
features:
- name: translation
dtype:
translation:
languages:
- ar
- zh
splits:
- name: train
num_bytes: 5973634
num_examples: 29943
download_size: 1523722
dataset_size: 5973634
- config_name: en-es
features:
- name: translation
dtype:
translation:
languages:
- en
- es
splits:
- name: train
num_bytes: 6933983
num_examples: 42657
download_size: 2108422
dataset_size: 6933983
- config_name: en-et
features:
- name: translation
dtype:
translation:
languages:
- en
- et
splits:
- name: train
num_bytes: 8211562
num_examples: 58410
download_size: 2473732
dataset_size: 8211562
- config_name: en-fa
features:
- name: translation
dtype:
translation:
languages:
- en
- fa
splits:
- name: train
num_bytes: 10166305
num_examples: 48277
download_size: 2696051
dataset_size: 10166305
- config_name: en-fi
features:
- name: translation
dtype:
translation:
languages:
- en
- fi
splits:
- name: train
num_bytes: 10913601
num_examples: 84645
download_size: 3183398
dataset_size: 10913601
- config_name: en-fr
features:
- name: translation
dtype:
translation:
languages:
- en
- fr
splits:
- name: train
num_bytes: 8903183
num_examples: 56120
download_size: 2522185
dataset_size: 8903183
- config_name: en-ru
features:
- name: translation
dtype:
translation:
languages:
- en
- ru
splits:
- name: train
num_bytes: 15918195
num_examples: 75305
download_size: 3834067
dataset_size: 15918195
- config_name: en-so
features:
- name: translation
dtype:
translation:
languages:
- en
- so
splits:
- name: train
num_bytes: 7602290
num_examples: 47220
download_size: 2317274
dataset_size: 7602290
- config_name: en-sv
features:
- name: translation
dtype:
translation:
languages:
- en
- sv
splits:
- name: train
num_bytes: 7410975
num_examples: 51749
download_size: 2214196
dataset_size: 7410975
- config_name: en-tr
features:
- name: translation
dtype:
translation:
languages:
- en
- tr
splits:
- name: train
num_bytes: 6929154
num_examples: 44030
download_size: 2158897
dataset_size: 6929154
- config_name: en-zh
features:
- name: translation
dtype:
translation:
languages:
- en
- zh
splits:
- name: train
num_bytes: 4666963
num_examples: 29907
download_size: 1313255
dataset_size: 4666963
- config_name: es-et
features:
- name: translation
dtype:
translation:
languages:
- es
- et
splits:
- name: train
num_bytes: 6611956
num_examples: 42342
download_size: 2109076
dataset_size: 6611956
- config_name: es-fa
features:
- name: translation
dtype:
translation:
languages:
- es
- fa
splits:
- name: train
num_bytes: 9338210
num_examples: 41218
download_size: 2535729
dataset_size: 9338210
- config_name: es-fi
features:
- name: translation
dtype:
translation:
languages:
- es
- fi
splits:
- name: train
num_bytes: 6436298
num_examples: 41479
download_size: 2052254
dataset_size: 6436298
- config_name: es-fr
features:
- name: translation
dtype:
translation:
languages:
- es
- fr
splits:
- name: train
num_bytes: 7368724
num_examples: 41940
download_size: 2234633
dataset_size: 7368724
- config_name: es-ru
features:
- name: translation
dtype:
translation:
languages:
- es
- ru
splits:
- name: train
num_bytes: 9844937
num_examples: 41061
download_size: 2638368
dataset_size: 9844937
- config_name: es-so
features:
- name: translation
dtype:
translation:
languages:
- es
- so
splits:
- name: train
num_bytes: 7257038
num_examples: 41752
download_size: 2261851
dataset_size: 7257038
- config_name: es-sv
features:
- name: translation
dtype:
translation:
languages:
- es
- sv
splits:
- name: train
num_bytes: 6650652
num_examples: 41256
download_size: 2027874
dataset_size: 6650652
- config_name: es-tr
features:
- name: translation
dtype:
translation:
languages:
- es
- tr
splits:
- name: train
num_bytes: 7144065
num_examples: 42191
download_size: 2206245
dataset_size: 7144065
- config_name: es-zh
features:
- name: translation
dtype:
translation:
languages:
- es
- zh
splits:
- name: train
num_bytes: 4358751
num_examples: 26004
download_size: 1176333
dataset_size: 4358751
- config_name: et-fa
features:
- name: translation
dtype:
translation:
languages:
- et
- fa
splits:
- name: train
num_bytes: 9795996
num_examples: 47633
download_size: 2680445
dataset_size: 9795996
- config_name: et-fi
features:
- name: translation
dtype:
translation:
languages:
- et
- fi
splits:
- name: train
num_bytes: 7656989
num_examples: 57353
download_size: 2419554
dataset_size: 7656989
- config_name: et-fr
features:
- name: translation
dtype:
translation:
languages:
- et
- fr
splits:
- name: train
num_bytes: 7012430
num_examples: 44753
download_size: 2193006
dataset_size: 7012430
- config_name: et-ru
features:
- name: translation
dtype:
translation:
languages:
- et
- ru
splits:
- name: train
num_bytes: 12001391
num_examples: 55901
download_size: 3160673
dataset_size: 12001391
- config_name: et-so
features:
- name: translation
dtype:
translation:
languages:
- et
- so
splits:
- name: train
num_bytes: 7260797
num_examples: 46933
download_size: 2319211
dataset_size: 7260797
- config_name: et-sv
features:
- name: translation
dtype:
translation:
languages:
- et
- sv
splits:
- name: train
num_bytes: 6523041
num_examples: 46775
download_size: 2074448
dataset_size: 6523041
- config_name: et-tr
features:
- name: translation
dtype:
translation:
languages:
- et
- tr
splits:
- name: train
num_bytes: 6621665
num_examples: 43729
download_size: 2123880
dataset_size: 6621665
- config_name: et-zh
features:
- name: translation
dtype:
translation:
languages:
- et
- zh
splits:
- name: train
num_bytes: 4305273
num_examples: 27826
download_size: 1201275
dataset_size: 4305273
- config_name: fa-fi
features:
- name: translation
dtype:
translation:
languages:
- fa
- fi
splits:
- name: train
num_bytes: 9579257
num_examples: 46924
download_size: 2618699
dataset_size: 9579257
- config_name: fa-fr
features:
- name: translation
dtype:
translation:
languages:
- fa
- fr
splits:
- name: train
num_bytes: 9574254
num_examples: 41975
download_size: 2588917
dataset_size: 9574254
- config_name: fa-ru
features:
- name: translation
dtype:
translation:
languages:
- fa
- ru
splits:
- name: train
num_bytes: 13544451
num_examples: 47814
download_size: 3351553
dataset_size: 13544451
- config_name: fa-so
features:
- name: translation
dtype:
translation:
languages:
- fa
- so
splits:
- name: train
num_bytes: 10254723
num_examples: 45571
download_size: 2813443
dataset_size: 10254723
- config_name: fa-sv
features:
- name: translation
dtype:
translation:
languages:
- fa
- sv
splits:
- name: train
num_bytes: 9153752
num_examples: 43510
download_size: 2512908
dataset_size: 9153752
- config_name: fa-tr
features:
- name: translation
dtype:
translation:
languages:
- fa
- tr
splits:
- name: train
num_bytes: 9393209
num_examples: 42708
download_size: 2599794
dataset_size: 9393209
- config_name: fa-zh
features:
- name: translation
dtype:
translation:
languages:
- fa
- zh
splits:
- name: train
num_bytes: 5792439
num_examples: 27748
download_size: 1413779
dataset_size: 5792439
- config_name: fi-fr
features:
- name: translation
dtype:
translation:
languages:
- fi
- fr
splits:
- name: train
num_bytes: 8310851
num_examples: 55087
download_size: 2455971
dataset_size: 8310851
- config_name: fi-ru
features:
- name: translation
dtype:
translation:
languages:
- fi
- ru
splits:
- name: train
num_bytes: 15188168
num_examples: 74699
download_size: 3842831
dataset_size: 15188168
- config_name: fi-so
features:
- name: translation
dtype:
translation:
languages:
- fi
- so
splits:
- name: train
num_bytes: 7076221
num_examples: 46032
download_size: 2219872
dataset_size: 7076221
- config_name: fi-sv
features:
- name: translation
dtype:
translation:
languages:
- fi
- sv
splits:
- name: train
num_bytes: 6947224
num_examples: 51506
download_size: 2137629
dataset_size: 6947224
- config_name: fi-tr
features:
- name: translation
dtype:
translation:
languages:
- fi
- tr
splits:
- name: train
num_bytes: 6438716
num_examples: 42781
download_size: 2081615
dataset_size: 6438716
- config_name: fi-zh
features:
- name: translation
dtype:
translation:
languages:
- fi
- zh
splits:
- name: train
num_bytes: 4434168
num_examples: 29503
download_size: 1312557
dataset_size: 4434168
- config_name: fr-ru
features:
- name: translation
dtype:
translation:
languages:
- fr
- ru
splits:
- name: train
num_bytes: 12564196
num_examples: 54213
download_size: 3159587
dataset_size: 12564196
- config_name: fr-so
features:
- name: translation
dtype:
translation:
languages:
- fr
- so
splits:
- name: train
num_bytes: 7473559
num_examples: 42652
download_size: 2344399
dataset_size: 7473559
- config_name: fr-sv
features:
- name: translation
dtype:
translation:
languages:
- fr
- sv
splits:
- name: train
num_bytes: 7027563
num_examples: 43524
download_size: 2107653
dataset_size: 7027563
- config_name: fr-tr
features:
- name: translation
dtype:
translation:
languages:
- fr
- tr
splits:
- name: train
num_bytes: 7341078
num_examples: 43036
download_size: 2279611
dataset_size: 7341078
- config_name: fr-zh
features:
- name: translation
dtype:
translation:
languages:
- fr
- zh
splits:
- name: train
num_bytes: 4525109
num_examples: 26654
download_size: 1211652
dataset_size: 4525109
- config_name: ru-so
features:
- name: translation
dtype:
translation:
languages:
- ru
- so
splits:
- name: train
num_bytes: 10809193
num_examples: 45430
download_size: 2932790
dataset_size: 10809193
- config_name: ru-sv
features:
- name: translation
dtype:
translation:
languages:
- ru
- sv
splits:
- name: train
num_bytes: 10517433
num_examples: 47672
download_size: 2724280
dataset_size: 10517433
- config_name: ru-tr
features:
- name: translation
dtype:
translation:
languages:
- ru
- tr
splits:
- name: train
num_bytes: 9930592
num_examples: 42587
download_size: 2727600
dataset_size: 9930592
- config_name: ru-zh
features:
- name: translation
dtype:
translation:
languages:
- ru
- zh
splits:
- name: train
num_bytes: 6417808
num_examples: 29523
download_size: 1582749
dataset_size: 6417808
- config_name: so-sv
features:
- name: translation
dtype:
translation:
languages:
- so
- sv
splits:
- name: train
num_bytes: 6763754
num_examples: 42384
download_size: 2098877
dataset_size: 6763754
- config_name: so-tr
features:
- name: translation
dtype:
translation:
languages:
- so
- tr
splits:
- name: train
num_bytes: 7272349
num_examples: 43242
download_size: 2279999
dataset_size: 7272349
- config_name: so-zh
features:
- name: translation
dtype:
translation:
languages:
- so
- zh
splits:
- name: train
num_bytes: 4535955
num_examples: 27090
download_size: 1267321
dataset_size: 4535955
- config_name: sv-tr
features:
- name: translation
dtype:
translation:
languages:
- sv
- tr
splits:
- name: train
num_bytes: 6637744
num_examples: 42555
download_size: 2045078
dataset_size: 6637744
- config_name: sv-zh
features:
- name: translation
dtype:
translation:
languages:
- sv
- zh
splits:
- name: train
num_bytes: 4216405
num_examples: 26898
download_size: 1149609
dataset_size: 4216405
- config_name: tr-zh
features:
- name: translation
dtype:
translation:
languages:
- tr
- zh
splits:
- name: train
num_bytes: 4494071
num_examples: 27323
download_size: 1221951
dataset_size: 4494071
configs:
- config_name: ar-en
data_files:
- split: train
path: ar-en/train-*
- config_name: ar-es
data_files:
- split: train
path: ar-es/train-*
- config_name: ar-et
data_files:
- split: train
path: ar-et/train-*
- config_name: ar-fa
data_files:
- split: train
path: ar-fa/train-*
- config_name: ar-fi
data_files:
- split: train
path: ar-fi/train-*
- config_name: ar-fr
data_files:
- split: train
path: ar-fr/train-*
- config_name: ar-ru
data_files:
- split: train
path: ar-ru/train-*
- config_name: ar-so
data_files:
- split: train
path: ar-so/train-*
- config_name: ar-sv
data_files:
- split: train
path: ar-sv/train-*
- config_name: ar-tr
data_files:
- split: train
path: ar-tr/train-*
- config_name: ar-zh
data_files:
- split: train
path: ar-zh/train-*
- config_name: en-es
data_files:
- split: train
path: en-es/train-*
- config_name: en-et
data_files:
- split: train
path: en-et/train-*
- config_name: en-fa
data_files:
- split: train
path: en-fa/train-*
- config_name: en-fi
data_files:
- split: train
path: en-fi/train-*
- config_name: en-fr
data_files:
- split: train
path: en-fr/train-*
- config_name: en-ru
data_files:
- split: train
path: en-ru/train-*
- config_name: en-so
data_files:
- split: train
path: en-so/train-*
- config_name: en-sv
data_files:
- split: train
path: en-sv/train-*
- config_name: en-tr
data_files:
- split: train
path: en-tr/train-*
- config_name: en-zh
data_files:
- split: train
path: en-zh/train-*
- config_name: es-et
data_files:
- split: train
path: es-et/train-*
- config_name: es-fa
data_files:
- split: train
path: es-fa/train-*
- config_name: es-fi
data_files:
- split: train
path: es-fi/train-*
- config_name: es-fr
data_files:
- split: train
path: es-fr/train-*
- config_name: es-ru
data_files:
- split: train
path: es-ru/train-*
- config_name: es-so
data_files:
- split: train
path: es-so/train-*
- config_name: es-sv
data_files:
- split: train
path: es-sv/train-*
- config_name: es-tr
data_files:
- split: train
path: es-tr/train-*
- config_name: es-zh
data_files:
- split: train
path: es-zh/train-*
- config_name: et-fa
data_files:
- split: train
path: et-fa/train-*
- config_name: et-fi
data_files:
- split: train
path: et-fi/train-*
- config_name: et-fr
data_files:
- split: train
path: et-fr/train-*
- config_name: et-ru
data_files:
- split: train
path: et-ru/train-*
- config_name: et-so
data_files:
- split: train
path: et-so/train-*
- config_name: et-sv
data_files:
- split: train
path: et-sv/train-*
- config_name: et-tr
data_files:
- split: train
path: et-tr/train-*
- config_name: et-zh
data_files:
- split: train
path: et-zh/train-*
- config_name: fa-fi
data_files:
- split: train
path: fa-fi/train-*
- config_name: fa-fr
data_files:
- split: train
path: fa-fr/train-*
- config_name: fa-ru
data_files:
- split: train
path: fa-ru/train-*
- config_name: fa-so
data_files:
- split: train
path: fa-so/train-*
- config_name: fa-sv
data_files:
- split: train
path: fa-sv/train-*
- config_name: fa-tr
data_files:
- split: train
path: fa-tr/train-*
- config_name: fa-zh
data_files:
- split: train
path: fa-zh/train-*
- config_name: fi-fr
data_files:
- split: train
path: fi-fr/train-*
- config_name: fi-ru
data_files:
- split: train
path: fi-ru/train-*
- config_name: fi-so
data_files:
- split: train
path: fi-so/train-*
- config_name: fi-sv
data_files:
- split: train
path: fi-sv/train-*
- config_name: fi-tr
data_files:
- split: train
path: fi-tr/train-*
- config_name: fi-zh
data_files:
- split: train
path: fi-zh/train-*
- config_name: fr-ru
data_files:
- split: train
path: fr-ru/train-*
- config_name: fr-so
data_files:
- split: train
path: fr-so/train-*
- config_name: fr-sv
data_files:
- split: train
path: fr-sv/train-*
- config_name: fr-tr
data_files:
- split: train
path: fr-tr/train-*
- config_name: fr-zh
data_files:
- split: train
path: fr-zh/train-*
- config_name: ru-so
data_files:
- split: train
path: ru-so/train-*
- config_name: ru-sv
data_files:
- split: train
path: ru-sv/train-*
- config_name: ru-tr
data_files:
- split: train
path: ru-tr/train-*
- config_name: ru-zh
data_files:
- split: train
path: ru-zh/train-*
- config_name: so-sv
data_files:
- split: train
path: so-sv/train-*
- config_name: so-tr
data_files:
- split: train
path: so-tr/train-*
- config_name: so-zh
data_files:
- split: train
path: so-zh/train-*
- config_name: sv-tr
data_files:
- split: train
path: sv-tr/train-*
- config_name: sv-zh
data_files:
- split: train
path: sv-zh/train-*
- config_name: tr-zh
data_files:
- split: train
path: tr-zh/train-*
---
# Dataset Card for infopankki
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** https://opus.nlpl.eu/infopankki/corpus/version/infopankki
- **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Leaderboard:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Dataset Summary
A parallel corpus of 12 languages, 66 bitexts.
### Supported Tasks and Leaderboards
The underlying task is machine translation.
### Languages
[More Information Needed]
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
Source: http://www.infopankki.fi via the Open Data API
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
Licensed under [CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/).
### Citation Information
If you use any part of the corpus in your own work, please cite the following article:
```
@inproceedings{tiedemann-2012-parallel,
title = "Parallel Data, Tools and Interfaces in {OPUS}",
author = {Tiedemann, J{\"o}rg},
editor = "Calzolari, Nicoletta and
Choukri, Khalid and
Declerck, Thierry and
Do{\u{g}}an, Mehmet U{\u{g}}ur and
Maegaard, Bente and
Mariani, Joseph and
Moreno, Asuncion and
Odijk, Jan and
Piperidis, Stelios",
booktitle = "Proceedings of the Eighth International Conference on Language Resources and Evaluation ({LREC}'12)",
month = may,
year = "2012",
address = "Istanbul, Turkey",
publisher = "European Language Resources Association (ELRA)",
url = "http://www.lrec-conf.org/proceedings/lrec2012/pdf/463_Paper.pdf",
pages = "2214--2218",
abstract = "This paper presents the current status of OPUS, a growing language resource of parallel corpora and related tools. The focus in OPUS is to provide freely available data sets in various formats together with basic annotation to be useful for applications in computational linguistics, translation studies and cross-linguistic corpus studies. In this paper, we report about new data sets and their features, additional annotation tools and models provided from the website and essential interfaces and on-line services included in the project.",
}
```
### Contributions
Thanks to [@patil-suraj](https://github.com/patil-suraj) for adding this dataset. |
pietrolesci/pythia-deduped-stats-raw | pietrolesci | "2025-04-09T19:26:24Z" | 15,193 | 0 | [
"language:en",
"size_categories:10M<n<100M",
"format:parquet",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"arxiv:2406.04327",
"region:us"
] | null | "2024-02-12T01:19:05Z" | ---
language:
- en
---
This dataset has been created as an artefact of the paper [Causal Estimation of Memorisation Profiles (Lesci et al., 2024)](https://arxiv.org/abs/2406.04327).
More info about this dataset in the related collection [Memorisation-Profiles](https://huggingface.co/collections/pietrolesci/memorisation-profiles-6619604c4594c878cd9d451f).
Collection of data statistics computed using the intermediate checkpoints (step0, step1000, ..., step143k) of all Pythia deduped versions.
This folder contains the model evaluations (or "stats") for each model size included in the study. This is the "raw" version where we have stats at the token level. We gathered these statistics "just in case" since the inference process was expensive. However, we provide the sequence-level statistics in the `pietrolesci/pythia-deduped-stats` dataset. |
jxie/camelyon17 | jxie | "2023-06-22T09:10:17Z" | 15,192 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:image",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2023-06-20T19:19:23Z" | ---
dataset_info:
features:
- name: image
dtype: image
- name: label
dtype:
class_label:
names:
'0': '0'
'1': '1'
splits:
- name: id_train
num_bytes: 1028118482.46
num_examples: 302436
- name: id_val
num_bytes: 114778024.28
num_examples: 33560
- name: unlabeled_train
num_bytes: 2167898085.29
num_examples: 600030
- name: ood_val
num_bytes: 129021135.128
num_examples: 34904
- name: ood_test
num_bytes: 276517018.354
num_examples: 85054
download_size: 2858780601
dataset_size: 3716332745.5119996
---
# Dataset Card for "camelyon17"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
Yelp/yelp_review_full | Yelp | "2024-01-04T17:14:53Z" | 15,147 | 118 | [
"task_categories:text-classification",
"task_ids:sentiment-classification",
"annotations_creators:crowdsourced",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"source_datasets:original",
"language:en",
"license:other",
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:1509.01626",
"region:us"
] | [
"text-classification"
] | "2022-03-02T23:29:22Z" | ---
annotations_creators:
- crowdsourced
language_creators:
- crowdsourced
language:
- en
license:
- other
multilinguality:
- monolingual
size_categories:
- 100K<n<1M
source_datasets:
- original
task_categories:
- text-classification
task_ids:
- sentiment-classification
pretty_name: YelpReviewFull
license_details: yelp-licence
dataset_info:
config_name: yelp_review_full
features:
- name: label
dtype:
class_label:
names:
'0': 1 star
'1': 2 star
'2': 3 stars
'3': 4 stars
'4': 5 stars
- name: text
dtype: string
splits:
- name: train
num_bytes: 483811554
num_examples: 650000
- name: test
num_bytes: 37271188
num_examples: 50000
download_size: 322952369
dataset_size: 521082742
configs:
- config_name: yelp_review_full
data_files:
- split: train
path: yelp_review_full/train-*
- split: test
path: yelp_review_full/test-*
default: true
train-eval-index:
- config: yelp_review_full
task: text-classification
task_id: multi_class_classification
splits:
train_split: train
eval_split: test
col_mapping:
text: text
label: target
metrics:
- type: accuracy
name: Accuracy
- type: f1
name: F1 macro
args:
average: macro
- type: f1
name: F1 micro
args:
average: micro
- type: f1
name: F1 weighted
args:
average: weighted
- type: precision
name: Precision macro
args:
average: macro
- type: precision
name: Precision micro
args:
average: micro
- type: precision
name: Precision weighted
args:
average: weighted
- type: recall
name: Recall macro
args:
average: macro
- type: recall
name: Recall micro
args:
average: micro
- type: recall
name: Recall weighted
args:
average: weighted
---
---
# Dataset Card for YelpReviewFull
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [Yelp](https://www.yelp.com/dataset)
- **Repository:** [Crepe](https://github.com/zhangxiangxiao/Crepe)
- **Paper:** [Character-level Convolutional Networks for Text Classification](https://arxiv.org/abs/1509.01626)
- **Point of Contact:** [Xiang Zhang](mailto:[email protected])
### Dataset Summary
The Yelp reviews dataset consists of reviews from Yelp.
It is extracted from the Yelp Dataset Challenge 2015 data.
### Supported Tasks and Leaderboards
- `text-classification`, `sentiment-classification`: The dataset is mainly used for text classification: given the text, predict the sentiment.
### Languages
The reviews were mainly written in English.
## Dataset Structure
### Data Instances
A typical data point, comprises of a text and the corresponding label.
An example from the YelpReviewFull test set looks as follows:
```
{
'label': 0,
'text': 'I got \'new\' tires from them and within two weeks got a flat. I took my car to a local mechanic to see if i could get the hole patched, but they said the reason I had a flat was because the previous patch had blown - WAIT, WHAT? I just got the tire and never needed to have it patched? This was supposed to be a new tire. \\nI took the tire over to Flynn\'s and they told me that someone punctured my tire, then tried to patch it. So there are resentful tire slashers? I find that very unlikely. After arguing with the guy and telling him that his logic was far fetched he said he\'d give me a new tire \\"this time\\". \\nI will never go back to Flynn\'s b/c of the way this guy treated me and the simple fact that they gave me a used tire!'
}
```
### Data Fields
- 'text': The review texts are escaped using double quotes ("), and any internal double quote is escaped by 2 double quotes (""). New lines are escaped by a backslash followed with an "n" character, that is "\n".
- 'label': Corresponds to the score associated with the review (between 1 and 5).
### Data Splits
The Yelp reviews full star dataset is constructed by randomly taking 130,000 training samples and 10,000 testing samples for each review star from 1 to 5.
In total there are 650,000 training samples and 50,000 testing samples.
## Dataset Creation
### Curation Rationale
The Yelp reviews full star dataset is constructed by Xiang Zhang ([email protected]) from the Yelp Dataset Challenge 2015. It is first used as a text classification benchmark in the following paper: Xiang Zhang, Junbo Zhao, Yann LeCun. Character-level Convolutional Networks for Text Classification. Advances in Neural Information Processing Systems 28 (NIPS 2015).
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
You can check the official [yelp-dataset-agreement](https://s3-media3.fl.yelpcdn.com/assets/srv0/engineering_pages/bea5c1e92bf3/assets/vendor/yelp-dataset-agreement.pdf).
### Citation Information
Xiang Zhang, Junbo Zhao, Yann LeCun. Character-level Convolutional Networks for Text Classification. Advances in Neural Information Processing Systems 28 (NIPS 2015).
### Contributions
Thanks to [@hfawaz](https://github.com/hfawaz) for adding this dataset. |
laion/strategic_game_chess | laion | "2023-10-20T04:14:20Z" | 15,087 | 28 | [
"license:cc-by-4.0",
"size_categories:1M<n<10M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"game"
] | null | "2023-06-06T02:09:13Z" | ---
tags:
- game
pretty_name: The Chess Dataset
license: cc-by-4.0
---
# Chess
> Recent advancements in artificial intelligence (AI) underscore the progress in reasoning and planning shown by recent generalist machine learning (ML) models. This progress can be accelerated by datasets that further strengthen these generic capabilities when used for training foundation models of various kinds. This research initiative has generated extensive synthetic datasets from complex games — chess, Rubik's Cube, and mazes — to study the facilitation and advancement of these critical generic skills in AI models.
This dataset contains 3.2 billion games, equating to approximately 608 billion individual moves.
It is generated through self-play by the Stockfish engine using Fugaku, and we add initial moves to expand its diversity.
Each game has three columns: 'Moves', 'Termination' and 'Result',
- 'Moves': the recorded chess moves of the whole game.
- 'Termination': how the game ended, e.g. CHECKMATE, INSUFFICIENT_MATERIAL, etc.
- Please check this for detail information
https://python-chess.readthedocs.io/en/latest/core.html#chess.Outcome.termination
- 'Result': result of this game, 1-0, 1/2-1/2, 0-1.
### Call for Collaboration
We invite interested researchers and ML practitioners to explore these datasets' potential. Whether training GPT models from scratch or fine-tuning pre-existing models, we encourage the exploration of various pre-training and fine-tuning strategies using these game-based datasets standalone or as enhancement of other already composed large-scale data.
Our team is prepared to assist in securing necessary GPU resources for these explorations. We are particularly interested in collaborators eager to pre-train models of small to medium scale on our game data, subsequently transition to standard text-based training, and then perform comparative analyses against models of similar architecture trained exclusively on text data.
Conclusively, this initiative marks a significant stride toward intricate problem-solving and strategic planning in AI, extending an open invitation to the research community for collaborative advancement in this domain. |
taesiri/arxiv_audio | taesiri | "2023-12-22T04:56:39Z" | 15,073 | 2 | [
"license:cc-by-4.0",
"size_categories:1K<n<10K",
"format:text",
"modality:audio",
"modality:text",
"library:datasets",
"library:mlcroissant",
"region:us"
] | null | "2023-07-03T03:46:57Z" | ---
license: cc-by-4.0
---
|
MERaLiON/Multitask-National-Speech-Corpus-v1 | MERaLiON | "2025-01-21T03:54:47Z" | 15,053 | 9 | [
"size_categories:10M<n<100M",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"arxiv:2501.01034",
"region:us"
] | null | "2024-11-28T02:20:03Z" | ---
dataset_info:
- config_name: ASR-PART1-Test
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 571211945.0
num_examples: 3000
download_size: 559850838
dataset_size: 571211945.0
- config_name: ASR-PART1-Train
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 385418198300.75
num_examples: 2258301
download_size: 377045178322
dataset_size: 385418198300.75
- config_name: ASR-PART2-Test
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 465911787.0
num_examples: 3000
download_size: 453955477
dataset_size: 465911787.0
- config_name: ASR-PART2-Train
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 363327397513.5
num_examples: 2473990
download_size: 353295436382
dataset_size: 363327397513.5
- config_name: ASR-PART3-Test
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 888128151.0
num_examples: 1000
download_size: 869839634
dataset_size: 888128151.0
- config_name: ASR-PART3-Train
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 85425161802.75
num_examples: 96245
download_size: 83617613004
dataset_size: 85425161802.75
- config_name: ASR-PART4-Test
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 840669815.0
num_examples: 1000
download_size: 840073363
dataset_size: 840669815.0
- config_name: ASR-PART4-Train
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 8086630962.75
num_examples: 9629
download_size: 8080765859
dataset_size: 8086630962.75
- config_name: ASR-PART5-Test
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 796429463.0
num_examples: 1000
download_size: 793697822
dataset_size: 796429463.0
- config_name: ASR-PART5-Train
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 19368760964.0
num_examples: 24320
download_size: 19307168143
dataset_size: 19368760964.0
- config_name: ASR-PART6-Test
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 778918943.0
num_examples: 1000
download_size: 776120504
dataset_size: 778918943.0
- config_name: ASR-PART6-Train
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 80452740634.25
num_examples: 103935
download_size: 80163834483
dataset_size: 80452740634.25
- config_name: PQA-AR-Dialogue-Test
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 2524213101.0
num_examples: 3000
download_size: 2502881430
dataset_size: 2524213101.0
- config_name: PQA-AR-Dialogue-Train
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 112833638040.5
num_examples: 130194
download_size: 110972595941
dataset_size: 112833638040.5
- config_name: PQA-AR-Sentence-Test
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 1037448188.0
num_examples: 6000
download_size: 1013575979
dataset_size: 1037448188.0
- config_name: PQA-AR-Sentence-Train
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 748962171712.25
num_examples: 4732291
download_size: 730150284902
dataset_size: 748962171712.25
- config_name: PQA-GR-Dialogue-Test
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 2524070317.0
num_examples: 3000
download_size: 2502849279
dataset_size: 2524070317.0
- config_name: PQA-GR-Dialogue-Train
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 112828111831.5
num_examples: 130194
download_size: 110971280799
dataset_size: 112828111831.5
- config_name: PQA-GR-Sentence-Test
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 1037310373.0
num_examples: 6000
download_size: 1013567377
dataset_size: 1037310373.0
- config_name: PQA-GR-Sentence-Train
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 748852926219.25
num_examples: 4732291
download_size: 730143237881
dataset_size: 748852926219.25
- config_name: SDS-PART3-Test
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 90554299.0
num_examples: 100
download_size: 89690527
dataset_size: 90554299.0
- config_name: SDS-PART3-Train
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 85422319098.75
num_examples: 96245
download_size: 83614162476
dataset_size: 85422319098.75
- config_name: SDS-PART4-Test
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 88940350.0
num_examples: 100
download_size: 88911005
dataset_size: 88940350.0
- config_name: SDS-PART4-Train
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 8085687190.75
num_examples: 9629
download_size: 8079929577
dataset_size: 8085687190.75
- config_name: SDS-PART5-Test
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 88124206.0
num_examples: 100
download_size: 87803731
dataset_size: 88124206.0
- config_name: SDS-PART5-Train
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 19367349094.0
num_examples: 24320
download_size: 19305847065
dataset_size: 19367349094.0
- config_name: SDS-PART6-Test
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 87732392.0
num_examples: 100
download_size: 87551938
dataset_size: 87732392.0
- config_name: SDS-PART6-Train
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 80449120699.25
num_examples: 103935
download_size: 80159781897
dataset_size: 80449120699.25
- config_name: SQA-PART3-Test
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 90552574.0
num_examples: 100
download_size: 89693755
dataset_size: 90552574.0
- config_name: SQA-PART3-Train
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 85376993587.0
num_examples: 96232
download_size: 83585944443
dataset_size: 85376993587.0
- config_name: SQA-PART4-Test
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 88935324.0
num_examples: 100
download_size: 88913332
dataset_size: 88935324.0
- config_name: SQA-PART4-Train
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 8080383599.5
num_examples: 9626
download_size: 8076488125
dataset_size: 8080383599.5
- config_name: SQA-PART5-Test
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 88115583.0
num_examples: 100
download_size: 87803336
dataset_size: 88115583.0
- config_name: SQA-PART5-Train
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 19354344382.25
num_examples: 24311
download_size: 19296773516
dataset_size: 19354344382.25
- config_name: SQA-PART6-Test
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 87727131.0
num_examples: 100
download_size: 87554027
dataset_size: 87727131.0
- config_name: SQA-PART6-Train
features:
- name: context
dtype:
audio:
sampling_rate: 16000
- name: instruction
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 80401794701.0
num_examples: 103908
download_size: 80128135250
dataset_size: 80401794701.0
configs:
- config_name: ASR-PART1-Test
data_files:
- split: train
path: ASR-PART1-Test/train-*
- config_name: ASR-PART1-Train
data_files:
- split: train
path: ASR-PART1-Train/train-*
- config_name: ASR-PART2-Test
data_files:
- split: train
path: ASR-PART2-Test/train-*
- config_name: ASR-PART2-Train
data_files:
- split: train
path: ASR-PART2-Train/train-*
- config_name: ASR-PART3-Test
data_files:
- split: train
path: ASR-PART3-Test/train-*
- config_name: ASR-PART3-Train
data_files:
- split: train
path: ASR-PART3-Train/train-*
- config_name: ASR-PART4-Test
data_files:
- split: train
path: ASR-PART4-Test/train-*
- config_name: ASR-PART4-Train
data_files:
- split: train
path: ASR-PART4-Train/train-*
- config_name: ASR-PART5-Test
data_files:
- split: train
path: ASR-PART5-Test/train-*
- config_name: ASR-PART5-Train
data_files:
- split: train
path: ASR-PART5-Train/train-*
- config_name: ASR-PART6-Test
data_files:
- split: train
path: ASR-PART6-Test/train-*
- config_name: ASR-PART6-Train
data_files:
- split: train
path: ASR-PART6-Train/train-*
- config_name: PQA-AR-Dialogue-Test
data_files:
- split: train
path: PQA-AR-Dialogue-Test/train-*
- config_name: PQA-AR-Dialogue-Train
data_files:
- split: train
path: PQA-AR-Dialogue-Train/train-*
- config_name: PQA-AR-Sentence-Test
data_files:
- split: train
path: PQA-AR-Sentence-Test/train-*
- config_name: PQA-AR-Sentence-Train
data_files:
- split: train
path: PQA-AR-Sentence-Train/train-*
- config_name: PQA-GR-Dialogue-Test
data_files:
- split: train
path: PQA-GR-Dialogue-Test/train-*
- config_name: PQA-GR-Dialogue-Train
data_files:
- split: train
path: PQA-GR-Dialogue-Train/train-*
- config_name: PQA-GR-Sentence-Test
data_files:
- split: train
path: PQA-GR-Sentence-Test/train-*
- config_name: PQA-GR-Sentence-Train
data_files:
- split: train
path: PQA-GR-Sentence-Train/train-*
- config_name: SDS-PART3-Test
data_files:
- split: train
path: SDS-PART3-Test/train-*
- config_name: SDS-PART3-Train
data_files:
- split: train
path: SDS-PART3-Train/train-*
- config_name: SDS-PART4-Test
data_files:
- split: train
path: SDS-PART4-Test/train-*
- config_name: SDS-PART4-Train
data_files:
- split: train
path: SDS-PART4-Train/train-*
- config_name: SDS-PART5-Test
data_files:
- split: train
path: SDS-PART5-Test/train-*
- config_name: SDS-PART5-Train
data_files:
- split: train
path: SDS-PART5-Train/train-*
- config_name: SDS-PART6-Test
data_files:
- split: train
path: SDS-PART6-Test/train-*
- config_name: SDS-PART6-Train
data_files:
- split: train
path: SDS-PART6-Train/train-*
- config_name: SQA-PART3-Test
data_files:
- split: train
path: SQA-PART3-Test/train-*
- config_name: SQA-PART3-Train
data_files:
- split: train
path: SQA-PART3-Train/train-*
- config_name: SQA-PART4-Test
data_files:
- split: train
path: SQA-PART4-Test/train-*
- config_name: SQA-PART4-Train
data_files:
- split: train
path: SQA-PART4-Train/train-*
- config_name: SQA-PART5-Test
data_files:
- split: train
path: SQA-PART5-Test/train-*
- config_name: SQA-PART5-Train
data_files:
- split: train
path: SQA-PART5-Train/train-*
- config_name: SQA-PART6-Test
data_files:
- split: train
path: SQA-PART6-Test/train-*
- config_name: SQA-PART6-Train
data_files:
- split: train
path: SQA-PART6-Train/train-*
---
Multitask-National-Speech-Corpus (MNSC v1) is derived from [IMDA's NSC Corpus](https://www.imda.gov.sg/how-we-can-help/national-speech-corpus).
MNSC is a multitask speech understanding dataset derived and further annotated from IMDA NSC Corpus. It focuses on the knowledge of Singapore's local accent, localised terms, and code-switching.
- ASR: Automatic Speech Recognition
- SQA: Speech Question Answering
- SDS: Spoken Dialogue Summarization
- PQA: Paralinguistic Question Answering
```
from datasets import load_dataset
data = load_dataset('MERaLiON/Multitask-National-Speech-Corpus-v1', data_dir='ASR-PART1-Train')['train']
```
```
@article{wang2025advancing,
title={Advancing Singlish Understanding: Bridging the Gap with Datasets and Multimodal Models},
author={Wang, Bin and Zou, Xunlong and Sun, Shuo and Zhang, Wenyu and He, Yingxu and Liu, Zhuohan and Wei, Chengwei and Chen, Nancy F and Aw, AiTi},
journal={arXiv preprint arXiv:2501.01034},
year={2025}
}
```
|
laion/LAION-Audio-300M | laion | "2025-01-10T21:33:57Z" | 15,032 | 28 | [
"license:apache-2.0",
"size_categories:100M<n<1B",
"format:webdataset",
"modality:audio",
"modality:text",
"library:datasets",
"library:webdataset",
"library:mlcroissant",
"region:us"
] | null | "2024-12-29T09:50:41Z" | ---
license: apache-2.0
---
|
davanstrien/AmericanStories-parquet | davanstrien | "2023-10-20T15:09:54Z" | 15,026 | 2 | [
"size_categories:10M<n<100M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2023-10-20T14:07:00Z" | ---
configs:
- config_name: default
data_files:
- split: '1774'
path: data/1774-*
- split: '1798'
path: data/1798-*
- split: '1799'
path: data/1799-*
- split: '1800'
path: data/1800-*
- split: '1801'
path: data/1801-*
- split: '1802'
path: data/1802-*
- split: '1803'
path: data/1803-*
- split: '1804'
path: data/1804-*
- split: '1805'
path: data/1805-*
- split: '1806'
path: data/1806-*
- split: '1807'
path: data/1807-*
- split: '1808'
path: data/1808-*
- split: '1809'
path: data/1809-*
- split: '1810'
path: data/1810-*
- split: '1811'
path: data/1811-*
- split: '1812'
path: data/1812-*
- split: '1813'
path: data/1813-*
- split: '1814'
path: data/1814-*
- split: '1815'
path: data/1815-*
- split: '1816'
path: data/1816-*
- split: '1817'
path: data/1817-*
- split: '1818'
path: data/1818-*
- split: '1819'
path: data/1819-*
- split: '1820'
path: data/1820-*
- split: '1821'
path: data/1821-*
- split: '1822'
path: data/1822-*
- split: '1823'
path: data/1823-*
- split: '1824'
path: data/1824-*
- split: '1825'
path: data/1825-*
- split: '1826'
path: data/1826-*
- split: '1827'
path: data/1827-*
- split: '1828'
path: data/1828-*
- split: '1829'
path: data/1829-*
- split: '1830'
path: data/1830-*
- split: '1831'
path: data/1831-*
- split: '1832'
path: data/1832-*
- split: '1833'
path: data/1833-*
- split: '1834'
path: data/1834-*
- split: '1835'
path: data/1835-*
- split: '1836'
path: data/1836-*
- split: '1837'
path: data/1837-*
- split: '1838'
path: data/1838-*
- split: '1839'
path: data/1839-*
- split: '1840'
path: data/1840-*
- split: '1841'
path: data/1841-*
- split: '1842'
path: data/1842-*
- split: '1843'
path: data/1843-*
- split: '1844'
path: data/1844-*
- split: '1845'
path: data/1845-*
- split: '1846'
path: data/1846-*
- split: '1847'
path: data/1847-*
- split: '1848'
path: data/1848-*
- split: '1849'
path: data/1849-*
- split: '1850'
path: data/1850-*
- split: '1851'
path: data/1851-*
- split: '1852'
path: data/1852-*
- split: '1853'
path: data/1853-*
- split: '1854'
path: data/1854-*
- split: '1855'
path: data/1855-*
- split: '1856'
path: data/1856-*
- split: '1857'
path: data/1857-*
- split: '1858'
path: data/1858-*
- split: '1859'
path: data/1859-*
- split: '1860'
path: data/1860-*
- split: '1861'
path: data/1861-*
- split: '1862'
path: data/1862-*
- split: '1863'
path: data/1863-*
- split: '1864'
path: data/1864-*
- split: '1865'
path: data/1865-*
- split: '1866'
path: data/1866-*
- split: '1867'
path: data/1867-*
- split: '1868'
path: data/1868-*
- split: '1869'
path: data/1869-*
- split: '1870'
path: data/1870-*
- split: '1871'
path: data/1871-*
- split: '1872'
path: data/1872-*
- split: '1873'
path: data/1873-*
- split: '1874'
path: data/1874-*
- split: '1875'
path: data/1875-*
- split: '1876'
path: data/1876-*
- split: '1877'
path: data/1877-*
- split: '1878'
path: data/1878-*
- split: '1879'
path: data/1879-*
- split: '1880'
path: data/1880-*
- split: '1881'
path: data/1881-*
- split: '1882'
path: data/1882-*
- split: '1883'
path: data/1883-*
- split: '1884'
path: data/1884-*
- split: '1885'
path: data/1885-*
- split: '1886'
path: data/1886-*
- split: '1887'
path: data/1887-*
- split: '1888'
path: data/1888-*
- split: '1889'
path: data/1889-*
- split: '1890'
path: data/1890-*
- split: '1891'
path: data/1891-*
- split: '1892'
path: data/1892-*
- split: '1893'
path: data/1893-*
- split: '1894'
path: data/1894-*
- split: '1895'
path: data/1895-*
- split: '1896'
path: data/1896-*
- split: '1897'
path: data/1897-*
- split: '1898'
path: data/1898-*
- split: '1899'
path: data/1899-*
- split: '1900'
path: data/1900-*
- split: '1901'
path: data/1901-*
- split: '1902'
path: data/1902-*
- split: '1903'
path: data/1903-*
- split: '1904'
path: data/1904-*
- split: '1905'
path: data/1905-*
- split: '1906'
path: data/1906-*
- split: '1907'
path: data/1907-*
- split: '1908'
path: data/1908-*
- split: '1909'
path: data/1909-*
- split: '1910'
path: data/1910-*
- split: '1911'
path: data/1911-*
- split: '1912'
path: data/1912-*
- split: '1913'
path: data/1913-*
- split: '1914'
path: data/1914-*
- split: '1915'
path: data/1915-*
- split: '1916'
path: data/1916-*
- split: '1917'
path: data/1917-*
- split: '1918'
path: data/1918-*
- split: '1919'
path: data/1919-*
- split: '1920'
path: data/1920-*
- split: '1921'
path: data/1921-*
- split: '1922'
path: data/1922-*
- split: '1923'
path: data/1923-*
- split: '1924'
path: data/1924-*
- split: '1925'
path: data/1925-*
- split: '1926'
path: data/1926-*
- split: '1927'
path: data/1927-*
- split: '1928'
path: data/1928-*
- split: '1929'
path: data/1929-*
- split: '1930'
path: data/1930-*
- split: '1931'
path: data/1931-*
- split: '1932'
path: data/1932-*
- split: '1933'
path: data/1933-*
- split: '1934'
path: data/1934-*
- split: '1935'
path: data/1935-*
- split: '1936'
path: data/1936-*
- split: '1937'
path: data/1937-*
- split: '1938'
path: data/1938-*
- split: '1939'
path: data/1939-*
- split: '1940'
path: data/1940-*
- split: '1941'
path: data/1941-*
- split: '1942'
path: data/1942-*
- split: '1943'
path: data/1943-*
- split: '1944'
path: data/1944-*
- split: '1945'
path: data/1945-*
- split: '1946'
path: data/1946-*
- split: '1947'
path: data/1947-*
- split: '1948'
path: data/1948-*
- split: '1949'
path: data/1949-*
- split: '1950'
path: data/1950-*
- split: '1951'
path: data/1951-*
- split: '1952'
path: data/1952-*
- split: '1953'
path: data/1953-*
- split: '1954'
path: data/1954-*
- split: '1955'
path: data/1955-*
- split: '1956'
path: data/1956-*
- split: '1957'
path: data/1957-*
- split: '1958'
path: data/1958-*
- split: '1959'
path: data/1959-*
- split: '1960'
path: data/1960-*
- split: '1961'
path: data/1961-*
- split: '1962'
path: data/1962-*
- split: '1963'
path: data/1963-*
dataset_info:
features:
- name: article_id
dtype: string
- name: newspaper_name
dtype: string
- name: edition
dtype: string
- name: date
dtype: string
- name: page
dtype: string
- name: headline
dtype: string
- name: byline
dtype: string
- name: article
dtype: string
splits:
- name: '1774'
num_bytes: 22245
num_examples: 12
- name: '1798'
num_bytes: 72288
num_examples: 73
- name: '1799'
num_bytes: 946373
num_examples: 623
- name: '1800'
num_bytes: 38139
num_examples: 45
- name: '1801'
num_bytes: 94991
num_examples: 93
- name: '1802'
num_bytes: 1463322
num_examples: 1158
- name: '1803'
num_bytes: 799797
num_examples: 654
- name: '1804'
num_bytes: 120141
num_examples: 103
- name: '1805'
num_bytes: 2475205
num_examples: 2303
- name: '1806'
num_bytes: 2043729
num_examples: 1860
- name: '1807'
num_bytes: 310568
num_examples: 315
- name: '1808'
num_bytes: 75639
num_examples: 45
- name: '1809'
num_bytes: 430706
num_examples: 422
- name: '1810'
num_bytes: 1319755
num_examples: 982
- name: '1811'
num_bytes: 117701
num_examples: 110
- name: '1812'
num_bytes: 75299
num_examples: 67
- name: '1813'
num_bytes: 290966
num_examples: 242
- name: '1814'
num_bytes: 378212
num_examples: 379
- name: '1815'
num_bytes: 185179
num_examples: 160
- name: '1816'
num_bytes: 495706
num_examples: 409
- name: '1817'
num_bytes: 446354
num_examples: 394
- name: '1818'
num_bytes: 1257916
num_examples: 1108
- name: '1819'
num_bytes: 2476297
num_examples: 1997
- name: '1820'
num_bytes: 611884
num_examples: 433
- name: '1821'
num_bytes: 347361
num_examples: 270
- name: '1822'
num_bytes: 286227
num_examples: 264
- name: '1823'
num_bytes: 2030816
num_examples: 1113
- name: '1824'
num_bytes: 5171191
num_examples: 3110
- name: '1825'
num_bytes: 6341915
num_examples: 4005
- name: '1826'
num_bytes: 10462258
num_examples: 7079
- name: '1827'
num_bytes: 11634621
num_examples: 7213
- name: '1828'
num_bytes: 10253681
num_examples: 6350
- name: '1829'
num_bytes: 4021832
num_examples: 2296
- name: '1830'
num_bytes: 8321949
num_examples: 4232
- name: '1831'
num_bytes: 16796125
num_examples: 9699
- name: '1832'
num_bytes: 9982722
num_examples: 6565
- name: '1833'
num_bytes: 6653515
num_examples: 4108
- name: '1834'
num_bytes: 7099875
num_examples: 4632
- name: '1835'
num_bytes: 9066392
num_examples: 6168
- name: '1836'
num_bytes: 10473366
num_examples: 7375
- name: '1837'
num_bytes: 21002773
num_examples: 13609
- name: '1838'
num_bytes: 13735809
num_examples: 8492
- name: '1839'
num_bytes: 12512339
num_examples: 8938
- name: '1840'
num_bytes: 12647911
num_examples: 8052
- name: '1841'
num_bytes: 39146669
num_examples: 30019
- name: '1842'
num_bytes: 26218700
num_examples: 21290
- name: '1843'
num_bytes: 50447372
num_examples: 41657
- name: '1844'
num_bytes: 79351064
num_examples: 61373
- name: '1845'
num_bytes: 131632573
num_examples: 95921
- name: '1846'
num_bytes: 81086068
num_examples: 70331
- name: '1847'
num_bytes: 32733527
num_examples: 24354
- name: '1848'
num_bytes: 44577825
num_examples: 32531
- name: '1849'
num_bytes: 53877014
num_examples: 42711
- name: '1850'
num_bytes: 76697622
num_examples: 49992
- name: '1851'
num_bytes: 128372084
num_examples: 90184
- name: '1852'
num_bytes: 67005975
num_examples: 51172
- name: '1853'
num_bytes: 54210932
num_examples: 48130
- name: '1854'
num_bytes: 150406197
num_examples: 118825
- name: '1855'
num_bytes: 115893679
num_examples: 99390
- name: '1856'
num_bytes: 188859881
num_examples: 157592
- name: '1857'
num_bytes: 152841585
num_examples: 129179
- name: '1858'
num_bytes: 214657030
num_examples: 171877
- name: '1859'
num_bytes: 178711188
num_examples: 160924
- name: '1860'
num_bytes: 163889573
num_examples: 150590
- name: '1861'
num_bytes: 215595661
num_examples: 173990
- name: '1862'
num_bytes: 228323685
num_examples: 171021
- name: '1863'
num_bytes: 197294365
num_examples: 151485
- name: '1864'
num_bytes: 125113713
num_examples: 94415
- name: '1865'
num_bytes: 133515217
num_examples: 99728
- name: '1866'
num_bytes: 180768118
num_examples: 135316
- name: '1867'
num_bytes: 213571876
num_examples: 161180
- name: '1868'
num_bytes: 202156635
num_examples: 140521
- name: '1869'
num_bytes: 236506656
num_examples: 171455
- name: '1870'
num_bytes: 242779857
num_examples: 174061
- name: '1871'
num_bytes: 203189927
num_examples: 151652
- name: '1872'
num_bytes: 242624062
num_examples: 194784
- name: '1873'
num_bytes: 302626176
num_examples: 241902
- name: '1874'
num_bytes: 280814742
num_examples: 213813
- name: '1875'
num_bytes: 319815222
num_examples: 274269
- name: '1876'
num_bytes: 381483980
num_examples: 288199
- name: '1877'
num_bytes: 317703263
num_examples: 254946
- name: '1878'
num_bytes: 381274032
num_examples: 307865
- name: '1879'
num_bytes: 371703798
num_examples: 287784
- name: '1880'
num_bytes: 296465631
num_examples: 272352
- name: '1881'
num_bytes: 294568051
num_examples: 270228
- name: '1882'
num_bytes: 340511400
num_examples: 311920
- name: '1883'
num_bytes: 419078041
num_examples: 387589
- name: '1884'
num_bytes: 329666364
num_examples: 304242
- name: '1885'
num_bytes: 348144660
num_examples: 318732
- name: '1886'
num_bytes: 431746663
num_examples: 423718
- name: '1887'
num_bytes: 493647568
num_examples: 494559
- name: '1888'
num_bytes: 564523528
num_examples: 547165
- name: '1889'
num_bytes: 558168324
num_examples: 536750
- name: '1890'
num_bytes: 566964770
num_examples: 540615
- name: '1891'
num_bytes: 641124243
num_examples: 620461
- name: '1892'
num_bytes: 524812242
num_examples: 527044
- name: '1893'
num_bytes: 645853680
num_examples: 656805
- name: '1894'
num_bytes: 790577208
num_examples: 795408
- name: '1895'
num_bytes: 890097151
num_examples: 897766
- name: '1896'
num_bytes: 1235234882
num_examples: 1175701
- name: '1897'
num_bytes: 1252347746
num_examples: 1275895
- name: '1898'
num_bytes: 1286411001
num_examples: 1323842
- name: '1899'
num_bytes: 1176418162
num_examples: 1218682
- name: '1900'
num_bytes: 1069983237
num_examples: 1118970
- name: '1901'
num_bytes: 1478945214
num_examples: 1468648
- name: '1902'
num_bytes: 1376703767
num_examples: 1417935
- name: '1903'
num_bytes: 1255538379
num_examples: 1319686
- name: '1904'
num_bytes: 1232185827
num_examples: 1340868
- name: '1905'
num_bytes: 1563178627
num_examples: 1635134
- name: '1906'
num_bytes: 1632815247
num_examples: 1683643
- name: '1907'
num_bytes: 1647491794
num_examples: 1714613
- name: '1908'
num_bytes: 1771267430
num_examples: 1842874
- name: '1909'
num_bytes: 1844179657
num_examples: 1926228
- name: '1910'
num_bytes: 1589052587
num_examples: 1684263
- name: '1911'
num_bytes: 1402309564
num_examples: 1510259
- name: '1912'
num_bytes: 1621648367
num_examples: 1774149
- name: '1913'
num_bytes: 1613599136
num_examples: 1822206
- name: '1914'
num_bytes: 1736284455
num_examples: 1931901
- name: '1915'
num_bytes: 1690248452
num_examples: 1878654
- name: '1916'
num_bytes: 1633563499
num_examples: 1838797
- name: '1917'
num_bytes: 1605677226
num_examples: 1810757
- name: '1918'
num_bytes: 1803695589
num_examples: 1920102
- name: '1919'
num_bytes: 1831703767
num_examples: 1981192
- name: '1920'
num_bytes: 1901882705
num_examples: 2041192
- name: '1921'
num_bytes: 2264618667
num_examples: 2334112
- name: '1922'
num_bytes: 2372137567
num_examples: 2405974
- name: '1923'
num_bytes: 812177597
num_examples: 880372
- name: '1924'
num_bytes: 800835690
num_examples: 845520
- name: '1925'
num_bytes: 601426023
num_examples: 662322
- name: '1926'
num_bytes: 565307890
num_examples: 623765
- name: '1927'
num_bytes: 460501197
num_examples: 504835
- name: '1928'
num_bytes: 452526140
num_examples: 487302
- name: '1929'
num_bytes: 366246066
num_examples: 421909
- name: '1930'
num_bytes: 437657836
num_examples: 492695
- name: '1931'
num_bytes: 441972257
num_examples: 493816
- name: '1932'
num_bytes: 640501746
num_examples: 664615
- name: '1933'
num_bytes: 634373318
num_examples: 642380
- name: '1934'
num_bytes: 641841040
num_examples: 654342
- name: '1935'
num_bytes: 612406176
num_examples: 635554
- name: '1936'
num_bytes: 621035178
num_examples: 662015
- name: '1937'
num_bytes: 625107933
num_examples: 676549
- name: '1938'
num_bytes: 616370880
num_examples: 665274
- name: '1939'
num_bytes: 525913265
num_examples: 556283
- name: '1940'
num_bytes: 471830118
num_examples: 496662
- name: '1941'
num_bytes: 599694786
num_examples: 637200
- name: '1942'
num_bytes: 508785410
num_examples: 523923
- name: '1943'
num_bytes: 452079475
num_examples: 467200
- name: '1944'
num_bytes: 442871777
num_examples: 433769
- name: '1945'
num_bytes: 588623743
num_examples: 588477
- name: '1946'
num_bytes: 526027876
num_examples: 470895
- name: '1947'
num_bytes: 461281363
num_examples: 393086
- name: '1948'
num_bytes: 442999943
num_examples: 396660
- name: '1949'
num_bytes: 421752000
num_examples: 419854
- name: '1950'
num_bytes: 403717616
num_examples: 415416
- name: '1951'
num_bytes: 409600217
num_examples: 419622
- name: '1952'
num_bytes: 397051717
num_examples: 396420
- name: '1953'
num_bytes: 366253682
num_examples: 358332
- name: '1954'
num_bytes: 263197428
num_examples: 266338
- name: '1955'
num_bytes: 268993926
num_examples: 273576
- name: '1956'
num_bytes: 85126796
num_examples: 98035
- name: '1957'
num_bytes: 83757036
num_examples: 93543
- name: '1958'
num_bytes: 85807593
num_examples: 98688
- name: '1959'
num_bytes: 112707174
num_examples: 129452
- name: '1960'
num_bytes: 300484826
num_examples: 344550
- name: '1961'
num_bytes: 297225753
num_examples: 339076
- name: '1962'
num_bytes: 231525869
num_examples: 264724
- name: '1963'
num_bytes: 197520960
num_examples: 226859
download_size: 48388744959
dataset_size: 76303058024
---
# Dataset Card for "AmericanStories-parquet"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
Avelina/smollm-corpus | Avelina | "2025-01-11T16:41:28Z" | 15,023 | 5 | [
"task_categories:text-generation",
"language:en",
"license:odc-by",
"size_categories:100M<n<1B",
"region:us"
] | [
"text-generation"
] | "2025-01-11T01:39:39Z" | ---
license: odc-by
dataset_info:
- config_name: default
features:
- name: text
dtype: string
configs:
- config_name: default
data_files:
- split: train
path: data*/train-*
task_categories:
- text-generation
language:
- en
size_categories:
- 100M<n<1B
---
# SmolLM-Corpus: Now shuffled and sharded!
This is a version of the SmolLM-Corpus where the 3 subsets have been interleaved, shuffled and sharded as 23698 `jsonl.zst` files for easy streaming!
The dataset is comprised of the `cosmopedia-v2` and `fineweb-edu-dedup` subsets from the original [SmolLM-Corpus repo](https://huggingface.co/datasets/HuggingFaceTB/smollm-corpus), with the `python-edu` subset being pulled from my [python-edu repo](https://huggingface.co/datasets/Avelina/python-edu).
## Dataset Structure
The dataset is split into 24 subdirectories, with the first 23 containing 1000 shards and the 24th containing the final 698. The repository is structured as follows:
```
data00/
├── train-00000-of-23698.jsonl.zst
├── ...
└── train-00999-of-23698.jsonl.zst
data01/
├── train-01000-of-23698.jsonl.zst
├── ...
└── train-01999-of-23698.jsonl.zst
...
data22/
├── train-22000-of-23698.jsonl.zst
├── ...
└── train-22999-of-23698.jsonl.zst
data23/
├── train-23000-of-23698.jsonl.zst
├── ...
└── train-23697-of-23698.jsonl.zst
```
In general, you can obtain the exact download URL for all shards using the following python function:
```py
def get_url_from_shard( index: int ) -> str:
    """Return the exact download URL for shard `index` (0 <= index < 23,698).

    Raises:
        ValueError: if `index` is outside the valid shard range.
    """
    # Reject out-of-range indices in both directions; the original check only
    # caught the upper bound, so a negative index built a wrong URL silently.
    if not 0 <= index < 23_698:
        raise ValueError( f'Shard index must be in [0, 23698) but received {index}' )
    # Shards are grouped into sub-directories of 1000 files each (data00..data23)
    group = index // 1000
    return f'https://huggingface.co/datasets/Avelina/smollm-corpus/resolve/main/data{group:02d}/train-{index:05d}-of-23698.jsonl.zst'
```
## Generation Code
Here is the code which was used to generate the shuffled shards. Note the use of non-contiguous interleaving in an attempt to uniformly pull documents from across entire subsets to loosely decouple shard index from original document position.
Please make sure you `pip install zstandard`!!!
```py
import tqdm
import datasets
from datasets import load_dataset
# Output directory and file format. Note that the file extension enforces zst compression is used.
OUTPUT_FMT = '/YOUR/FILE/PATH/HERE/data/train-{index:05d}-of-{num_shards:05d}.jsonl.zst'
# Total number of shards giving approximately 10,000 documents per shard
OUTPUT_NUM_SHARDS = 23698
# Grab the three datasets
ds_python = load_dataset( 'Avelina/python-edu' )
ds_cosmo = load_dataset( 'HuggingFaceTB/smollm-corpus', 'cosmopedia-v2' )
ds_edu = load_dataset( 'HuggingFaceTB/smollm-corpus', 'fineweb-edu-dedup' )
# Retain only the text columns and the train splits
ds_python = ds_python.select_columns( 'text' )[ 'train' ]
ds_cosmo = ds_cosmo.select_columns( 'text' )[ 'train' ]
ds_edu = ds_edu.select_columns( 'text' )[ 'train' ]
# Iterate over all shards with a nice progbar.
# (Indentation restored: the loop body below must be nested inside the for-loop.)
for index in tqdm.tqdm( range( OUTPUT_NUM_SHARDS ) ):
    # Get non-contiguous in-memory sub-shards for the three datasets
    curr_python = ds_python.shard( num_shards=OUTPUT_NUM_SHARDS, index=index, contiguous=False, keep_in_memory=True )
    curr_cosmo  = ds_cosmo.shard(  num_shards=OUTPUT_NUM_SHARDS, index=index, contiguous=False, keep_in_memory=True )
    curr_edu    = ds_edu.shard(    num_shards=OUTPUT_NUM_SHARDS, index=index, contiguous=False, keep_in_memory=True )

    # Concatenate the sub-shards
    curr_shard = datasets.concatenate_datasets( [ curr_python, curr_cosmo, curr_edu ] )

    # Deterministically shuffle using the current shard index for reproducibility
    curr_shard = curr_shard.shuffle( seed=index, keep_in_memory=True )

    # Dump the shards to .jsonl.zst
    curr_shard.to_json( OUTPUT_FMT.format( index=index, num_shards=OUTPUT_NUM_SHARDS ) )
```
## In-Memory Decompression
Zstandard was chosen as it enables trivial in-memory decompression to minimise the storage impact of the dataset. Here is some example code which creates a python generator that yields each json line from a compressed shard stored at `file_name`, and a second function which creates a python generator that parses and yields the compressed shard.
```py
import json
from json import JSONDecodeError
import zstandard
def read_lines_zst( file_name ):
    """Yield each newline-delimited line (as str) from a zstd-compressed text file.

    Args:
        file_name: path to a `.jsonl.zst` (or any zstd-compressed text) file.

    Yields:
        str: one line at a time, without the trailing newline.
    """
    # Open the file for reading in binary mode
    with open( file_name, 'rb' ) as file_handle:
        # Buffer holds the trailing partial line (as raw bytes) between chunks
        buffer = b''
        # Create a streaming decompressor over the opened file
        reader = zstandard.ZstdDecompressor( max_window_size=2**31 ).stream_reader( file_handle )
        while True:
            # Read a chunk of up to 128MB of decompressed bytes
            chunk = reader.read( 2**27 )
            # If chunk is empty we've reached the end of the file and can break out
            if not chunk:
                break
            # Combine any prior buffer with the current chunk and split by newline.
            # Splitting on raw bytes (rather than decoding the whole chunk first)
            # avoids a UnicodeDecodeError when a multi-byte UTF-8 character
            # straddles a chunk boundary; each complete line is safe to decode.
            lines = ( buffer + chunk ).split( b'\n' )
            # Yield the full lines so far
            for line in lines[ : -1 ]:
                yield line.decode()
            # The last 'line' is incomplete, so place in buffer for next chunk
            buffer = lines[ -1 ]
        # Yield any final line that was not newline-terminated
        # (the original version silently dropped it at EOF)
        if buffer:
            yield buffer.decode()
        # Always remember to close your reader!
        reader.close()
def parse_jsonl_zst( file_name ):
    """Yield the 'text' field of every JSON line in a zstd-compressed JSONL shard.

    Malformed lines and lines missing the 'text' key are reported and skipped.
    """
    # Iterate over the yielded lines of the compressed shard
    for i, line in enumerate( read_lines_zst( file_name ) ):
        try:
            # Convert the line into a python dict and yield the text field
            yield json.loads( line )[ 'text' ]
        except ( KeyError, JSONDecodeError ):
            # Catch KeyError for 'text' not present in dict
            # Catch JSONDecodeError for malformed line
            print( f'JSON error @ shard={file_name}, line={i}' )
```
Of course you *could* use HuggingFace's in-built streaming mechanics to handle things for you, but in my experience that approach is less reliable, doesn't handle `JSONDecodeError`s if there are malformed lines, can cause memory leaks, and has forced sharding behaviour when used inside a multi-worker PyTorch `DataLoader` which I've not yet found a way to disable! |
INo0121/low_quality_call_voice_preprocessed | INo0121 | "2023-09-21T13:25:07Z" | 14,949 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2023-09-08T05:10:37Z" | ---
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
- split: valid
path: data/valid-*
dataset_info:
features:
- name: input_features
sequence:
sequence: float32
- name: labels
sequence: int64
splits:
- name: train
num_bytes: 64088254376
num_examples: 66720
- name: test
num_bytes: 7476961712
num_examples: 7784
- name: valid
num_bytes: 7476975416
num_examples: 7784
download_size: 521083513
dataset_size: 79042191504
---
# Dataset Card for "low_quality_call_voice_preprocessed"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
Anthropic/hh-rlhf | Anthropic | "2023-05-26T18:47:34Z" | 14,898 | 1,314 | [
"license:mit",
"size_categories:100K<n<1M",
"format:json",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"arxiv:2204.05862",
"region:us",
"human-feedback"
] | null | "2022-12-08T20:11:33Z" | ---
license: mit
tags:
- human-feedback
---
# Dataset Card for HH-RLHF
## Dataset Summary
This repository provides access to two different kinds of data:
1. Human preference data about helpfulness and harmlessness from [Training a Helpful and Harmless Assistant with Reinforcement Learning from Human Feedback](https://arxiv.org/abs/2204.05862). These data are meant to train preference (or reward) models for subsequent RLHF training. These data are *not* meant for supervised training of dialogue agents. Training dialogue agents on these data is likely to lead to harmful models and this should be avoided.
2. Human-generated and annotated red teaming dialogues from [Red Teaming Language Models to Reduce Harms: Methods, Scaling Behaviors, and Lessons Learned](https://www.anthropic.com/red_teaming.pdf). These data are meant to understand how crowdworkers red team models and what types of red team attacks are successful or not. The data are *not* meant for fine-tuning or preference modeling (use the data above for preference modeling). These data are entire transcripts of conversations that are derived from the harmlessness preference modeling data described above, where only the chosen response is incorporated into the overall transcript. Furthermore, the transcripts are annotated with human and automated measurements of how harmful the overall dialogues are.
**Disclaimer**: The data (especially the harmlessness preference data and the red team data) contain content that may be offensive or upsetting. Topics include, but are not limited to, discriminatory language and discussions of abuse, violence, self-harm, exploitation, and other potentially upsetting subject matter. Please only engage with the data in accordance with your own personal risk tolerance. The data are intended for research purposes, especially research that can make models *less* harmful. The views expressed in the data do not reflect the views of Anthropic or any of its employees. As mentioned above, these data are *not* intended for training dialogue agents as this will likely lead to harmful model behavior.
Each of these datasets are described further below.
## Human preference data about helpfulness and harmlessness (PM Data)
The data are described in the paper: [Training a Helpful and Harmless Assistant with Reinforcement Learning from Human Feedback](https://arxiv.org/abs/2204.05862). If you find the data useful, please cite the paper. The data format is very simple -- each line of the jsonl files contains a pair of texts, one "chosen" and one "rejected".
For **helpfulness**, the data are grouped into train/test splits in three tranches: from our base models (context-distilled 52B language models), via rejection sampling (mostly with best-of-16 sampling) against an early preference model, and a dataset sampled during our iterated "online" process.
For **harmlessness**, the data are only collected for our base models, but otherwise formatted in the same way.
Details about the data collection process and crowdworker population can be found in the paper, specifically in section 2 and appendix D.
## Red teaming data (not PM Data)
The data are described in the paper: [Red Teaming Language Models to Reduce Harms: Methods, Scaling Behaviors, and Lessons Learned](https://www.anthropic.com/red_teaming.pdf). If you find the data useful, please cite the paper. At a high level these data can be examined to understand what successful red team attempts look like.
Details about the data and data collection procedures can be found in the Datasheet in the appendix of the paper.
Each line of the jsonl file contains a dictionary with the following fields:
- `transcript` a text transcript of a conversation between a human adversary (red team member) and an AI assistant
- `min_harmlessness_score_transcript` a real value score of the harmlessness of the AI assistant (lower is more harmful) as obtained from a preference model
- `num_params` number of parameters in the language model powering the AI assistant
- `model_type` type of model powering the AI assistant
- `rating` the red team member's rating of how successful they were at breaking the AI assistant (Likert scale, higher is more successful)
- `task_description` a short text description written by the red team member about how they tried to red team the AI assistant
- `task_description_harmlessness_score` a real value score of the harmlessness of the task description (lower is more harmful) as obtained from a preference model
- `red_team_member_id` an arbitrary identifier of the red team member. one red team member can generate multiple red team attacks
- `is_upworker` a binary indicator that is true if the red team member was from the crowd platform Upwork or false if they were from MTurk
- `tags` a list of up to 6 tags per transcript. tags are short descriptions of the red team attempts generated by crowdworkers who reviewed red team data post-hoc. tags were only provided for a random sample of 1000 red team attempts for two of four model types.
## Usage
Each of the above datasets is located in a separate sub-directory. To load an individual subset, use the `data_dir` argument of the `load_dataset()` function as follows:
```python
from datasets import load_dataset
# Load all helpfulness/harmless subsets (share the same schema)
dataset = load_dataset("Anthropic/hh-rlhf")
# Load one of the harmless subsets
dataset = load_dataset("Anthropic/hh-rlhf", data_dir="harmless-base")
# Load the red teaming subset
dataset = load_dataset("Anthropic/hh-rlhf", data_dir="red-team-attempts")
```
## Contact
The original authors host this dataset on GitHub here: https://github.com/anthropics/hh-rlhf
You can submit inquiries to: [email protected] |
spdenisov/tokenized_udtree | spdenisov | "2023-03-28T21:56:12Z" | 14,872 | 0 | [
"size_categories:1M<n<10M",
"format:parquet",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2023-03-28T21:50:04Z" | ---
dataset_info:
features:
- name: input_ids
sequence: int32
- name: attention_mask
sequence: int8
splits:
- name: cs_0
num_bytes: 73985244
num_examples: 102133
- name: cs_1
num_bytes: 95459594
num_examples: 102133
- name: cs_2
num_bytes: 95354064
num_examples: 102133
- name: cs_3
num_bytes: 128817619
num_examples: 102133
- name: cs_4
num_bytes: 236925044
num_examples: 102133
- name: cs_5
num_bytes: 115688159
num_examples: 102133
- name: cs_6
num_bytes: 132404489
num_examples: 102133
- name: tr_0
num_bytes: 28666902
num_examples: 60089
- name: tr_1
num_bytes: 31887742
num_examples: 60089
- name: tr_2
num_bytes: 31749302
num_examples: 60089
- name: tr_3
num_bytes: 28498032
num_examples: 60089
- name: tr_4
num_bytes: 57177672
num_examples: 60089
- name: tr_5
num_bytes: 37804587
num_examples: 60089
- name: tr_6
num_bytes: 28280762
num_examples: 60089
- name: ar_0
num_bytes: 32848442
num_examples: 21864
- name: ar_1
num_bytes: 49955197
num_examples: 21864
- name: ar_2
num_bytes: 49285292
num_examples: 21864
- name: ar_3
num_bytes: 69585617
num_examples: 21864
- name: ar_4
num_bytes: 91649737
num_examples: 21864
- name: ar_5
num_bytes: 59303592
num_examples: 21864
- name: ar_6
num_bytes: 50935047
num_examples: 21864
- name: de_0
num_bytes: 112997417
num_examples: 166849
- name: de_1
num_bytes: 149332477
num_examples: 166849
- name: de_2
num_bytes: 157628127
num_examples: 166849
- name: de_3
num_bytes: 155444887
num_examples: 166849
- name: de_4
num_bytes: 309419752
num_examples: 166849
- name: de_5
num_bytes: 191783977
num_examples: 166849
- name: de_6
num_bytes: 138689312
num_examples: 166849
- name: fr_0
num_bytes: 27905013
num_examples: 34921
- name: fr_1
num_bytes: 41237113
num_examples: 34921
- name: fr_2
num_bytes: 45655098
num_examples: 34921
- name: fr_3
num_bytes: 39973853
num_examples: 34921
- name: fr_4
num_bytes: 76420558
num_examples: 34921
- name: fr_5
num_bytes: 56197173
num_examples: 34921
- name: fr_6
num_bytes: 39938223
num_examples: 34921
- name: no_0
num_bytes: 19584526
num_examples: 33282
- name: no_1
num_bytes: 25823376
num_examples: 33282
- name: no_2
num_bytes: 26954416
num_examples: 33282
- name: no_3
num_bytes: 23459636
num_examples: 33282
- name: no_4
num_bytes: 43762856
num_examples: 33282
- name: no_5
num_bytes: 32578281
num_examples: 33282
- name: no_6
num_bytes: 23459636
num_examples: 33282
- name: pt_0
num_bytes: 12627085
num_examples: 30720
- name: pt_1
num_bytes: 16475005
num_examples: 30720
- name: pt_2
num_bytes: 17295815
num_examples: 30720
- name: pt_3
num_bytes: 16917200
num_examples: 30720
- name: pt_4
num_bytes: 24168495
num_examples: 30720
- name: pt_5
num_bytes: 20520155
num_examples: 30720
- name: pt_6
num_bytes: 15115165
num_examples: 30720
- name: es_0
num_bytes: 27551907
num_examples: 28474
- name: es_1
num_bytes: 39391152
num_examples: 28474
- name: es_2
num_bytes: 42349787
num_examples: 28474
- name: es_3
num_bytes: 43743597
num_examples: 28474
- name: es_4
num_bytes: 69878787
num_examples: 28474
- name: es_5
num_bytes: 51203677
num_examples: 28474
- name: es_6
num_bytes: 46914367
num_examples: 28474
- name: ru_0
num_bytes: 57566900
num_examples: 89525
- name: ru_1
num_bytes: 74853550
num_examples: 89525
- name: ru_2
num_bytes: 76555950
num_examples: 89525
- name: ru_3
num_bytes: 67072565
num_examples: 89525
- name: ru_4
num_bytes: 155012405
num_examples: 89525
- name: ru_5
num_bytes: 92396515
num_examples: 89525
- name: ru_6
num_bytes: 98333345
num_examples: 89525
- name: en_0
num_bytes: 14945668
num_examples: 28686
- name: en_1
num_bytes: 20836733
num_examples: 28686
- name: en_2
num_bytes: 23313373
num_examples: 28686
- name: en_3
num_bytes: 21978133
num_examples: 28686
- name: en_4
num_bytes: 32732303
num_examples: 28686
- name: en_5
num_bytes: 28539183
num_examples: 28686
- name: en_6
num_bytes: 28399343
num_examples: 28686
- name: fi_0
num_bytes: 14729969
num_examples: 27198
- name: fi_1
num_bytes: 17656509
num_examples: 27198
- name: fi_2
num_bytes: 16915489
num_examples: 27198
- name: fi_3
num_bytes: 18732354
num_examples: 27198
- name: fi_4
num_bytes: 29894674
num_examples: 27198
- name: fi_5
num_bytes: 20079089
num_examples: 27198
- name: fi_6
num_bytes: 18874279
num_examples: 27198
- name: gd_0
num_bytes: 2829948
num_examples: 3541
- name: gd_1
num_bytes: 3700318
num_examples: 3541
- name: gd_2
num_bytes: 3798313
num_examples: 3541
- name: gd_3
num_bytes: 3907648
num_examples: 3541
- name: gd_4
num_bytes: 5359963
num_examples: 3541
- name: gd_5
num_bytes: 4693368
num_examples: 3541
- name: gd_6
num_bytes: 3383253
num_examples: 3541
- name: gv_0
num_bytes: 456221
num_examples: 1172
- name: gv_1
num_bytes: 597391
num_examples: 1172
- name: gv_2
num_bytes: 609501
num_examples: 1172
- name: gv_3
num_bytes: 542486
num_examples: 1172
- name: gv_4
num_bytes: 785231
num_examples: 1172
- name: gv_5
num_bytes: 729026
num_examples: 1172
- name: gv_6
num_bytes: 542486
num_examples: 1172
- name: ga_0
num_bytes: 3928820
num_examples: 4005
- name: ga_1
num_bytes: 5021230
num_examples: 4005
- name: ga_2
num_bytes: 5059580
num_examples: 4005
- name: ga_3
num_bytes: 4843745
num_examples: 4005
- name: ga_4
num_bytes: 9085760
num_examples: 4005
- name: ga_5
num_bytes: 6197075
num_examples: 4005
- name: ga_6
num_bytes: 4483365
num_examples: 4005
- name: cop_0
num_bytes: 4660032
num_examples: 1379
- name: cop_1
num_bytes: 5726842
num_examples: 1379
- name: cop_2
num_bytes: 4508942
num_examples: 1379
- name: cop_3
num_bytes: 4496787
num_examples: 1379
- name: cop_4
num_bytes: 5425137
num_examples: 1379
- name: cop_5
num_bytes: 4907442
num_examples: 1379
- name: cop_6
num_bytes: 4284382
num_examples: 1379
- name: it_0
num_bytes: 17989232
num_examples: 21724
- name: it_1
num_bytes: 25839627
num_examples: 21724
- name: it_2
num_bytes: 27448052
num_examples: 21724
- name: it_3
num_bytes: 24875027
num_examples: 21724
- name: it_4
num_bytes: 43731272
num_examples: 21724
- name: it_5
num_bytes: 33091747
num_examples: 21724
- name: it_6
num_bytes: 30955017
num_examples: 21724
- name: cy_0
num_bytes: 907518
num_examples: 1111
- name: cy_1
num_bytes: 1180383
num_examples: 1111
- name: cy_2
num_bytes: 1192068
num_examples: 1111
- name: cy_3
num_bytes: 1123428
num_examples: 1111
- name: cy_4
num_bytes: 1834888
num_examples: 1111
- name: cy_5
num_bytes: 1439843
num_examples: 1111
- name: cy_6
num_bytes: 1055223
num_examples: 1111
- name: hu_0
num_bytes: 858340
num_examples: 910
- name: hu_1
num_bytes: 1088085
num_examples: 910
- name: hu_2
num_bytes: 1086220
num_examples: 910
- name: hu_3
num_bytes: 957490
num_examples: 910
- name: hu_4
num_bytes: 1964920
num_examples: 910
- name: hu_5
num_bytes: 1370660
num_examples: 910
- name: hu_6
num_bytes: 957490
num_examples: 910
- name: zh_0
num_bytes: 9051347
num_examples: 7994
- name: zh_1
num_bytes: 12537582
num_examples: 7994
- name: zh_2
num_bytes: 11419717
num_examples: 7994
- name: zh_3
num_bytes: 10888407
num_examples: 7994
- name: zh_4
num_bytes: 10558847
num_examples: 7994
- name: zh_5
num_bytes: 13867342
num_examples: 7994
- name: zh_6
num_bytes: 10167967
num_examples: 7994
- name: hy_0
num_bytes: 5120790
num_examples: 3200
- name: hy_1
num_bytes: 5762195
num_examples: 3200
- name: hy_2
num_bytes: 4712195
num_examples: 3200
- name: hy_3
num_bytes: 4260805
num_examples: 3200
- name: hy_4
num_bytes: 8546900
num_examples: 3200
- name: hy_5
num_bytes: 5442440
num_examples: 3200
- name: hy_6
num_bytes: 4260805
num_examples: 3200
- name: ro_0
num_bytes: 6894274
num_examples: 8043
- name: ro_1
num_bytes: 9156564
num_examples: 8043
- name: ro_2
num_bytes: 9493574
num_examples: 8043
- name: ro_3
num_bytes: 10830604
num_examples: 8043
- name: ro_4
num_bytes: 20320209
num_examples: 8043
- name: ro_5
num_bytes: 11507314
num_examples: 8043
- name: ro_6
num_bytes: 8300564
num_examples: 8043
- name: da_0
num_bytes: 2963139
num_examples: 4383
- name: da_1
num_bytes: 3945104
num_examples: 4383
- name: da_2
num_bytes: 4115634
num_examples: 4383
- name: da_3
num_bytes: 3583269
num_examples: 4383
- name: da_4
num_bytes: 7089004
num_examples: 4383
- name: da_5
num_bytes: 4981724
num_examples: 4383
- name: da_6
num_bytes: 3583269
num_examples: 4383
- name: nl_0
num_bytes: 6741817
num_examples: 12289
- name: nl_1
num_bytes: 8989392
num_examples: 12289
- name: nl_2
num_bytes: 9389757
num_examples: 12289
- name: nl_3
num_bytes: 16004832
num_examples: 12289
- name: nl_4
num_bytes: 12089687
num_examples: 12289
- name: nl_5
num_bytes: 11410547
num_examples: 12289
- name: nl_6
num_bytes: 12631912
num_examples: 12289
download_size: 934434422
dataset_size: 5264208717
---
# Dataset Card for "tokenized_udtree"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
Azathothas/temp | Azathothas | "2025-04-08T06:14:56Z" | 14,862 | 0 | [
"license:unlicense",
"region:us"
] | null | "2025-03-18T03:49:14Z" | ---
license: unlicense
---
> [!NOTE]
> This serves as a way to quickly share temp files with other devs, so the files here are ephemeral & often removed |
fixie-ai/covost2 | fixie-ai | "2024-08-27T20:58:08Z" | 14,834 | 2 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-07-16T23:40:52Z" | ---
dataset_info:
- config_name: ar_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 61607709.192
num_examples: 2283
- name: validation
num_bytes: 56223234.024
num_examples: 1758
- name: test
num_bytes: 54650910.41
num_examples: 1695
download_size: 160468333
dataset_size: 172481853.626
- config_name: ca_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 4397026262.322
num_examples: 95854
- name: validation
num_bytes: 544108371.96
num_examples: 12730
- name: test
num_bytes: 604755238.63
num_examples: 12730
download_size: 4957773433
dataset_size: 5545889872.912
- config_name: cy_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 51478765.087
num_examples: 1241
- name: validation
num_bytes: 26992697.0
num_examples: 690
- name: test
num_bytes: 28772216.0
num_examples: 690
download_size: 102604972
dataset_size: 107243678.087
- config_name: de_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 5680326209.222
num_examples: 127834
- name: validation
num_bytes: 631442490.202
num_examples: 13511
- name: test
num_bytes: 637042944.685
num_examples: 13511
download_size: 6490850158
dataset_size: 6948811644.108999
- config_name: en_ar
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 14105902817.18
num_examples: 289430
- name: validation
num_bytes: 718527564.808
num_examples: 15531
- name: test
num_bytes: 729114452.301
num_examples: 15531
download_size: 13815709729
dataset_size: 15553544834.289001
- config_name: en_ca
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 14099092976.18
num_examples: 289430
- name: validation
num_bytes: 718171719.808
num_examples: 15531
- name: test
num_bytes: 728790610.301
num_examples: 15531
download_size: 13814365593
dataset_size: 15546055306.289001
- config_name: en_cy
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 14098487703.18
num_examples: 289430
- name: validation
num_bytes: 718141953.808
num_examples: 15531
- name: test
num_bytes: 728793811.301
num_examples: 15531
download_size: 13813953593
dataset_size: 15545423468.289001
- config_name: en_de
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 14099886814.18
num_examples: 289430
- name: validation
num_bytes: 718219105.808
num_examples: 15531
- name: test
num_bytes: 728857067.301
num_examples: 15531
download_size: 13815103686
dataset_size: 15546962987.289001
- config_name: en_et
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 14096877545.18
num_examples: 289430
- name: validation
num_bytes: 718057559.808
num_examples: 15531
- name: test
num_bytes: 728710692.301
num_examples: 15531
download_size: 13813410823
dataset_size: 15543645797.289001
- config_name: en_fa
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 14108661241.18
num_examples: 289430
- name: validation
num_bytes: 718670909.808
num_examples: 15531
- name: test
num_bytes: 729271000.301
num_examples: 15531
download_size: 13816798013
dataset_size: 15556603151.289001
- config_name: en_id
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 14098627451.18
num_examples: 289430
- name: validation
num_bytes: 718144327.808
num_examples: 15531
- name: test
num_bytes: 728802322.301
num_examples: 15531
download_size: 13813201260
dataset_size: 15545574101.289001
- config_name: en_ja
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 14103911774.18
num_examples: 289430
- name: validation
num_bytes: 718409304.808
num_examples: 15531
- name: test
num_bytes: 729050991.301
num_examples: 15531
download_size: 13815875328
dataset_size: 15551372070.289001
- config_name: en_lv
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 14098703097.18
num_examples: 289430
- name: validation
num_bytes: 718152571.808
num_examples: 15531
- name: test
num_bytes: 728792572.301
num_examples: 15531
download_size: 13814849886
dataset_size: 15545648241.289001
- config_name: en_mn
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 14113120657.18
num_examples: 289430
- name: validation
num_bytes: 718940418.808
num_examples: 15531
- name: test
num_bytes: 729461016.301
num_examples: 15531
download_size: 13819427515
dataset_size: 15561522092.289001
- config_name: en_sl
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 14097158381.18
num_examples: 289430
- name: validation
num_bytes: 718085673.808
num_examples: 15531
- name: test
num_bytes: 728705188.301
num_examples: 15531
download_size: 13813603812
dataset_size: 15543949243.289001
- config_name: en_sv-SE
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 14097728051.18
num_examples: 289430
- name: validation
num_bytes: 718093292.808
num_examples: 15531
- name: test
num_bytes: 728747422.301
num_examples: 15531
download_size: 13813332908
dataset_size: 15544568766.289001
- config_name: en_ta
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 14135489205.18
num_examples: 289430
- name: validation
num_bytes: 720191394.808
num_examples: 15531
- name: test
num_bytes: 730578783.301
num_examples: 15531
download_size: 13825121271
dataset_size: 15586259383.289001
- config_name: en_tr
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 14098644786.18
num_examples: 289430
- name: validation
num_bytes: 718161996.808
num_examples: 15531
- name: test
num_bytes: 728786654.301
num_examples: 15531
download_size: 13814279798
dataset_size: 15545593437.289001
- config_name: en_zh-CN
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 14095661460.18
num_examples: 289430
- name: validation
num_bytes: 717982705.808
num_examples: 15531
- name: test
num_bytes: 728655191.301
num_examples: 15531
download_size: 13812699892
dataset_size: 15542299357.289001
- config_name: es_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: validation
num_bytes: 630615357.241
num_examples: 13221
- name: test
num_bytes: 666447063.067
num_examples: 13221
- name: train
num_bytes: 3769457359.8
num_examples: 79015
download_size: 4531969416
dataset_size: 5066519780.108
- config_name: et_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 97124727.544
num_examples: 1782
- name: validation
num_bytes: 80290798.168
num_examples: 1576
- name: test
num_bytes: 81970364.51
num_examples: 1571
download_size: 257604448
dataset_size: 259385890.222
- config_name: fa_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 1872724297.149
num_examples: 53949
- name: validation
num_bytes: 140067911.23
num_examples: 3445
- name: test
num_bytes: 149319550.35
num_examples: 3445
download_size: 1679853440
dataset_size: 2162111758.729
- config_name: fr_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: validation
num_bytes: 632191608.84
num_examples: 14760
- name: test
num_bytes: 698178059.08
num_examples: 14760
- name: train
num_bytes: 8128016830.77
num_examples: 207374
download_size: 8900934523
dataset_size: 9458386498.69
- config_name: id_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 36136135.768
num_examples: 1243
- name: validation
num_bytes: 25058845.0
num_examples: 792
- name: test
num_bytes: 26577467.0
num_examples: 844
download_size: 86110062
dataset_size: 87772447.768
- config_name: it_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 1517510665.568
num_examples: 31698
- name: validation
num_bytes: 422409218.1
num_examples: 8940
- name: test
num_bytes: 454569171.595
num_examples: 8951
download_size: 2125529183
dataset_size: 2394489055.2630005
- config_name: ja_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 38181610.624
num_examples: 1119
- name: validation
num_bytes: 24623052.0
num_examples: 635
- name: test
num_bytes: 25558787.0
num_examples: 684
download_size: 88228548
dataset_size: 88363449.624
- config_name: lv_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 66152116.328
num_examples: 2337
- name: validation
num_bytes: 32655276.0
num_examples: 1125
- name: test
num_bytes: 50997551.638
num_examples: 1629
download_size: 137700207
dataset_size: 149804943.96600002
- config_name: mn_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 87891433.547
num_examples: 2067
- name: validation
num_bytes: 77519039.943
num_examples: 1761
- name: test
num_bytes: 83667460.167
num_examples: 1759
download_size: 242638800
dataset_size: 249077933.657
- config_name: nl_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 216102081.4
num_examples: 7108
- name: validation
num_bytes: 55386349.319
num_examples: 1699
- name: test
num_bytes: 60219179.711
num_examples: 1699
download_size: 320267264
dataset_size: 331707610.43
- config_name: pt_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 274723273.528
num_examples: 9158
- name: validation
num_bytes: 118345891.704
num_examples: 3318
- name: test
num_bytes: 166247624.001
num_examples: 4023
download_size: 540891735
dataset_size: 559316789.233
- config_name: ru_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 557219472.672
num_examples: 12112
- name: validation
num_bytes: 290218427.6
num_examples: 6110
- name: test
num_bytes: 312622838.0
num_examples: 6300
download_size: 1112848246
dataset_size: 1160060738.272
- config_name: sl_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 55992153.0
num_examples: 1843
- name: validation
num_bytes: 15074155.0
num_examples: 509
- name: test
num_bytes: 10209711.0
num_examples: 360
download_size: 83863293
dataset_size: 81276019.0
- config_name: sv-SE_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 48298330.64
num_examples: 2160
- name: validation
num_bytes: 32544646.416
num_examples: 1349
- name: test
num_bytes: 46894324.615
num_examples: 1595
download_size: 121860373
dataset_size: 127737301.671
- config_name: ta_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 47757197.616
num_examples: 1358
- name: validation
num_bytes: 13670695.0
num_examples: 384
- name: test
num_bytes: 29891516.0
num_examples: 786
download_size: 87791516
dataset_size: 91319408.616
- config_name: tr_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: train
num_bytes: 119299427.798
num_examples: 3966
- name: validation
num_bytes: 52552534.232
num_examples: 1624
- name: test
num_bytes: 59106253.862
num_examples: 1629
download_size: 224018260
dataset_size: 230958215.89200002
- config_name: zh-CN_en
features:
- name: client_id
dtype: string
- name: file
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
- name: translation
dtype: string
- name: id
dtype: string
splits:
- name: validation
num_bytes: 231018998.33
num_examples: 4843
- name: test
num_bytes: 243850956.45
num_examples: 4898
- name: train
num_bytes: 341425113.6
num_examples: 7085
download_size: 766660661
dataset_size: 816295068.38
configs:
- config_name: ar_en
data_files:
- split: train
path: ar_en/train-*
- split: validation
path: ar_en/validation-*
- split: test
path: ar_en/test-*
- config_name: ca_en
data_files:
- split: train
path: ca_en/train-*
- split: validation
path: ca_en/validation-*
- split: test
path: ca_en/test-*
- config_name: cy_en
data_files:
- split: train
path: cy_en/train-*
- split: validation
path: cy_en/validation-*
- split: test
path: cy_en/test-*
- config_name: de_en
data_files:
- split: train
path: de_en/train-*
- split: validation
path: de_en/validation-*
- split: test
path: de_en/test-*
- config_name: en_ar
data_files:
- split: train
path: en_ar/train-*
- split: validation
path: en_ar/validation-*
- split: test
path: en_ar/test-*
- config_name: en_ca
data_files:
- split: train
path: en_ca/train-*
- split: validation
path: en_ca/validation-*
- split: test
path: en_ca/test-*
- config_name: en_cy
data_files:
- split: train
path: en_cy/train-*
- split: validation
path: en_cy/validation-*
- split: test
path: en_cy/test-*
- config_name: en_de
data_files:
- split: train
path: en_de/train-*
- split: validation
path: en_de/validation-*
- split: test
path: en_de/test-*
- config_name: en_et
data_files:
- split: train
path: en_et/train-*
- split: validation
path: en_et/validation-*
- split: test
path: en_et/test-*
- config_name: en_fa
data_files:
- split: train
path: en_fa/train-*
- split: validation
path: en_fa/validation-*
- split: test
path: en_fa/test-*
- config_name: en_id
data_files:
- split: train
path: en_id/train-*
- split: validation
path: en_id/validation-*
- split: test
path: en_id/test-*
- config_name: en_ja
data_files:
- split: train
path: en_ja/train-*
- split: validation
path: en_ja/validation-*
- split: test
path: en_ja/test-*
- config_name: en_lv
data_files:
- split: train
path: en_lv/train-*
- split: validation
path: en_lv/validation-*
- split: test
path: en_lv/test-*
- config_name: en_mn
data_files:
- split: train
path: en_mn/train-*
- split: validation
path: en_mn/validation-*
- split: test
path: en_mn/test-*
- config_name: en_sl
data_files:
- split: train
path: en_sl/train-*
- split: validation
path: en_sl/validation-*
- split: test
path: en_sl/test-*
- config_name: en_sv-SE
data_files:
- split: train
path: en_sv-SE/train-*
- split: validation
path: en_sv-SE/validation-*
- split: test
path: en_sv-SE/test-*
- config_name: en_ta
data_files:
- split: train
path: en_ta/train-*
- split: validation
path: en_ta/validation-*
- split: test
path: en_ta/test-*
- config_name: en_tr
data_files:
- split: train
path: en_tr/train-*
- split: validation
path: en_tr/validation-*
- split: test
path: en_tr/test-*
- config_name: en_zh-CN
data_files:
- split: train
path: en_zh-CN/train-*
- split: validation
path: en_zh-CN/validation-*
- split: test
path: en_zh-CN/test-*
- config_name: es_en
data_files:
- split: validation
path: es_en/validation-*
- split: test
path: es_en/test-*
- split: train
path: es_en/train-*
- config_name: et_en
data_files:
- split: train
path: et_en/train-*
- split: validation
path: et_en/validation-*
- split: test
path: et_en/test-*
- config_name: fa_en
data_files:
- split: train
path: fa_en/train-*
- split: validation
path: fa_en/validation-*
- split: test
path: fa_en/test-*
- config_name: fr_en
data_files:
- split: validation
path: fr_en/validation-*
- split: test
path: fr_en/test-*
- split: train
path: fr_en/train-*
- config_name: id_en
data_files:
- split: train
path: id_en/train-*
- split: validation
path: id_en/validation-*
- split: test
path: id_en/test-*
- config_name: it_en
data_files:
- split: train
path: it_en/train-*
- split: validation
path: it_en/validation-*
- split: test
path: it_en/test-*
- config_name: ja_en
data_files:
- split: train
path: ja_en/train-*
- split: validation
path: ja_en/validation-*
- split: test
path: ja_en/test-*
- config_name: lv_en
data_files:
- split: train
path: lv_en/train-*
- split: validation
path: lv_en/validation-*
- split: test
path: lv_en/test-*
- config_name: mn_en
data_files:
- split: train
path: mn_en/train-*
- split: validation
path: mn_en/validation-*
- split: test
path: mn_en/test-*
- config_name: nl_en
data_files:
- split: train
path: nl_en/train-*
- split: validation
path: nl_en/validation-*
- split: test
path: nl_en/test-*
- config_name: pt_en
data_files:
- split: train
path: pt_en/train-*
- split: validation
path: pt_en/validation-*
- split: test
path: pt_en/test-*
- config_name: ru_en
data_files:
- split: train
path: ru_en/train-*
- split: validation
path: ru_en/validation-*
- split: test
path: ru_en/test-*
- config_name: sl_en
data_files:
- split: train
path: sl_en/train-*
- split: validation
path: sl_en/validation-*
- split: test
path: sl_en/test-*
- config_name: sv-SE_en
data_files:
- split: train
path: sv-SE_en/train-*
- split: validation
path: sv-SE_en/validation-*
- split: test
path: sv-SE_en/test-*
- config_name: ta_en
data_files:
- split: train
path: ta_en/train-*
- split: validation
path: ta_en/validation-*
- split: test
path: ta_en/test-*
- config_name: tr_en
data_files:
- split: train
path: tr_en/train-*
- split: validation
path: tr_en/validation-*
- split: test
path: tr_en/test-*
- config_name: zh-CN_en
data_files:
- split: validation
path: zh-CN_en/validation-*
- split: test
path: zh-CN_en/test-*
- split: train
path: zh-CN_en/train-*
---
This is a partial copy of [CoVoST2](https://huggingface.co/datasets/facebook/covost2) dataset.
The main difference is that the audio data is included in the dataset, which makes usage easier and allows browsing the samples using HF Dataset Viewer.
The limitation of this method is that all audio samples of the `EN_XX` subsets are duplicated, which makes the dataset larger.
For this reason, not all the data is included: only the `validation` and `test` subsets are available.
From the `XX_EN` subsets, only `fr`, `es`, and `zh-CN` are included. |
HAERAE-HUB/KMMLU-HARD | HAERAE-HUB | "2024-03-09T23:46:06Z" | 14,798 | 8 | [
"task_categories:question-answering",
"language:ko",
"license:cc-by-nd-4.0",
"size_categories:1K<n<10K",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2402.11548",
"region:us",
"haerae",
"mmlu"
] | [
"question-answering"
] | "2024-01-12T05:49:07Z" | ---
configs:
- config_name: maritime_engineering
data_files:
- split: dev
path: data/maritime_engineering-dev.csv
- split: test
path: data/maritime_engineering-hard-test.csv
- config_name: materials_engineering
data_files:
- split: dev
path: data/materials_engineering-dev.csv
- split: test
path: data/materials_engineering-hard-test.csv
- config_name: railway_and_automotive_engineering
data_files:
- split: dev
path: data/railway_and_automotive_engineering-dev.csv
- split: test
path: data/railway_and_automotive_engineering-hard-test.csv
- config_name: biology
data_files:
- split: dev
path: data/biology-dev.csv
- split: test
path: data/biology-hard-test.csv
- config_name: public_safety
data_files:
- split: dev
path: data/public_safety-dev.csv
- split: test
path: data/public_safety-hard-test.csv
- config_name: criminal_law
data_files:
- split: dev
path: data/criminal_law-dev.csv
- split: test
path: data/criminal_law-hard-test.csv
- config_name: information_technology
data_files:
- split: dev
path: data/information_technology-dev.csv
- split: test
path: data/information_technology-hard-test.csv
- config_name: geomatics
data_files:
- split: dev
path: data/geomatics-dev.csv
- split: test
path: data/geomatics-hard-test.csv
- config_name: management
data_files:
- split: dev
path: data/management-dev.csv
- split: test
path: data/management-hard-test.csv
- config_name: math
data_files:
- split: dev
path: data/math-dev.csv
- split: test
path: data/math-hard-test.csv
- config_name: accounting
data_files:
- split: dev
path: data/accounting-dev.csv
- split: test
path: data/accounting-hard-test.csv
- config_name: chemistry
data_files:
- split: dev
path: data/chemistry-dev.csv
- split: test
path: data/chemistry-hard-test.csv
- config_name: nondestructive_testing
data_files:
- split: dev
path: data/nondestructive_testing-dev.csv
- split: test
path: data/nondestructive_testing-hard-test.csv
- config_name: computer_science
data_files:
- split: dev
path: data/computer_science-dev.csv
- split: test
path: data/computer_science-hard-test.csv
- config_name: ecology
data_files:
- split: dev
path: data/ecology-dev.csv
- split: test
path: data/ecology-hard-test.csv
- config_name: health
data_files:
- split: dev
path: data/health-dev.csv
- split: test
path: data/health-hard-test.csv
- config_name: political_science_and_sociology
data_files:
- split: dev
path: data/political_science_and_sociology-dev.csv
- split: test
path: data/political_science_and_sociology-hard-test.csv
- config_name: patent
data_files:
- split: dev
path: data/patent-dev.csv
- split: test
path: data/patent-hard-test.csv
- config_name: electrical_engineering
data_files:
- split: dev
path: data/electrical_engineering-dev.csv
- split: test
path: data/electrical_engineering-hard-test.csv
- config_name: electronics_engineering
data_files:
- split: dev
path: data/electronics_engineering-dev.csv
- split: test
path: data/electronics_engineering-hard-test.csv
- config_name: korean_history
data_files:
- split: dev
path: data/korean_history-dev.csv
- split: test
path: data/korean_history-hard-test.csv
- config_name: gas_technology_and_engineering
data_files:
- split: dev
path: data/gas_technology_and_engineering-dev.csv
- split: test
path: data/gas_technology_and_engineering-hard-test.csv
- config_name: machine_design_and_manufacturing
data_files:
- split: dev
path: data/machine_design_and_manufacturing-dev.csv
- split: test
path: data/machine_design_and_manufacturing-hard-test.csv
- config_name: chemical_engineering
data_files:
- split: dev
path: data/chemical_engineering-dev.csv
- split: test
path: data/chemical_engineering-hard-test.csv
- config_name: telecommunications_and_wireless_technology
data_files:
- split: dev
path: data/telecommunications_and_wireless_technology-dev.csv
- split: test
path: data/telecommunications_and_wireless_technology-hard-test.csv
- config_name: food_processing
data_files:
- split: dev
path: data/food_processing-dev.csv
- split: test
path: data/food_processing-hard-test.csv
- config_name: social_welfare
data_files:
- split: dev
path: data/social_welfare-dev.csv
- split: test
path: data/social_welfare-hard-test.csv
- config_name: real_estate
data_files:
- split: dev
path: data/real_estate-dev.csv
- split: test
path: data/real_estate-hard-test.csv
- config_name: marketing
data_files:
- split: dev
path: data/marketing-dev.csv
- split: test
path: data/marketing-hard-test.csv
- config_name: mechanical_engineering
data_files:
- split: dev
path: data/mechanical_engineering-dev.csv
- split: test
path: data/mechanical_engineering-hard-test.csv
- config_name: fashion
data_files:
- split: dev
path: data/fashion-dev.csv
- split: test
path: data/fashion-hard-test.csv
- config_name: psychology
data_files:
- split: dev
path: data/psychology-dev.csv
- split: test
path: data/psychology-hard-test.csv
- config_name: taxation
data_files:
- split: dev
path: data/taxation-dev.csv
- split: test
path: data/taxation-hard-test.csv
- config_name: environmental_science
data_files:
- split: dev
path: data/environmental_science-dev.csv
- split: test
path: data/environmental_science-hard-test.csv
- config_name: refrigerating_machinery
data_files:
- split: dev
path: data/refrigerating_machinery-dev.csv
- split: test
path: data/refrigerating_machinery-hard-test.csv
- config_name: education
data_files:
- split: dev
path: data/education-dev.csv
- split: test
path: data/education-hard-test.csv
- config_name: industrial_engineer
data_files:
- split: dev
path: data/industrial_engineer-dev.csv
- split: test
path: data/industrial_engineer-hard-test.csv
- config_name: civil_engineering
data_files:
- split: dev
path: data/civil_engineering-dev.csv
- split: test
path: data/civil_engineering-hard-test.csv
- config_name: energy_management
data_files:
- split: dev
path: data/energy_management-dev.csv
- split: test
path: data/energy_management-hard-test.csv
- config_name: law
data_files:
- split: dev
path: data/law-dev.csv
- split: test
path: data/law-hard-test.csv
- config_name: agricultural_sciences
data_files:
- split: dev
path: data/agricultural_sciences-dev.csv
- split: test
path: data/agricultural_sciences-hard-test.csv
- config_name: interior_architecture_and_design
data_files:
- split: dev
path: data/interior_architecture_and_design-dev.csv
- split: test
path: data/interior_architecture_and_design-hard-test.csv
- config_name: aviation_engineering_and_maintenance
data_files:
- split: dev
path: data/aviation_engineering_and_maintenance-dev.csv
- split: test
path: data/aviation_engineering_and_maintenance-hard-test.csv
- config_name: construction
data_files:
- split: dev
path: data/construction-dev.csv
- split: test
path: data/construction-hard-test.csv
- config_name: economics
data_files:
- split: dev
path: data/economics-dev.csv
- split: test
path: data/economics-hard-test.csv
license: cc-by-nd-4.0
task_categories:
- question-answering
language:
- ko
tags:
- haerae
- mmlu
size_categories:
- 100K<n<1M
---
### KMMLU (Korean-MMLU)
We propose KMMLU, a new Korean benchmark with 35,030 expert-level multiple-choice questions across 45 subjects ranging from humanities to STEM.
Unlike previous Korean benchmarks that are translated from existing English benchmarks, KMMLU is collected from original Korean exams, capturing linguistic and cultural aspects of the Korean language.
We test 26 publicly available and proprietary LLMs, identifying significant room for improvement.
The best publicly available model achieves 50.54% on KMMLU, far below the average human performance of 62.6%.
This model was primarily trained for English and Chinese, not Korean.
Current LLMs tailored to Korean, such as Polyglot-Ko, perform far worse. Surprisingly, even the most capable proprietary LLMs, e.g., GPT-4 and HyperCLOVA X, achieve 59.95% and 53.40%, respectively.
This suggests that further work is needed to improve Korean LLMs, and KMMLU offers the right tool to track this progress.
We make our dataset publicly available on the Hugging Face Hub and integrate the benchmark into EleutherAI's Language Model Evaluation Harness.
Link to Paper: [KMMLU: Measuring Massive Multitask Language Understanding in Korean](https://arxiv.org/abs/2402.11548)
### KMMLU Statistics
| Category | # Questions |
|------------------------------|-------------|
| **Prerequisites** | |
| None | 59,909 |
| 1 Prerequisite Test | 12,316 |
| 2 Prerequisite Tests | 776 |
| 2+ Years of Experience | 65,135 |
| 4+ Years of Experience | 98,678 |
| 9+ Years of Experience | 6,963 |
| **Question Type** | |
| Positive | 207,030 |
| Negation | 36,777 |
| **Split** | |
| Train | 208,522 |
| Validation | 225 |
| Test | 35,030 |
| **Total** | 243,777 |
### Categories
To reimplement the categories in the paper, refer to the following:
```
supercategories = {
"accounting": "HUMSS",
"agricultural_sciences": "Other",
"aviation_engineering_and_maintenance": "Applied Science",
"biology": "STEM",
"chemical_engineering": "STEM",
"chemistry": "STEM",
"civil_engineering": "STEM",
"computer_science": "STEM",
"construction": "Other",
"criminal_law": "HUMSS",
"ecology": "STEM",
"economics": "HUMSS",
"education": "HUMSS",
"electrical_engineering": "STEM",
"electronics_engineering": "Applied Science",
"energy_management": "Applied Science",
"environmental_science": "Applied Science",
"fashion": "Other",
"food_processing": "Other",
"gas_technology_and_engineering": "Applied Science",
"geomatics": "Applied Science",
"health": "Other",
"industrial_engineer": "Applied Science",
"information_technology": "STEM",
"interior_architecture_and_design": "Other",
"law": "HUMSS",
"machine_design_and_manufacturing": "Applied Science",
"management": "HUMSS",
"maritime_engineering": "Applied Science",
"marketing": "Other",
"materials_engineering": "STEM",
"mechanical_engineering": "STEM",
"nondestructive_testing": "Applied Science",
"patent": "Other",
"political_science_and_sociology": "HUMSS",
"psychology": "HUMSS",
"public_safety": "Other",
"railway_and_automotive_engineering": "Applied Science",
"real_estate": "Other",
"refrigerating_machinery": "Other",
"social_welfare": "HUMSS",
"taxation": "HUMSS",
"telecommunications_and_wireless_technology": "Applied Science",
"korean_history": "HUMSS",
"math": "STEM"
}
```
### Point of Contact
For any questions contact us via the following email:)
```
[email protected]
``` |
amitness/logits-mt-it-en-128 | amitness | "2023-09-27T10:27:21Z" | 14,792 | 0 | [
"size_categories:10M<n<100M",
"format:parquet",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2023-09-25T19:22:48Z" | ---
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
dataset_info:
features:
- name: input_ids
sequence: int32
- name: token_type_ids
sequence: int8
- name: attention_mask
sequence: int8
- name: labels
sequence: int64
- name: teacher_logits
sequence:
sequence: float64
- name: teacher_indices
sequence:
sequence: int64
- name: teacher_mask_indices
sequence: int64
splits:
- name: train
num_bytes: 184467361976
num_examples: 40721350
- name: test
num_bytes: 32556394204
num_examples: 7186121
download_size: 0
dataset_size: 217023756180
---
# Dataset Card for "logits-mt-it-en-128"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
distil-whisper/librispeech_long | distil-whisper | "2023-11-02T14:22:54Z" | 14,753 | 2 | [
"size_categories:n<1K",
"format:parquet",
"modality:audio",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2023-11-02T14:22:51Z" | ---
dataset_info:
config_name: clean
features:
- name: audio
dtype: audio
splits:
- name: validation
num_bytes: 1998609.0
num_examples: 1
download_size: 1984721
dataset_size: 1998609.0
configs:
- config_name: clean
data_files:
- split: validation
path: clean/validation-*
---
# Dataset Card for "librispeech_long"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
alvations/c4p0-x1-es-en | alvations | "2024-03-24T03:52:27Z" | 14,723 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-03-23T10:16:40Z" | ---
dataset_info:
features:
- name: source
dtype: string
- name: target
dtype: string
- name: target_backto_source
dtype: string
- name: raw_target
list:
- name: generated_text
dtype: string
- name: raw_target_backto_source
list:
- name: generated_text
dtype: string
- name: prompt
dtype: string
- name: reverse_prompt
dtype: string
- name: source_langid
dtype: string
- name: target_langid
dtype: string
- name: target_backto_source_langid
dtype: string
- name: doc_id
dtype: int64
- name: sent_id
dtype: int64
- name: timestamp
dtype: timestamp[us]
- name: url
dtype: string
- name: doc_hash
dtype: string
splits:
- name: train
num_bytes: 3583
num_examples: 4
download_size: 14029
dataset_size: 3583
configs:
- config_name: default
data_files:
- split: train
path: d595973734f0a325/train-*
---
|
KShivendu/dbpedia-entities-openai-1M | KShivendu | "2024-02-19T08:24:43Z" | 14,680 | 20 | [
"task_categories:feature-extraction",
"language:en",
"license:mit",
"size_categories:1M<n<10M",
"format:parquet",
"modality:text",
"modality:timeseries",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [
"feature-extraction"
] | "2023-06-20T22:29:43Z" | ---
license: mit
dataset_info:
features:
- name: _id
dtype: string
- name: title
dtype: string
- name: text
dtype: string
- name: openai
sequence: float32
splits:
- name: train
num_bytes: 12383152
num_examples: 1000000
download_size: 12383152
dataset_size: 1000000
language:
- en
task_categories:
- feature-extraction
pretty_name: OpenAI 1M with DBPedia Entities
size_categories:
- 1M<n<10M
---
1M OpenAI Embeddings -- 1536 dimensions
Created: June 2023.
Text used for Embedding: title (string) + text (string)
Embedding Model: text-embedding-ada-002
First used for the pgvector vs VectorDB (Qdrant) benchmark: https://nirantk.com/writing/pgvector-vs-qdrant/
### Future work
We are planning to take this up to 10M (and possibly 100M) vectors. Contact [@KShivendu_](https://twitter.com/KShivendu_) on Twitter or mail to [email protected] if you want to help :)
### Credits:
This dataset was generated from the first 1M entries of https://huggingface.co/datasets/BeIR/dbpedia-entity |
hoskinson-center/proof-pile | hoskinson-center | "2023-08-19T03:24:11Z" | 14,644 | 59 | [
"task_categories:text-generation",
"task_ids:language-modeling",
"annotations_creators:no-annotation",
"language_creators:found",
"multilinguality:monolingual",
"language:en",
"license:apache-2.0",
"size_categories:100K<n<1M",
"modality:text",
"library:datasets",
"library:mlcroissant",
"region:us",
"math",
"mathematics",
"formal-mathematics"
] | [
"text-generation"
] | "2022-08-08T20:57:56Z" | ---
annotations_creators:
- no-annotation
language:
- en
language_creators:
- found
license: [apache-2.0]
multilinguality:
- monolingual
pretty_name: proof-pile
size_categories: []
source_datasets: []
tags:
- math
- mathematics
- formal-mathematics
task_categories:
- text-generation
task_ids:
- language-modeling
---
# Dataset Description
The `proof-pile` is a 13GB pre-training dataset of mathematical text that comprises 8.3 billion tokens (using the `gpt-neox` tokenizer). Models trained on this dataset are coming soon :) The dataset is composed of diverse sources of both informal and formal mathematics, namely
- ArXiv.math (10GB)
- Open-source math textbooks (50MB)
- Formal mathematics libraries (500MB)
- Lean mathlib and other Lean repositories
- Isabelle AFP
- Coq mathematical components and other Coq repositories
- HOL Light
- set.mm
- Mizar Mathematical Library
- Math Overflow and Math Stack Exchange (2.5GB)
- Wiki-style sources (50MB)
- ProofWiki
- Wikipedia math articles
- MATH dataset (6MB)
The construction of the dataset is reproducible using the code and instructions in the [proof-pile Github
repo](https://github.com/zhangir-azerbayev/proof-pile).
# Supported Tasks
This dataset is intended to be used for pre-training and fine-tuning language models. We envision models trained on the `proof-pile` will have many downstream applications, including informal quantitative reasoning, formal theorem proving, semantic search for formal mathematics, and autoformalization.
# Languages
All informal mathematics in the `proof-pile` is written in English and LaTeX (arXiv articles in other languages are filtered out using [languagedetect](https://github.com/shuyo/language-detection/blob/wiki/ProjectHome.md)). Formal theorem proving languages represented in this dataset are Lean 3, Isabelle, Coq, HOL Light, Metamath, and Mizar.
# Evaluation
The version of `set.mm` in this dataset has 10% of proofs replaced with the `?` character in order to preserve a validation and test set for Metamath provers pre-trained on the `proof-pile`. The precise split can be found here: [validation](https://github.com/zhangir-azerbayev/mm-extract/blob/main/valid_decls.json) and [test](https://github.com/zhangir-azerbayev/mm-extract/blob/main/test_decls.json).
The Lean mathlib commit used in this dataset is `6313863`. Theorems created in subsequent commits can be used for evaluating Lean theorem provers.
This dataset contains only the training set of the [MATH dataset](https://github.com/hendrycks/math). However, because this dataset contains ProofWiki, the Stacks Project, Trench's Analysis, and Stein's Number Theory, models trained on it cannot be evaluated on the [NaturalProofs dataset](https://github.com/wellecks/naturalproofs).
# Data Preprocessing
This section describes any significant filtering and transformations made to various subsets of the data.
**arXiv.math.**
The arXiv.math dataset is large, heterogeneous, and contains a great deal of noise. We used the following heuristics
when choosing which files from arXiv.math source folders to include in the dataset:
- Keep only files with a `.tex` extension.
- Only include files that use either a `utf-8/16/32` or `latin-1` text encoding.
- Discard files that do not contain a part, chapter, section, sub...section, paragraph, or subparagraph heading.
- Delete files that contain the keyword `gnuplot`. Gnuplot-latex is an old command line utility that generates blocks
of entirely unintelligible source.
- Include only articles in English, as determined by the [langdetect library](https://pypi.org/project/langdetect/).
- Exclude files shorter than 280 characters (characters counted after substring removal described below).
In addition, we apply the following transformations to arXiv.math texts:
- Delete everything outside of `\begin{document}` and `\end{document}`.
- Delete everything including or after `\Refs`, `\begin{thebibliography}`, or `\begin{bibdiv}`
- Delete comments.
- Any more than three consecutive newlines are replaced by three consecutive newlines.
In [this notebook](https://github.com/zhangir-azerbayev/proof-pile/blob/main/analysis/arxiv_noisedetection.ipynb), we provide an analysis of the prevalence of noisy documents in the arXiv.math subset of the
proof-pile.
**Stack Exchange.**
We only include questions that have at least 5 upvotes and an answer. We format Stack Exchange posts as follows
```
QUESTION [{num_upvotes} upvotes]: {text of question}
REPLY [{num_upvotes} votes]: {text of reply}
REPLY [{num_upvotes} votes]: {text of reply}
.
.
.
```
**set.mm.**
We converted `set.mm` into human-readable form by following the instructions in the [mm-extract repo](https://github.com/zhangir-azerbayev/mm-extract)
## Contributions
Authors: Zhangir Azerbayev, Edward Ayers, Bartosz Piotrowski.
We would like to thank Jeremy Avigad, Albert Jiang, and Wenda Li for their invaluable guidance, and the Hoskinson Center for Formal Mathematics for its support.
|
MBZUAI/Bactrian-X | MBZUAI | "2023-05-27T12:54:05Z" | 14,623 | 116 | [
"task_categories:text-generation",
"language:af",
"language:ar",
"language:az",
"language:bn",
"language:cs",
"language:de",
"language:en",
"language:es",
"language:et",
"language:fi",
"language:fr",
"language:gl",
"language:gu",
"language:he",
"language:hi",
"language:hr",
"language:id",
"language:it",
"language:ja",
"language:ka",
"language:kk",
"language:km",
"language:ko",
"language:lt",
"language:lv",
"language:mk",
"language:ml",
"language:mn",
"language:mr",
"language:my",
"language:ne",
"language:nl",
"language:pl",
"language:ps",
"language:pt",
"language:ro",
"language:ru",
"language:si",
"language:sl",
"language:sv",
"language:sw",
"language:ta",
"language:te",
"language:th",
"language:tl",
"language:tr",
"language:uk",
"language:ur",
"language:vi",
"language:xh",
"language:zh",
"license:cc-by-nc-4.0",
"size_categories:1M<n<10M",
"modality:text",
"library:datasets",
"library:mlcroissant",
"arxiv:2008.00401",
"arxiv:2305.15011",
"region:us",
"instruction-finetuning",
"multilingual"
] | [
"text-generation"
] | "2023-04-22T12:42:39Z" | ---
license: cc-by-nc-4.0
task_categories:
- text-generation
language:
- af
- ar
- az
- bn
- cs
- de
- en
- es
- et
- fi
- fr
- gl
- gu
- he
- hi
- hr
- id
- it
- ja
- ka
- kk
- km
- ko
- lt
- lv
- mk
- ml
- mn
- mr
- my
- ne
- nl
- pl
- ps
- pt
- ro
- ru
- si
- sl
- sv
- sw
- ta
- te
- th
- tl
- tr
- uk
- ur
- vi
- xh
- zh
tags:
- instruction-finetuning
- multilingual
pretty_name: Bactrian-X
---
# Dataset Card for "Bactrian-X"
## Table of Contents
- [Dataset Description](#a-dataset-description)
- [Dataset Summary](#dataset-summary)
- [Languages](#languages)
- [Dataset Structure](#b-dataset-structure)
- [Data Fields](#data-fields)
- [Data Instances](#data-instances)
- [Data in 52 Languages](#data-in-52-languages)
- [Dataset Creation](#c-dataset-creation)
- [Considerations for Using the Data](#d-considerations-for-using-the-data)
- [Additional Information](#e-additional-information)
## A. Dataset Description
- **Homepage:** https://github.com/mbzuai-nlp/Bactrian-X
- **Repository:** https://huggingface.co/datasets/MBZUAI/Bactrian-X
- **Paper:** to-be-soon released
### Dataset Summary
<h3 align="center">
<img src="https://raw.githubusercontent.com/fajri91/eval_picts/master/BactrianX_dataset.jpg" width="950" align="center">
</h3>
The Bactrian-X dataset is a collection of 3.4M instruction-response pairs in 52 languages that are obtained by translating 67K English instructions ([alpaca-52k](https://github.com/tatsu-lab/stanford_alpaca) + [dolly-15k](https://github.com/databrickslabs/dolly/tree/master/data)) into 51 languages using Google Translate API. The translated instructions are then fed to ChatGPT (`gpt-3.5-turbo`) to obtain its natural responses, resulting in 3.4M instruction-response pairs in 52 languages (52 languages x 67k instances = 3.4M instances).
### Languages
We follow 52 languages of [mBART-50](https://arxiv.org/abs/2008.00401), with details can be found [below](#data-in-52-languages).
## B. Dataset Structure
### Data Fields
The data fields are as follows:
* `instruction`: describes the task the model should perform. Each of the 67K instructions is unique.
* `input`: optional context or input for the task. For example, when the instruction is "Summarize the following article", the input is the article. Around 40% of the examples have an input.
* `output`: the answer to the instruction as generated by `gpt-3.5-turbo`.
* `id`: the unique index for each data piece in a specific language. The same `id` across splits corresponds to the same original English data.
### Data Instances
An example in `en` looks as follows.
```json
{
"instruction": "What is the main idea of this text?",
"input": "The robotics industry has become one of the fastest growing sectors in today’s economy. Robotics technology is being applied in various industries and applications, such as manufacturing, healthcare, and defense.",
"output": "The main idea of the text is that the robotics industry is rapidly growing and its technology is being used in many different industries and applications.",
"id": "alpaca-107"
}
```
An example in `ar` looks as follows.
```json
{
"instruction": "قم بالوصول إلى إعدادات المتصفح الافتراضية على جهاز الكمبيوتر.",
"input": "",
"output": "لا يمكن لي كمساعد افتراضي القيام بهذه المهمة كونها تحتاج إلى الوصول إلى الجهاز المادي. يرجى الرجوع إلى دليل المستخدم للكمبيوتر الخاص بك أو البحث على الإنترنت لمعرفة كيفية الوصول إلى إعدادات المتصفح الافتراضية على جهاز الكمبيوتر الخاص بك.",
"id": "alpaca-41997"
}
```
An example in `id` looks as follows.
```json
{
"instruction": "Berapa banyak angka penting yang harus digunakan untuk nilai 0,000187?",
"input": "",
"output": "Ada tiga angka penting dalam nilai 0,000187.",
"id": "alpaca-40382"
}
```
An example in `de` looks as follows.
```json
{
"instruction": "Komponieren Sie ein Musikstück mit einem Titel, der Trauer bezeichnet.",
"input": "",
"output": "\"Schmerzendes Herz\"\n\nDas Stück beginnt mit einer langsamen, traurigen Melodie, gespielt von einem Solo-Violoncello. Die Harmonien, die von einem Klavier begleitet werden, sind durcheinander und dissonant, um ein Gefühl von Schmerz und Trauer zu vermitteln.\n\nIm Laufe des Stücks treten andere Instrumente ein, wie Streicher, Flöte und Oboe, um die Melodie zu unterstützen und zu intensivieren. Der Rhythmus bleibt langsam und schwerfällig, um die Schwere des Schmerzes darzustellen.\n\nIn der Mitte des Stücks gibt es einen Höhepunkt, wo die Melodie und Harmonien sich verdichten und ein Gefühl von Überwältigung erzeugen. Der Solo-Violoncello spielt eine traurige und schnelle Phrase, die die Verzweiflung und Isolation des Protagonisten ausdrückt.\n\nDas Stück endet mit einer Wiederholung der ersten Melodie, gespielt von einem Solo-Klavier, während die anderen Instrumente auslaufen. Die Harmonien werden jedoch weicher und sanfter, um einen Hauch von Hoffnung zu vermitteln, dass der Schmerz langsam vergeht.",
"id": "alpaca-33619"
}
```
An example in `fr` looks as follows.
```json
{
"instruction": "Les Ceanothus ont-ils des racines peu profondes ?",
"input": "",
"output": "En tant qu'IA, je ne peux pas répondre avec une certitude absolue, mais voici des informations générales sur les racines de Ceanothus : \n\nLes Ceanothus ont tendance à avoir un système racinaire étendu et peu profond. Les racines peuvent s'étaler jusqu'à 4 à 6 fois la taille de la plante et s'étendre à environ 60 à 90 cm de profondeur. Cependant, cela peut varier en fonction du type de sol et de la variété de Ceanothus. Certaines variétés de Ceanothus pourraient avoir des racines plus profondes si les conditions s'y prêtent.",
"id": "dolly-10940"
}
```
### Data in 52 Languages
| No | Languages | Code | Train |
| ---|---------------- | ----- | ----- |
| 1 | [Afrikaans](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/af) | af_ZA | 67017 |
| 2 | [Arabic](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ar) | ar_AR | 67017 |
| 3 | [Azerbaijani](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/az) | az_AZ | 67017 |
| 4 | [Bengali](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/bn) | bn_IN | 67017 |
| 5 | [Czech](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/cs) | cs_CZ | 67017 |
| 6 | [German](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/de) | de_DE | 67017 |
| 7 | [English](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/en) | en_XX | 67017 |
| 8 | [Spanish](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/es) | es_XX | 67017 |
| 9 | [Estonian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/et) | et_EE | 67017 |
| 10 | [Persian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/fa) | fa_IR | 67017 |
| 11 | [Finnish](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/fi) | fi_FI | 67017 |
| 12 | [French](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/fr) | fr_XX | 67017 |
| 13 | [Galician](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/gl) | gl_ES | 67017 |
| 14 | [Gujarati](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/gu) | gu_IN | 67017 |
| 15 | [Hebrew](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/he) | he_IL | 67017 |
| 16 | [Hindi](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/hi) | hi_IN | 67017 |
| 17 | [Croatian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/hr) | hr_HR | 67017 |
| 18 | [Indonesian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/id) | id_ID | 67017 |
| 19 | [Italian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/it) | it_IT | 67017 |
| 20 | [Japanese](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ja) | ja_XX | 67017 |
| 21 | [Georgian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ka) | ka_GE | 67017 |
| 22 | [Kazakh](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/kk) | kk_KZ | 67017 |
| 23 | [Khmer](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/km) | km_KH | 67017 |
| 24 | [Korean](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ko) | ko_KR | 67017 |
| 25 | [Lithuanian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/lt) | lt_LT | 67017 |
| 26 | [Latvian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/lv) | lv_LV | 67017 |
| 27 | [Macedonian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/mk) | mk_MK | 67017 |
| 28 | [Malayalam](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ml) | ml_IN | 67017 |
| 29 | [Mongolian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/mn) | mn_MN | 67017 |
| 30 | [Marathi](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/mr) | mr_IN | 67017 |
| 31 | [Burmese](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/my) | my_MM | 67017 |
| 32 | [Nepali](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ne) | ne_NP | 67017 |
| 33 | [Dutch](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/nl) | nl_XX | 67017 |
| 34 | [Polish](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/pl) | pl_PL | 67017 |
| 35 | [Pashto](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ps) | ps_AF | 67017 |
| 36 | [Portuguese](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/pt) | pt_XX | 67017 |
| 37 | [Romanian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ro) | ro_RO | 67017 |
| 38 | [Russian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ru) | ru_RU | 67017 |
| 39 | [Sinhala](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/si) | si_LK | 67017 |
| 40 | [Slovene](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/sl) | sl_SI | 67017 |
| 41 | [Swedish](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/sv) | sv_SE | 67017 |
| 42 | [Swahili](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/sw) | sw_KE | 67017 |
| 43 | [Tamil](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ta) | ta_IN | 67017 |
| 44 | [Telugu](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/te) | te_IN | 67017 |
| 45 | [Thai](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/th) | th_TH | 67017 |
| 46 | [Tagalog](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/tl) | tl_XX | 67017 |
| 47 | [Turkish](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/tr) | tr_TR | 67017 |
| 48 | [Ukrainian](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/uk) | uk_UA | 67017 |
| 49 | [Urdu](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/ur) | ur_PK | 67017 |
| 50 | [Vietnamese](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/vi) | vi_VN | 67017 |
| 51 | [Xhosa](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/xh) | xh_ZA | 67017 |
| 52 | [Chinese](https://huggingface.co/datasets/MBZUAI/Bactrian-X/viewer/zh) | zh_CN | 67017 |
## C. Dataset Creation
1. English Instructions: The English instructions are obtained from [alpaca-52k](https://github.com/tatsu-lab/stanford_alpaca) and [dolly-15k](https://github.com/databrickslabs/dolly/tree/master/data).
2. Instruction Translation: The instructions (and inputs) are translated into 51 languages using Google Translation API (conducted on April 2023).
3. Output Generation: We generate output from `gpt-3.5-turbo` for each language (conducted on April 2023).
## D. Considerations for Using the Data
### Social Impact of Dataset
NLP for everyone: this dataset helps to democratize the cutting-edge instruction-following models in 52 languages. This dataset also allows the first experiment on the multilingual LoRA-based LLaMA model.
### Discussion of Biases
(1) Translation bias; (2) Potential English-culture bias in the translated dataset.
### Other Known Limitations
The `Bactrian-X` data is generated by a language model (`gpt-3.5-turbo`) and inevitably contains some errors or biases. We encourage users to use this data with caution and propose new methods to filter or improve the imperfections.
## E. Additional Information
### Dataset Curators
[Haonan Li](https://haonan-li.github.io/) and [Fajri Koto](http://www.fajrikoto.com)
### Licensing Information
The dataset is available under the [Creative Commons NonCommercial (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/legalcode).
### Citation Information
```
@misc{li2023bactrianx,
title={Bactrian-X : A Multilingual Replicable Instruction-Following Model with Low-Rank Adaptation},
author={Haonan Li and Fajri Koto and Minghao Wu and Alham Fikri Aji and Timothy Baldwin},
year={2023},
eprint={2305.15011},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
### Contributions
Thanks to [@haonan-li](https://github.com/haonan-li), [@fajri91](https://github.com/fajri91) for adding this dataset.
|
qiaojin/PubMedQA | qiaojin | "2024-03-06T01:50:16Z" | 14,569 | 204 | [
"task_categories:question-answering",
"task_ids:multiple-choice-qa",
"annotations_creators:expert-generated",
"annotations_creators:machine-generated",
"language_creators:expert-generated",
"multilinguality:monolingual",
"source_datasets:original",
"language:en",
"license:mit",
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:1909.06146",
"region:us"
] | [
"question-answering"
] | "2022-03-02T23:29:22Z" | ---
annotations_creators:
- expert-generated
- machine-generated
language_creators:
- expert-generated
language:
- en
license:
- mit
multilinguality:
- monolingual
size_categories:
- 100K<n<1M
- 10K<n<100K
- 1K<n<10K
source_datasets:
- original
task_categories:
- question-answering
task_ids:
- multiple-choice-qa
paperswithcode_id: pubmedqa
pretty_name: PubMedQA
config_names:
- pqa_artificial
- pqa_labeled
- pqa_unlabeled
dataset_info:
- config_name: pqa_artificial
features:
- name: pubid
dtype: int32
- name: question
dtype: string
- name: context
sequence:
- name: contexts
dtype: string
- name: labels
dtype: string
- name: meshes
dtype: string
- name: long_answer
dtype: string
- name: final_decision
dtype: string
splits:
- name: train
num_bytes: 443501057
num_examples: 211269
download_size: 233411194
dataset_size: 443501057
- config_name: pqa_labeled
features:
- name: pubid
dtype: int32
- name: question
dtype: string
- name: context
sequence:
- name: contexts
dtype: string
- name: labels
dtype: string
- name: meshes
dtype: string
- name: reasoning_required_pred
dtype: string
- name: reasoning_free_pred
dtype: string
- name: long_answer
dtype: string
- name: final_decision
dtype: string
splits:
- name: train
num_bytes: 2088898
num_examples: 1000
download_size: 1075513
dataset_size: 2088898
- config_name: pqa_unlabeled
features:
- name: pubid
dtype: int32
- name: question
dtype: string
- name: context
sequence:
- name: contexts
dtype: string
- name: labels
dtype: string
- name: meshes
dtype: string
- name: long_answer
dtype: string
splits:
- name: train
num_bytes: 125922964
num_examples: 61249
download_size: 66010017
dataset_size: 125922964
configs:
- config_name: pqa_artificial
data_files:
- split: train
path: pqa_artificial/train-*
- config_name: pqa_labeled
data_files:
- split: train
path: pqa_labeled/train-*
- config_name: pqa_unlabeled
data_files:
- split: train
path: pqa_unlabeled/train-*
---
# Dataset Card for [Dataset Name]
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [PubMedQA homepage](https://pubmedqa.github.io/ )
- **Repository:** [PubMedQA repository](https://github.com/pubmedqa/pubmedqa)
- **Paper:** [PubMedQA: A Dataset for Biomedical Research Question Answering](https://arxiv.org/abs/1909.06146)
- **Leaderboard:** [PubMedQA: Leaderboard](https://pubmedqa.github.io/)
### Dataset Summary
The task of PubMedQA is to answer research questions with yes/no/maybe (e.g.: Do preoperative statins reduce atrial fibrillation after coronary artery bypass grafting?) using the corresponding abstracts.
### Supported Tasks and Leaderboards
The official leaderboard is available at: https://pubmedqa.github.io/.
500 questions in the `pqa_labeled` are used as the test set. They can be found at https://github.com/pubmedqa/pubmedqa.
### Languages
English
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
[More Information Needed]
### Contributions
Thanks to [@tuner007](https://github.com/tuner007) for adding this dataset. |
zhoujt1994/HumanCellEpigenomeAtlas_sc_allc | zhoujt1994 | "2025-03-17T13:52:53Z" | 14,564 | 0 | [
"license:mit",
"region:us"
] | null | "2025-03-16T22:31:29Z" | ---
license: mit
---
|
eduagarcia-temp/OSCAR-2301_meta | eduagarcia-temp | "2023-08-28T14:07:22Z" | 14,552 | 0 | [
"size_categories:10M<n<100M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2023-08-27T20:24:54Z" | ---
dataset_info:
features:
- name: id
dtype: int64
- name: text
dtype: string
- name: meta
struct:
- name: categories
sequence: string
- name: dedup
struct:
- name: exact_norm
struct:
- name: cluster_main_idx
dtype: int64
- name: cluster_size
dtype: int64
- name: exact_hash_idx
dtype: int64
- name: is_duplicate
dtype: bool
- name: minhash
struct:
- name: cluster_main_idx
dtype: int64
- name: cluster_size
dtype: int64
- name: is_duplicate
dtype: bool
- name: minhash_idx
dtype: int64
- name: harmful_pp
dtype: float64
- name: identification
struct:
- name: label
dtype: string
- name: prob
dtype: float64
- name: quality_warnings
sequence: string
- name: sentence_identifications
list:
- name: label
dtype: string
- name: prob
dtype: float64
- name: tlsh
dtype: string
- name: warc_headers
struct:
- name: content-length
dtype: int64
- name: content-type
dtype: string
- name: warc-block-digest
dtype: string
- name: warc-date
dtype: string
- name: warc-identified-content-language
dtype: string
- name: warc-record-id
dtype: string
- name: warc-refers-to
dtype: string
- name: warc-target-uri
dtype: string
- name: warc-type
dtype: string
splits:
- name: train
num_bytes: 127702717461
num_examples: 18031400
download_size: 40317121912
dataset_size: 127702717461
---
# Dataset Card for "OSCAR-2301_meta"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
OpenGVLab/OmniCorpus-CC | OpenGVLab | "2025-03-20T12:32:06Z" | 14,513 | 16 | [
"task_categories:image-to-text",
"task_categories:visual-question-answering",
"language:en",
"license:cc-by-4.0",
"size_categories:100M<n<1B",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"arxiv:2406.08418",
"region:us"
] | [
"image-to-text",
"visual-question-answering"
] | "2024-08-30T06:16:02Z" | ---
language:
- en
license: cc-by-4.0
size_categories:
- 100M<n<1B
task_categories:
- image-to-text
- visual-question-answering
dataset_info:
- config_name: CC-MAIN-2013-20
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 19908676196
num_examples: 3878063
download_size: 9303464923
dataset_size: 19908676196
- config_name: CC-MAIN-2013-48
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 15282078925
num_examples: 3091537
download_size: 6965036866
dataset_size: 15282078925
- config_name: CC-MAIN-2014-10
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 7227087609
num_examples: 1390034
download_size: 3259239561
dataset_size: 7227087609
- config_name: CC-MAIN-2014-15
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 10106913108
num_examples: 1968361
download_size: 4567738362
dataset_size: 10106913108
- config_name: CC-MAIN-2014-23
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 7997621043
num_examples: 1455331
download_size: 3468852905
dataset_size: 7997621043
- config_name: CC-MAIN-2014-35
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 6228103779
num_examples: 1219200
download_size: 2849584613
dataset_size: 6228103779
- config_name: CC-MAIN-2014-41
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 8321822952
num_examples: 1573955
download_size: 3775989970
dataset_size: 8321822952
- config_name: CC-MAIN-2014-42
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 7732679416
num_examples: 1511931
download_size: 3505766162
dataset_size: 7732679416
- config_name: CC-MAIN-2014-49
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 4473311810
num_examples: 837735
download_size: 1982728919
dataset_size: 4473311810
- config_name: CC-MAIN-2014-52
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 7292722888
num_examples: 1304730
download_size: 2957626766
dataset_size: 7292722888
- config_name: CC-MAIN-2015-06
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 5775826679
num_examples: 1061940
download_size: 2462379667
dataset_size: 5775826679
- config_name: CC-MAIN-2015-11
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 6263650452
num_examples: 1129411
download_size: 2528026633
dataset_size: 6263650452
- config_name: CC-MAIN-2015-14
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 4524425019
num_examples: 885221
download_size: 1939222111
dataset_size: 4524425019
- config_name: CC-MAIN-2015-18
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 6195227565
num_examples: 1104115
download_size: 2634204322
dataset_size: 6195227565
- config_name: CC-MAIN-2015-22
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 7008276790
num_examples: 1290530
download_size: 2913627974
dataset_size: 7008276790
- config_name: CC-MAIN-2015-27
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 4320140953
num_examples: 784496
download_size: 1828575226
dataset_size: 4320140953
- config_name: CC-MAIN-2015-32
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 4952806590
num_examples: 875601
download_size: 2065207099
dataset_size: 4952806590
- config_name: CC-MAIN-2015-35
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 6053257306
num_examples: 1086470
download_size: 2632032769
dataset_size: 6053257306
- config_name: CC-MAIN-2015-40
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 5206096790
num_examples: 924036
download_size: 2203603087
dataset_size: 5206096790
- config_name: CC-MAIN-2015-48
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 8343050753
num_examples: 1537468
download_size: 3489600630
dataset_size: 8343050753
- config_name: CC-MAIN-2016-07
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 9329220105
num_examples: 1738650
download_size: 4005599785
dataset_size: 9329220105
- config_name: CC-MAIN-2016-18
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 3897220786
num_examples: 747570
download_size: 1675500816
dataset_size: 3897220786
- config_name: CC-MAIN-2016-22
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 4623903344
num_examples: 857060
download_size: 2000624854
dataset_size: 4623903344
- config_name: CC-MAIN-2016-26
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 3414418701
num_examples: 627995
download_size: 1403890884
dataset_size: 3414418701
- config_name: CC-MAIN-2016-30
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 7244342539
num_examples: 1183776
download_size: 2913394840
dataset_size: 7244342539
- config_name: CC-MAIN-2016-36
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 5402565529
num_examples: 915878
download_size: 2248454753
dataset_size: 5402565529
- config_name: CC-MAIN-2016-40
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 5938544915
num_examples: 1113534
download_size: 2530904625
dataset_size: 5938544915
- config_name: CC-MAIN-2016-44
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 15819536321
num_examples: 3528637
download_size: 6516546200
dataset_size: 15819536321
- config_name: CC-MAIN-2016-50
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 10822695594
num_examples: 2215939
download_size: 4439728574
dataset_size: 10822695594
- config_name: CC-MAIN-2017-04
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 11949732148
num_examples: 2441316
download_size: 5045763620
dataset_size: 11949732148
- config_name: CC-MAIN-2017-09
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 12473370126
num_examples: 2561539
download_size: 5398993614
dataset_size: 12473370126
- config_name: CC-MAIN-2017-13
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 12209904783
num_examples: 2458486
download_size: 5422393873
dataset_size: 12209904783
- config_name: CC-MAIN-2017-17
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 13763109013
num_examples: 2615558
download_size: 6025106556
dataset_size: 13763109013
- config_name: CC-MAIN-2017-22
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 14456991831
num_examples: 2775332
download_size: 6258001465
dataset_size: 14456991831
- config_name: CC-MAIN-2017-26
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 15036103558
num_examples: 2973499
download_size: 6813218532
dataset_size: 15036103558
- config_name: CC-MAIN-2017-30
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 18833639414
num_examples: 3870197
download_size: 8464443468
dataset_size: 18833639414
- config_name: CC-MAIN-2017-34
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 25828116836
num_examples: 4848154
download_size: 11599137919
dataset_size: 25828116836
- config_name: CC-MAIN-2017-39
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 29432150311
num_examples: 4840435
download_size: 13172655761
dataset_size: 29432150311
- config_name: CC-MAIN-2017-43
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 32672966840
num_examples: 5724493
download_size: 15041820212
dataset_size: 32672966840
- config_name: CC-MAIN-2017-47
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 34301891443
num_examples: 5291581
download_size: 15593452226
dataset_size: 34301891443
- config_name: CC-MAIN-2017-51
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 30012533603
num_examples: 5466672
download_size: 14005518471
dataset_size: 30012533603
- config_name: CC-MAIN-2018-05
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 47738703452
num_examples: 8053879
download_size: 22533983733
dataset_size: 47738703452
- config_name: CC-MAIN-2018-09
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 45503126107
num_examples: 8045410
download_size: 21900491411
dataset_size: 45503126107
- config_name: CC-MAIN-2018-13
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 43904789090
num_examples: 7980931
download_size: 21178075620
dataset_size: 43904789090
- config_name: CC-MAIN-2018-17
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 44481167440
num_examples: 8699878
download_size: 21623780968
dataset_size: 44481167440
- config_name: CC-MAIN-2018-22
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 64369136465
num_examples: 13332059
download_size: 32293951649
dataset_size: 64369136465
- config_name: CC-MAIN-2018-26
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 81232597180
num_examples: 16249638
download_size: 41007491366
dataset_size: 81232597180
- config_name: CC-MAIN-2018-30
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 151537007358
num_examples: 32535697
download_size: 77517210537
dataset_size: 151537007358
- config_name: CC-MAIN-2018-34
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 39026071869
num_examples: 6347230
download_size: 19285382621
dataset_size: 39026071869
- config_name: CC-MAIN-2018-39
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 35948493161
num_examples: 6372711
download_size: 17597722170
dataset_size: 35948493161
- config_name: CC-MAIN-2018-43
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 50928918805
num_examples: 8758225
download_size: 25291022646
dataset_size: 50928918805
- config_name: CC-MAIN-2018-47
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 43961213014
num_examples: 7270815
download_size: 22024998684
dataset_size: 43961213014
- config_name: CC-MAIN-2018-51
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 58902353921
num_examples: 10215384
download_size: 29497256483
dataset_size: 58902353921
- config_name: CC-MAIN-2019-04
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 54814836003
num_examples: 9930553
download_size: 27458854931
dataset_size: 54814836003
- config_name: CC-MAIN-2019-09
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 54426174385
num_examples: 8897510
download_size: 28125345656
dataset_size: 54426174385
- config_name: CC-MAIN-2019-13
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 48712051219
num_examples: 7803004
download_size: 25156014252
dataset_size: 48712051219
- config_name: CC-MAIN-2019-18
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 48203751852
num_examples: 7532171
download_size: 24844412087
dataset_size: 48203751852
- config_name: CC-MAIN-2019-22
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 51674379059
num_examples: 8339842
download_size: 26257475492
dataset_size: 51674379059
- config_name: CC-MAIN-2019-26
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 43336967638
num_examples: 7320268
download_size: 21900316910
dataset_size: 43336967638
- config_name: CC-MAIN-2019-30
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 46313133200
num_examples: 7682281
download_size: 23262218065
dataset_size: 46313133200
- config_name: CC-MAIN-2019-35
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 49570657315
num_examples: 8098108
download_size: 24938729240
dataset_size: 49570657315
- config_name: CC-MAIN-2019-39
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 43538081906
num_examples: 7102645
download_size: 21728983014
dataset_size: 43538081906
- config_name: CC-MAIN-2019-43
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 52817470138
num_examples: 8567061
download_size: 26105523209
dataset_size: 52817470138
- config_name: CC-MAIN-2019-47
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 42252827792
num_examples: 6775943
download_size: 21228532199
dataset_size: 42252827792
- config_name: CC-MAIN-2019-51
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 38926356094
num_examples: 6415558
download_size: 19510339598
dataset_size: 38926356094
- config_name: CC-MAIN-2020-05
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 48189844491
num_examples: 7921372
download_size: 24235687030
dataset_size: 48189844491
- config_name: CC-MAIN-2020-10
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 48904133840
num_examples: 8211791
download_size: 24576159189
dataset_size: 48904133840
- config_name: CC-MAIN-2020-16
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 51243682770
num_examples: 8578633
download_size: 25485035979
dataset_size: 51243682770
- config_name: CC-MAIN-2020-24
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 59424939072
num_examples: 10438139
download_size: 29827361603
dataset_size: 59424939072
- config_name: CC-MAIN-2020-29
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 66229730938
num_examples: 11475631
download_size: 33030161773
dataset_size: 66229730938
- config_name: CC-MAIN-2020-34
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 54287690582
num_examples: 9495610
download_size: 27018821467
dataset_size: 54287690582
- config_name: CC-MAIN-2020-40
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 71587907978
num_examples: 12058149
download_size: 35795677487
dataset_size: 71587907978
- config_name: CC-MAIN-2020-45
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 59172857400
num_examples: 9694734
download_size: 29495814784
dataset_size: 59172857400
- config_name: CC-MAIN-2020-50
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 58557861606
num_examples: 9539918
download_size: 29083801775
dataset_size: 58557861606
- config_name: CC-MAIN-2021-04
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 74507336015
num_examples: 12273028
download_size: 36874887518
dataset_size: 74507336015
- config_name: CC-MAIN-2021-10
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 60802783945
num_examples: 10176190
download_size: 30326513365
dataset_size: 60802783945
- config_name: CC-MAIN-2021-17
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 75061494488
num_examples: 12343366
download_size: 37345114890
dataset_size: 75061494488
- config_name: CC-MAIN-2021-21
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 70036417178
num_examples: 11584034
download_size: 34806730527
dataset_size: 70036417178
- config_name: CC-MAIN-2021-25
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 73653674063
num_examples: 12065281
download_size: 36581310312
dataset_size: 73653674063
- config_name: CC-MAIN-2021-31
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 79535885182
num_examples: 13383552
download_size: 39702500971
dataset_size: 79535885182
- config_name: CC-MAIN-2021-39
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 90302065651
num_examples: 14794773
download_size: 45211764750
dataset_size: 90302065651
- config_name: CC-MAIN-2021-43
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 108356023335
num_examples: 17698206
download_size: 54292215300
dataset_size: 108356023335
- config_name: CC-MAIN-2021-49
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 95867022229
num_examples: 15643875
download_size: 47902433321
dataset_size: 95867022229
- config_name: CC-MAIN-2022-05
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 97602903488
num_examples: 15772898
download_size: 48711364812
dataset_size: 97602903488
- config_name: CC-MAIN-2022-21
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 127495492928
num_examples: 21745889
download_size: 63379692210
dataset_size: 127495492928
- config_name: CC-MAIN-2022-27
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 128061655541
num_examples: 21580054
download_size: 63763936007
dataset_size: 128061655541
- config_name: CC-MAIN-2022-33
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 126436062118
num_examples: 21495687
download_size: 63067252044
dataset_size: 126436062118
- config_name: CC-MAIN-2022-40
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 123806739937
num_examples: 20206120
download_size: 61929035270
dataset_size: 123806739937
- config_name: CC-MAIN-2022-49
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 152577158166
num_examples: 24634059
download_size: 76529854484
dataset_size: 152577158166
- config_name: CC-MAIN-2023-06
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 174815301023
num_examples: 28962355
download_size: 87301203013
dataset_size: 174815301023
- config_name: CC-MAIN-2023-14
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 227631152876
num_examples: 37223376
download_size: 114188282465
dataset_size: 227631152876
- config_name: CC-MAIN-2023-23
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 321036722459
num_examples: 52119692
download_size: 161491274249
dataset_size: 321036722459
- config_name: CC-MAIN-2023-40
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 648032999611
num_examples: 101292016
download_size: 317965522325
dataset_size: 648032999611
- config_name: CC-MAIN-2023-50
features:
- name: general_metadata
struct:
- name: domain
sequence: string
- name: fluency_prob
dtype: float64
- name: id
dtype: string
- name: non_advertisement_prob
dtype: float64
- name: politics_prob
dtype: float64
- name: porn_prob
dtype: float64
- name: toxic_prob
dtype: float64
- name: url
dtype: string
- name: images
sequence: string
- name: texts
sequence: string
- name: metadata
list:
- name: aesthetic_prob
dtype: float64
- name: bytes
dtype: int64
- name: d_hash
dtype: string
- name: d_hash_dup_count
dtype: int64
- name: height
dtype: int64
- name: img_url_sha
dtype: string
- name: p_hash
dtype: string
- name: p_hash_dup_count
dtype: int64
- name: unsafe_prob
dtype: float64
- name: width
dtype: int64
splits:
- name: train
num_bytes: 744768384551
num_examples: 117073004
download_size: 365332295606
dataset_size: 744768384551
configs:
- config_name: CC-MAIN-2013-20
data_files:
- split: train
path: CC-MAIN-2013-20/train-*
- config_name: CC-MAIN-2013-48
data_files:
- split: train
path: CC-MAIN-2013-48/train-*
- config_name: CC-MAIN-2014-10
data_files:
- split: train
path: CC-MAIN-2014-10/train-*
- config_name: CC-MAIN-2014-15
data_files:
- split: train
path: CC-MAIN-2014-15/train-*
- config_name: CC-MAIN-2014-23
data_files:
- split: train
path: CC-MAIN-2014-23/train-*
- config_name: CC-MAIN-2014-35
data_files:
- split: train
path: CC-MAIN-2014-35/train-*
- config_name: CC-MAIN-2014-41
data_files:
- split: train
path: CC-MAIN-2014-41/train-*
- config_name: CC-MAIN-2014-42
data_files:
- split: train
path: CC-MAIN-2014-42/train-*
- config_name: CC-MAIN-2014-49
data_files:
- split: train
path: CC-MAIN-2014-49/train-*
- config_name: CC-MAIN-2014-52
data_files:
- split: train
path: CC-MAIN-2014-52/train-*
- config_name: CC-MAIN-2015-06
data_files:
- split: train
path: CC-MAIN-2015-06/train-*
- config_name: CC-MAIN-2015-11
data_files:
- split: train
path: CC-MAIN-2015-11/train-*
- config_name: CC-MAIN-2015-14
data_files:
- split: train
path: CC-MAIN-2015-14/train-*
- config_name: CC-MAIN-2015-18
data_files:
- split: train
path: CC-MAIN-2015-18/train-*
- config_name: CC-MAIN-2015-22
data_files:
- split: train
path: CC-MAIN-2015-22/train-*
- config_name: CC-MAIN-2015-27
data_files:
- split: train
path: CC-MAIN-2015-27/train-*
- config_name: CC-MAIN-2015-32
data_files:
- split: train
path: CC-MAIN-2015-32/train-*
- config_name: CC-MAIN-2015-35
data_files:
- split: train
path: CC-MAIN-2015-35/train-*
- config_name: CC-MAIN-2015-40
data_files:
- split: train
path: CC-MAIN-2015-40/train-*
- config_name: CC-MAIN-2015-48
data_files:
- split: train
path: CC-MAIN-2015-48/train-*
- config_name: CC-MAIN-2016-07
data_files:
- split: train
path: CC-MAIN-2016-07/train-*
- config_name: CC-MAIN-2016-18
data_files:
- split: train
path: CC-MAIN-2016-18/train-*
- config_name: CC-MAIN-2016-22
data_files:
- split: train
path: CC-MAIN-2016-22/train-*
- config_name: CC-MAIN-2016-26
data_files:
- split: train
path: CC-MAIN-2016-26/train-*
- config_name: CC-MAIN-2016-30
data_files:
- split: train
path: CC-MAIN-2016-30/train-*
- config_name: CC-MAIN-2016-36
data_files:
- split: train
path: CC-MAIN-2016-36/train-*
- config_name: CC-MAIN-2016-40
data_files:
- split: train
path: CC-MAIN-2016-40/train-*
- config_name: CC-MAIN-2016-44
data_files:
- split: train
path: CC-MAIN-2016-44/train-*
- config_name: CC-MAIN-2016-50
data_files:
- split: train
path: CC-MAIN-2016-50/train-*
- config_name: CC-MAIN-2017-04
data_files:
- split: train
path: CC-MAIN-2017-04/train-*
- config_name: CC-MAIN-2017-09
data_files:
- split: train
path: CC-MAIN-2017-09/train-*
- config_name: CC-MAIN-2017-13
data_files:
- split: train
path: CC-MAIN-2017-13/train-*
- config_name: CC-MAIN-2017-17
data_files:
- split: train
path: CC-MAIN-2017-17/train-*
- config_name: CC-MAIN-2017-22
data_files:
- split: train
path: CC-MAIN-2017-22/train-*
- config_name: CC-MAIN-2017-26
data_files:
- split: train
path: CC-MAIN-2017-26/train-*
- config_name: CC-MAIN-2017-30
data_files:
- split: train
path: CC-MAIN-2017-30/train-*
- config_name: CC-MAIN-2017-34
data_files:
- split: train
path: CC-MAIN-2017-34/train-*
- config_name: CC-MAIN-2017-39
data_files:
- split: train
path: CC-MAIN-2017-39/train-*
- config_name: CC-MAIN-2017-43
data_files:
- split: train
path: CC-MAIN-2017-43/train-*
- config_name: CC-MAIN-2017-47
data_files:
- split: train
path: CC-MAIN-2017-47/train-*
- config_name: CC-MAIN-2017-51
data_files:
- split: train
path: CC-MAIN-2017-51/train-*
- config_name: CC-MAIN-2018-05
data_files:
- split: train
path: CC-MAIN-2018-05/train-*
- config_name: CC-MAIN-2018-09
data_files:
- split: train
path: CC-MAIN-2018-09/train-*
- config_name: CC-MAIN-2018-13
data_files:
- split: train
path: CC-MAIN-2018-13/train-*
- config_name: CC-MAIN-2018-17
data_files:
- split: train
path: CC-MAIN-2018-17/train-*
- config_name: CC-MAIN-2018-22
data_files:
- split: train
path: CC-MAIN-2018-22/train-*
- config_name: CC-MAIN-2018-26
data_files:
- split: train
path: CC-MAIN-2018-26/train-*
- config_name: CC-MAIN-2018-30
data_files:
- split: train
path: CC-MAIN-2018-30/train-*
- config_name: CC-MAIN-2018-34
data_files:
- split: train
path: CC-MAIN-2018-34/train-*
- config_name: CC-MAIN-2018-39
data_files:
- split: train
path: CC-MAIN-2018-39/train-*
- config_name: CC-MAIN-2018-43
data_files:
- split: train
path: CC-MAIN-2018-43/train-*
- config_name: CC-MAIN-2018-47
data_files:
- split: train
path: CC-MAIN-2018-47/train-*
- config_name: CC-MAIN-2018-51
data_files:
- split: train
path: CC-MAIN-2018-51/train-*
- config_name: CC-MAIN-2019-04
data_files:
- split: train
path: CC-MAIN-2019-04/train-*
- config_name: CC-MAIN-2019-09
data_files:
- split: train
path: CC-MAIN-2019-09/train-*
- config_name: CC-MAIN-2019-13
data_files:
- split: train
path: CC-MAIN-2019-13/train-*
- config_name: CC-MAIN-2019-18
data_files:
- split: train
path: CC-MAIN-2019-18/train-*
- config_name: CC-MAIN-2019-22
data_files:
- split: train
path: CC-MAIN-2019-22/train-*
- config_name: CC-MAIN-2019-26
data_files:
- split: train
path: CC-MAIN-2019-26/train-*
- config_name: CC-MAIN-2019-30
data_files:
- split: train
path: CC-MAIN-2019-30/train-*
- config_name: CC-MAIN-2019-35
data_files:
- split: train
path: CC-MAIN-2019-35/train-*
- config_name: CC-MAIN-2019-39
data_files:
- split: train
path: CC-MAIN-2019-39/train-*
- config_name: CC-MAIN-2019-43
data_files:
- split: train
path: CC-MAIN-2019-43/train-*
- config_name: CC-MAIN-2019-47
data_files:
- split: train
path: CC-MAIN-2019-47/train-*
- config_name: CC-MAIN-2019-51
data_files:
- split: train
path: CC-MAIN-2019-51/train-*
- config_name: CC-MAIN-2020-05
data_files:
- split: train
path: CC-MAIN-2020-05/train-*
- config_name: CC-MAIN-2020-10
data_files:
- split: train
path: CC-MAIN-2020-10/train-*
- config_name: CC-MAIN-2020-16
data_files:
- split: train
path: CC-MAIN-2020-16/train-*
- config_name: CC-MAIN-2020-24
data_files:
- split: train
path: CC-MAIN-2020-24/train-*
- config_name: CC-MAIN-2020-29
data_files:
- split: train
path: CC-MAIN-2020-29/train-*
- config_name: CC-MAIN-2020-34
data_files:
- split: train
path: CC-MAIN-2020-34/train-*
- config_name: CC-MAIN-2020-40
data_files:
- split: train
path: CC-MAIN-2020-40/train-*
- config_name: CC-MAIN-2020-45
data_files:
- split: train
path: CC-MAIN-2020-45/train-*
- config_name: CC-MAIN-2020-50
data_files:
- split: train
path: CC-MAIN-2020-50/train-*
- config_name: CC-MAIN-2021-04
data_files:
- split: train
path: CC-MAIN-2021-04/train-*
- config_name: CC-MAIN-2021-10
data_files:
- split: train
path: CC-MAIN-2021-10/train-*
- config_name: CC-MAIN-2021-17
data_files:
- split: train
path: CC-MAIN-2021-17/train-*
- config_name: CC-MAIN-2021-21
data_files:
- split: train
path: CC-MAIN-2021-21/train-*
- config_name: CC-MAIN-2021-25
data_files:
- split: train
path: CC-MAIN-2021-25/train-*
- config_name: CC-MAIN-2021-31
data_files:
- split: train
path: CC-MAIN-2021-31/train-*
- config_name: CC-MAIN-2021-39
data_files:
- split: train
path: CC-MAIN-2021-39/train-*
- config_name: CC-MAIN-2021-43
data_files:
- split: train
path: CC-MAIN-2021-43/train-*
- config_name: CC-MAIN-2021-49
data_files:
- split: train
path: CC-MAIN-2021-49/train-*
- config_name: CC-MAIN-2022-05
data_files:
- split: train
path: CC-MAIN-2022-05/train-*
- config_name: CC-MAIN-2022-21
data_files:
- split: train
path: CC-MAIN-2022-21/train-*
- config_name: CC-MAIN-2022-27
data_files:
- split: train
path: CC-MAIN-2022-27/train-*
- config_name: CC-MAIN-2022-33
data_files:
- split: train
path: CC-MAIN-2022-33/train-*
- config_name: CC-MAIN-2022-40
data_files:
- split: train
path: CC-MAIN-2022-40/train-*
- config_name: CC-MAIN-2022-49
data_files:
- split: train
path: CC-MAIN-2022-49/train-*
- config_name: CC-MAIN-2023-06
data_files:
- split: train
path: CC-MAIN-2023-06/train-*
- config_name: CC-MAIN-2023-14
data_files:
- split: train
path: CC-MAIN-2023-14/train-*
- config_name: CC-MAIN-2023-23
data_files:
- split: train
path: CC-MAIN-2023-23/train-*
- config_name: CC-MAIN-2023-40
data_files:
- split: train
path: CC-MAIN-2023-40/train-*
- config_name: CC-MAIN-2023-50
data_files:
- split: train
path: CC-MAIN-2023-50/train-*
---
<p align="center">
<h1 align="center">🐳 OmniCorpus: A Unified Multimodal Corpus of 10 Billion-Level Images Interleaved with Text</h1>
</p>
> ⭐️ **NOTE:** Several parquet files were marked unsafe (viruses) by the official scanning of HF, while they are reported safe by ClamAV and VirusTotal.
> We found [many false positive cases](https://discuss.huggingface.co/u/mcpotato/summary) of the HF automatic scanning in HF discussions and raised [one discussion](https://discuss.huggingface.co/t/one-parquet-file-of-my-dataset-was-marked-unsafe/113745) to ask for a re-scanning.
This is the repository of OmniCorpus-CC, which contains 988 million image-text interleaved documents collected from [Common Crawl](https://commoncrawl.org/).
- Repository: https://github.com/OpenGVLab/OmniCorpus
- Paper (ICLR 2025 Spotlight): https://arxiv.org/abs/2406.08418
OmniCorpus dataset is a large-scale image-text interleaved dataset, which pushes the boundaries of scale and diversity by encompassing **8.6 billion images** interleaved with **1,696 billion text tokens** from diverse sources, significantly surpassing previous datasets.
This dataset demonstrates several advantages over its counterparts:
1. **Larger data scale:** Our dataset is 1.7 times larger in images and 12.5 times larger in texts compared to the previously largest multimodal dataset, LAION-5B, while maintaining excellent data quality.
2. **Richer data diversity:** Drawing from a broader range of data sources, our dataset is more diverse than other image-text interleaved datasets. It includes bilingual multimodal data in both Chinese and English, and encompasses text-centric and vision-centric documents extracted from common websites and video platforms.
3. **More flexible format:** The streaming data format of our dataset offers exceptional flexibility, allowing adaptation to various data structures, including pure text corpora, image-text pairs, and interleaved data formats.
<img width="578" alt="image" src="https://github.com/OpenGVLab/OmniCorpus/assets/47669167/641a6427-ba50-41e6-8634-8810113fd803">
The OmniCorpus contains three sections:
- **OmniCorpus-CC**: processed from dumps in Common Crawl from 2013 to Nov./Dec. 2023.
- **OmniCorpus-CW**: sourced from Chinese internet resources, will be available on the [OpenDataLab](https://opendatalab.com/) platform.
- **OmniCorpus-YT**: samples YouTube video frames as images and collects subtitles as texts.
Code for pre-training, evaluating, main body extraction, and filtering has been released in the official [repository](https://github.com/OpenGVLab/OmniCorpus). A pre-trained model is available [here](https://huggingface.co/Qingyun/OmniCorpus-InternVL).
# Data Pipeline
Our data pipeline consists of five key stages: main body extraction, preliminary text filtering, document deduplication, image downloading \& filtering, and detailed text filtering. Each stage efficiently reduces the dataset to retain only high-quality data.
Please refer to our paper for more details about the data pipeline.
<img width="723" alt="image" src="https://github.com/OpenGVLab/OmniCorpus/assets/47669167/a6de8928-58fb-4ff4-8ef9-4bd90e9ada5f">
# Usages
The image-text interleaved documents are recommended for the following usages:
- Pre-training multimodal large language model (MLLM): Recent MLLMs (such as Flamingo series, EMU series, IDEFICS series, MM1, Cambrian-1, and xGen-MM) have shown that image-text interleaved data aids multimodal in-context learning and maintains the capabilities of large language models during multimodal fine-tuning.
- Long text-image retrieval: We provide image-text similarities calculated with CLIP, which can convert the documents into an image-text retrieval dataset with longer text. A retrieval model pre-trained on such data can retrieve images based on longer text, which can be used for multimodal RAG, converting pure text into multimodal samples, etc.
- Source for further dataset research: Our data is large-scale and can serve as a source for research on data curation strategies. We provide many useful attributes as metadata for each document, which can enrich filtering strategies and reduce cost.
- ......
# Data Format
Following common practices, the data is organized into Parquet file format.
You might encounter errors when using `pandas.read_parquet` (because the data structure contains nested elements). We recommend using fastparquet to load the parquet files.
```Python
import fastparquet
df = fastparquet.ParquetFile(parquet_file_path).to_pandas()

# You can also use iter_batches
import pyarrow.parquet as pq
parquet_file = pq.ParquetFile(filepath)
for batch in parquet_file.iter_batches():
    df = batch.to_pandas()
```
You can extract the i-th document and convert it into a dictionary.
```Python
doc_dict = df.iloc[i].to_dict()
```
The document format is as follows:
```json
{
'images': [
<str: image_1_url>,
None,
<str: image_2_url>,
None,
],
'texts': [
    None,
    <str: text_paragraph_1_content>,
    None,
    <str: text_paragraph_2_content>,
],
'metadata': [
<dict: image_1_metadata>,
None,
<dict: image_2_metadata>,
None
],
'general_metadata': {
"url": <str: document url>,
"id": <str: document id>,
"domain": <list[str]: domains extracted from document url>,
"fluency_prob": <float: the probability of fluency>,
"non_advertisement_prob": <float: the probability of non-advertisement>,
"porn_prob": <float: the probability of porn content>,
"politics_prob": <float: the probability of politics content>,
"toxic_prob": <float: the probability of toxic content>,
}
}
```
Each image's metadata is as follows:
```json
{
"img_url_sha": <str: sha code of image url>,
"width": <int: image width>,
"height": <int: image height>,
"bytes": <int: byte number of the image file>,
"d_hash": <str: d_hash code of the image, used for image deduplication>,
"p_hash": <str: p_hash code of the image, used for image deduplication>,
"d_hash_dup_count": <int: duplicated times detected by d_hash code>,
"p_hash_dup_count": <int: duplicated times detected by p_hash code>,
"aesthetic prob": <float: aesthetic probility>,
"unsafe prob": <float: NSFW probility>,
}
```
# License and Terms of Use
The OmniCorpus dataset is distributed under [the CC BY 4.0 License](https://creativecommons.org/licenses/by/4.0/). The open-source code is released under the [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0).
The Terms of Use (ToUs) have been developed based on widely accepted standards. By accessing or using this dataset, users acknowledge their responsibility to comply with all relevant legal, regulatory, and ethical standards.
- All users, whether from academia or industry, must comply with the ToUs outlined in the CC BY 4.0 License.
- Any derived datasets or models must acknowledge the use of the OmniCorpus dataset to maintain transparency.
- The OmniCorpus must not be used in any project involving sensitive content or harmful outcomes, including but not limited to political manipulation, hate speech generation, misinformation propagation, or tasks that perpetuate harmful stereotypes or biases.
- The use of this dataset in any manner that violates rights, such as copyright infringement, privacy breaches, or misuse of sensitive information, is strictly prohibited.
- While we do not enforce jurisdiction-specific terms, we strongly recommend that users ensure compliance with applicable local laws and regulations.
- The use of specific subset must comply with the ToUs of the primary source. Specifically, the use of OmniCorpus-CC, OmniCorpus-CW, and OmniCorpus-YT must comply with [the Common Crawl ToUs](https://commoncrawl.org/terms-of-use), the [regulations](https://www.gov.cn/zhengce/content/202409/content\_6977766.htm) on the security management of Internet data in China, and [YouTube’s ToUs](https://www.youtube.com/terms), respectively.
- These ToUs do not supersede the ToUs of the original content sources. Users must ensure that any use of the dataset’s content complies with the original ToUs and the rights of the data subjects.
# Citation
```
@inproceedings{li2024omnicorpus,
title={OmniCorpus: A Unified Multimodal Corpus of 10 Billion-Level Images Interleaved with Text},
author={Li, Qingyun and Chen, Zhe and Wang, Weiyun and Wang, Wenhai and Ye, Shenglong and Jin, Zhenjiang and others},
booktitle={The Thirteenth International Conference on Learning Representations},
year={2025}
}
```
|
haonan-li/cmmlu | haonan-li | "2023-07-13T10:19:29Z" | 14,504 | 66 | [
"task_categories:multiple-choice",
"task_categories:question-answering",
"language:zh",
"license:cc-by-nc-4.0",
"size_categories:10K<n<100K",
"modality:text",
"library:datasets",
"library:mlcroissant",
"arxiv:2306.09212",
"region:us",
"chinese",
"llm",
"evaluation"
] | [
"multiple-choice",
"question-answering"
] | "2023-06-25T16:37:44Z" | ---
license: cc-by-nc-4.0
task_categories:
- multiple-choice
- question-answering
language:
- zh
tags:
- chinese
- llm
- evaluation
pretty_name: CMMLU
size_categories:
- 10K<n<100K
---
# CMMLU: Measuring massive multitask language understanding in Chinese
- **Homepage:** [https://github.com/haonan-li/CMMLU](https://github.com/haonan-li/CMMLU)
- **Repository:** [https://huggingface.co/datasets/haonan-li/cmmlu](https://huggingface.co/datasets/haonan-li/cmmlu)
- **Paper:** [CMMLU: Measuring Chinese Massive Multitask Language Understanding](https://arxiv.org/abs/2306.09212).
## Table of Contents
- [Introduction](#introduction)
- [Leaderboard](#leaderboard)
- [Data](#data)
- [Citation](#citation)
- [License](#license)
## Introduction
CMMLU is a comprehensive Chinese assessment suite specifically designed to evaluate the advanced knowledge and reasoning abilities of LLMs within the Chinese language and cultural context.
CMMLU covers a wide range of subjects, comprising 67 topics that span from elementary to advanced professional levels. It includes subjects that require computational expertise, such as physics and mathematics, as well as disciplines within humanities and social sciences.
Many of these tasks are not easily translatable from other languages due to their specific contextual nuances and wording.
Furthermore, numerous tasks within CMMLU have answers that are specific to China and may not be universally applicable or considered correct in other regions or languages.
## Leaderboard
Latest leaderboard is in our [github](https://github.com/haonan-li/CMMLU).
## Data
We provide development and test datasets for each of the 67 subjects, with 5 questions in the development set and 100+ questions in the test set.
Each question in the dataset is a multiple-choice question with 4 choices and only one choice as the correct answer.
Here are two examples:
```
题目:同一物种的两类细胞各产生一种分泌蛋白,组成这两种蛋白质的各种氨基酸含量相同,但排列顺序不同。其原因是参与这两种蛋白质合成的:
A. tRNA种类不同
B. 同一密码子所决定的氨基酸不同
C. mRNA碱基序列不同
D. 核糖体成分不同
答案是:C
```
```
题目:某种植物病毒V是通过稻飞虱吸食水稻汁液在水稻间传播的。稻田中青蛙数量的增加可减少该病毒在水稻间的传播。下列叙述正确的是:
A. 青蛙与稻飞虱是捕食关系
B. 水稻和病毒V是互利共生关系
C. 病毒V与青蛙是寄生关系
D. 水稻与青蛙是竞争关系
答案是:
```
#### Load data
```python
from datasets import load_dataset
cmmlu=load_dataset(r"haonan-li/cmmlu", 'agronomy')
print(cmmlu['test'][0])
```
#### Load all data at once
```python
task_list = ['agronomy', 'anatomy', 'ancient_chinese', 'arts', 'astronomy', 'business_ethics', 'chinese_civil_service_exam', 'chinese_driving_rule', 'chinese_food_culture', 'chinese_foreign_policy', 'chinese_history', 'chinese_literature',
'chinese_teacher_qualification', 'clinical_knowledge', 'college_actuarial_science', 'college_education', 'college_engineering_hydrology', 'college_law', 'college_mathematics', 'college_medical_statistics', 'college_medicine', 'computer_science',
'computer_security', 'conceptual_physics', 'construction_project_management', 'economics', 'education', 'electrical_engineering', 'elementary_chinese', 'elementary_commonsense', 'elementary_information_and_technology', 'elementary_mathematics',
'ethnology', 'food_science', 'genetics', 'global_facts', 'high_school_biology', 'high_school_chemistry', 'high_school_geography', 'high_school_mathematics', 'high_school_physics', 'high_school_politics', 'human_sexuality',
'international_law', 'journalism', 'jurisprudence', 'legal_and_moral_basis', 'logical', 'machine_learning', 'management', 'marketing', 'marxist_theory', 'modern_chinese', 'nutrition', 'philosophy', 'professional_accounting', 'professional_law',
'professional_medicine', 'professional_psychology', 'public_relations', 'security_study', 'sociology', 'sports_science', 'traditional_chinese_medicine', 'virology', 'world_history', 'world_religions']
from datasets import load_dataset
cmmlu = {k: load_dataset(r"haonan-li/cmmlu", k) for k in task_list}
```
## Citation
```
@misc{li2023cmmlu,
title={CMMLU: Measuring massive multitask language understanding in Chinese},
author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and Hai Zhao and Yeyun Gong and Nan Duan and Timothy Baldwin},
year={2023},
eprint={2306.09212},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
## License
The CMMLU dataset is licensed under a
[Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License](http://creativecommons.org/licenses/by-nc-sa/4.0/).
|
desmondlzy/bigs-data | desmondlzy | "2025-03-11T21:09:56Z" | 14,468 | 0 | [
"license:cc-by-sa-4.0",
"modality:image",
"arxiv:2408.13370",
"region:us",
"olat"
] | null | "2025-03-10T08:45:14Z" | ---
license: cc-by-sa-4.0
tags:
- olat
pretty_name: 'Dataset for BiGS: Bidirectional Primitives for Relightable 3D Gaussian Splatting'
---
# BiGS Dataset
The OLAT dataset used in the paper _BiGS: Bidirectional Primitives for Relightable 3D Gaussian Splatting_ (3DV 2025). Check out our [project page](https://desmondlzy.me/publications/bigs/).
We provide 7 synthetic scenes in the dataset, featuring various complex light transport effects, such as subsurface scattering, fuzzy surfaces, and iridescent reflection.
Each scene (1.8 ~ 3.2 GB) consists of:
- 40 training OLAT conditions (`olat_1` - `olat_40`) with 48 views per light condition;
- 58 test OLAT conditions (`olat_41` - `olat_98`) with 1 view per light condition;
- 1 all-lights-on condition (`olat_all`) with 48 views per light condition.
Each light condition includes `.exr` images, object masks, transforms with camera poses, light positions and intensities.
Please refer to our [github repo](https://github.com/desmondlzy/bigs) for how to use the dataset provided here to train BiGS,
and our [paper (arxiv)](https://www.arxiv.org/abs/2408.13370) for details of BiGS.
## Citation
If you use our dataset in your research, please consider citing us with the below bibtex entry:
```
@misc{zhenyuan2024bigs,
title={BiGS: Bidirectional Primitives for Relightable 3D Gaussian Splatting},
author={Liu Zhenyuan and Yu Guo and Xinyuan Li and Bernd Bickel and Ran Zhang},
year={2024},
eprint={2408.13370},
url={https://arxiv.org/abs/2408.13370},
}
```
## Acknowledgments
Our synthetic data is generated using [Mitsuba](https://mitsuba.readthedocs.io/en/stable/).
We thank the 3D models' creators:
Keenan Crane for _Spot_;
Stanford Computer Graphics Laboratory for the models _Dragon_ and _Bunny_;
Wenzel Jakob for the model _Mitsuba Ball_.
Special thanks to Changxi Zheng for supporting the internship program at Tencent Pixel Lab.
|
kevinjesse/ManyRefactors4C | kevinjesse | "2022-09-25T12:59:34Z" | 14,461 | 0 | [
"license:cc-by-2.0",
"size_categories:10M<n<100M",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2022-09-25T06:28:58Z" | ---
license: cc-by-2.0
---
|
fixie-ai/peoples_speech | fixie-ai | "2024-08-11T17:26:01Z" | 14,320 | 2 | [
"size_categories:1M<n<10M",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | null | "2024-08-05T18:35:01Z" | ---
dataset_info:
- config_name: clean
features:
- name: id
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: duration_ms
dtype: int32
- name: text
dtype: string
- name: continuation
dtype: string
splits:
- name: validation
num_bytes: 2511523987.692
num_examples: 18622
- name: test
num_bytes: 4259695510.794
num_examples: 34898
- name: train
num_bytes: 401646320552.671
num_examples: 1501271
download_size: 398922548670
dataset_size: 408417540051
- config_name: dirty_sa
features:
- name: id
dtype: string
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: duration_ms
dtype: int32
- name: text
dtype: string
- name: continuation
dtype: string
splits:
- name: train
num_bytes: 144432442623.054
num_examples: 548014
- name: validation
num_bytes: 2511524241.692
num_examples: 18622
- name: test
num_bytes: 4259695588.794
num_examples: 34898
download_size: 149491764186
dataset_size: 151203662453.53998
configs:
- config_name: clean
data_files:
- split: validation
path: clean/validation-*
- split: test
path: clean/test-*
- split: train
path: data/train-*
- config_name: dirty_sa
data_files:
- split: train
path: dirty_sa/train-*
- split: validation
path: dirty_sa/validation-*
- split: test
path: dirty_sa/test-*
---
|
Subsets and Splits