# Mapping from human-readable display names to internal dataset identifiers.
displayname2datasetname = {
'LibriSpeech-Clean' : 'librispeech_test_clean',
'LibriSpeech-Other' : 'librispeech_test_other',
'CommonVoice-15-EN' : 'common_voice_15_en_test',
'Peoples-Speech' : 'peoples_speech_test',
'GigaSpeech-1' : 'gigaspeech_test',
'Earnings-21' : 'earnings21_test',
'Earnings-22' : 'earnings22_test',
'TED-LIUM-3' : 'tedlium3_test',
'TED-LIUM-3-LongForm' : 'tedlium3_long_form_test',
'AISHELL-ASR-ZH' : 'aishell_asr_zh_test',
'CoVoST2-EN-ID' : 'covost2_en_id_test',
'CoVoST2-EN-ZH' : 'covost2_en_zh_test',
'CoVoST2-EN-TA' : 'covost2_en_ta_test',
'CoVoST2-ID-EN' : 'covost2_id_en_test',
'CoVoST2-ZH-EN' : 'covost2_zh_en_test',
'CoVoST2-TA-EN' : 'covost2_ta_en_test',
'CN-College-Listen-MCQ': 'cn_college_listen_mcq_test',
'DREAM-TTS-MCQ' : 'dream_tts_mcq_test',
'SLUE-P2-SQA5' : 'slue_p2_sqa5_test',
'Public-SG-Speech-QA' : 'public_sg_speech_qa_test',
'Spoken-SQuAD' : 'spoken_squad_test',
'OpenHermes-Audio' : 'openhermes_audio_test',
'ALPACA-Audio' : 'alpaca_audio_test',
'WavCaps' : 'wavcaps_test',
'AudioCaps' : 'audiocaps_test',
'Clotho-AQA' : 'clotho_aqa_test',
'WavCaps-QA' : 'wavcaps_qa_test',
'AudioCaps-QA' : 'audiocaps_qa_test',
'VoxCeleb-Accent' : 'voxceleb_accent_test',
'MNSC-AR-Sentence' : 'imda_ar_sentence',
'MNSC-AR-Dialogue' : 'imda_ar_dialogue',
'VoxCeleb-Gender' : 'voxceleb_gender_test',
'IEMOCAP-Gender' : 'iemocap_gender_test',
'IEMOCAP-Emotion' : 'iemocap_emotion_test',
'MELD-Sentiment' : 'meld_sentiment_test',
'MELD-Emotion' : 'meld_emotion_test',
'MuChoMusic' : 'muchomusic_test',
'MNSC-PART1-ASR' : 'imda_part1_asr_test',
'MNSC-PART2-ASR' : 'imda_part2_asr_test',
'MNSC-PART3-ASR' : 'imda_part3_30s_asr_test',
'MNSC-PART4-ASR' : 'imda_part4_30s_asr_test',
'MNSC-PART5-ASR' : 'imda_part5_30s_asr_test',
'MNSC-PART6-ASR' : 'imda_part6_30s_asr_test',
'MNSC-PART3-SQA' : 'imda_part3_30s_sqa_human_test',
'MNSC-PART4-SQA' : 'imda_part4_30s_sqa_human_test',
'MNSC-PART5-SQA' : 'imda_part5_30s_sqa_human_test',
'MNSC-PART6-SQA' : 'imda_part6_30s_sqa_human_test',
'MNSC-PART3-SDS' : 'imda_part3_30s_ds_human_test',
'MNSC-PART4-SDS' : 'imda_part4_30s_ds_human_test',
'MNSC-PART5-SDS' : 'imda_part5_30s_ds_human_test',
'MNSC-PART6-SDS' : 'imda_part6_30s_ds_human_test',
'CNA' : 'cna_test',
'IDPC' : 'idpc_test',
'Parliament' : 'parliament_test',
'UKUS-News' : 'ukusnews_test',
'Mediacorp' : 'mediacorp_test',
'IDPC-Short' : 'idpc_short_test',
'Parliament-Short': 'parliament_short_test',
'UKUS-News-Short' : 'ukusnews_short_test',
'Mediacorp-Short' : 'mediacorp_short_test',
'YouTube ASR: English Singapore Content': 'ytb_asr_batch1',
'YouTube ASR: English with Strong Emotion': 'ytb_asr_batch2',
'YouTube ASR: Malay English Prompt': 'ytb_asr_batch3_ms',
'YouTube ASR: Malay with Malay Prompt': 'ytb_asr_batch3_ms_ms_prompt',
'SEAME-Dev-Mandarin' : 'seame_dev_man',
'SEAME-Dev-Singlish' : 'seame_dev_sge',
'YouTube SQA: English with Singapore Content': 'ytb_sqa_batch1',
'YouTube SDS: English with Singapore Content': 'ytb_sds_batch1',
'YouTube PQA: English with Singapore Content': 'ytb_pqa_batch1',
}
# Inverse mapping: internal dataset identifier -> display name.
datasetname2displayname = {datasetname: displayname for displayname, datasetname in displayname2datasetname.items()}
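# A quick sanity check of the two mappings (illustrative values taken from the
# dictionary above):
#   >>> displayname2datasetname['LibriSpeech-Clean']
#   'librispeech_test_clean'
#   >>> datasetname2displayname['librispeech_test_clean']
#   'LibriSpeech-Clean'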
# Short description of each dataset, keyed by display name.
dataset_display_information = {
'LibriSpeech-Clean' : 'A clean, high-quality testset of the LibriSpeech dataset, used for ASR testing.',
'LibriSpeech-Other' : 'A more challenging, noisier testset of the LibriSpeech dataset for ASR testing.',
'CommonVoice-15-EN' : 'Test set from the Common Voice project, which is a crowd-sourced, multilingual speech dataset.',
'Peoples-Speech' : 'A large-scale, open-source speech recognition dataset, with diverse accents and domains.',
'GigaSpeech-1' : 'A large-scale ASR dataset with diverse audio sources like podcasts, interviews, etc.',
'Earnings-21' : 'ASR test dataset focused on earnings calls from 2021, with professional speech and financial jargon.',
'Earnings-22' : 'Similar to Earnings21, but covering earnings calls from 2022.',
'TED-LIUM-3' : 'A test set derived from TED talks, covering diverse speakers and topics.',
'TED-LIUM-3-LongForm' : 'A long-form version of the TED-LIUM dataset, containing extended audio samples. Long audio poses challenges for existing fusion methods, but this set provides a benchmark for future development.',
'AISHELL-ASR-ZH' : 'ASR test dataset for Mandarin Chinese, based on the Aishell dataset.',
'CoVoST2-EN-ID' : 'CoVoST 2 dataset for speech translation from English to Indonesian.',
'CoVoST2-EN-ZH' : 'CoVoST 2 dataset for speech translation from English to Chinese.',
'CoVoST2-EN-TA' : 'CoVoST 2 dataset for speech translation from English to Tamil.',
'CoVoST2-ID-EN' : 'CoVoST 2 dataset for speech translation from Indonesian to English.',
'CoVoST2-ZH-EN' : 'CoVoST 2 dataset for speech translation from Chinese to English.',
'CoVoST2-TA-EN' : 'CoVoST 2 dataset for speech translation from Tamil to English.',
'CN-College-Listen-MCQ': 'Chinese College English Listening Test, with multiple-choice questions.',
'DREAM-TTS-MCQ' : 'DREAM dataset for spoken question-answering, derived from textual data and synthesized speech.',
'SLUE-P2-SQA5' : 'Spoken Language Understanding Evaluation (SLUE) dataset, part 2, focused on QA tasks.',
'Public-SG-Speech-QA' : 'Public dataset for speech-based question answering, gathered from Singapore.',
'Spoken-SQuAD' : 'Spoken SQuAD dataset, based on the textual SQuAD dataset, converted into audio.',
'OpenHermes-Audio' : 'Test set for spoken instructions. Synthesized from the OpenHermes dataset.',
'ALPACA-Audio' : 'Spoken version of the ALPACA dataset, used for evaluating instruction following in audio.',
'WavCaps' : 'WavCaps is a dataset for testing audio captioning, where models generate textual descriptions of audio clips.',
'AudioCaps' : 'AudioCaps dataset, used for generating captions from general audio events.',
'Clotho-AQA' : 'Clotho dataset adapted for audio-based question answering, containing audio clips and questions.',
'WavCaps-QA' : 'Question-answering test dataset derived from WavCaps, focusing on audio content.',
'AudioCaps-QA' : 'AudioCaps adapted for question-answering tasks, using audio events as input for Q&A.',
'VoxCeleb-Accent' : 'Test dataset for accent recognition, based on VoxCeleb, a large speaker identification dataset.',
'MNSC-AR-Sentence' : 'Accent recognition based on the IMDA NSC dataset, focusing on sentence-level accents.',
'MNSC-AR-Dialogue' : 'Accent recognition based on the IMDA NSC dataset, focusing on dialogue-level accents.',
'VoxCeleb-Gender': 'Test dataset for gender classification, also derived from VoxCeleb.',
'IEMOCAP-Gender' : 'Gender classification based on the IEMOCAP dataset.',
'IEMOCAP-Emotion': 'Emotion recognition test data from the IEMOCAP dataset, focusing on identifying emotions in speech.',
'MELD-Sentiment' : 'Sentiment recognition from speech using the MELD dataset, classifying positive, negative, or neutral sentiments.',
'MELD-Emotion' : 'Emotion classification in speech using MELD, detecting specific emotions like happiness, anger, etc.',
'MuChoMusic' : 'Test dataset for music understanding, from the paper "MuChoMusic: Evaluating Music Understanding in Multimodal Audio-Language Models".',
'MNSC-PART1-ASR' : 'Speech recognition test data from the IMDA NSC project, Part 1.',
'MNSC-PART2-ASR' : 'Speech recognition test data from the IMDA NSC project, Part 2.',
'MNSC-PART3-ASR' : 'Speech recognition test data from the IMDA NSC project, Part 3.',
'MNSC-PART4-ASR' : 'Speech recognition test data from the IMDA NSC project, Part 4.',
'MNSC-PART5-ASR' : 'Speech recognition test data from the IMDA NSC project, Part 5.',
'MNSC-PART6-ASR' : 'Speech recognition test data from the IMDA NSC project, Part 6.',
'MNSC-PART3-SQA' : 'Multitask National Speech Corpus (MNSC) dataset, question-answering task, Part 3.',
'MNSC-PART4-SQA' : 'Multitask National Speech Corpus (MNSC) dataset, question-answering task, Part 4.',
'MNSC-PART5-SQA' : 'Multitask National Speech Corpus (MNSC) dataset, question-answering task, Part 5.',
'MNSC-PART6-SQA' : 'Multitask National Speech Corpus (MNSC) dataset, question-answering task, Part 6.',
'MNSC-PART3-SDS' : 'Multitask National Speech Corpus (MNSC) dataset, dialogue summarization task, Part 3.',
'MNSC-PART4-SDS' : 'Multitask National Speech Corpus (MNSC) dataset, dialogue summarization task, Part 4.',
'MNSC-PART5-SDS' : 'Multitask National Speech Corpus (MNSC) dataset, dialogue summarization task, Part 5.',
'MNSC-PART6-SDS' : 'Multitask National Speech Corpus (MNSC) dataset, dialogue summarization task, Part 6.',
'CNA' : 'Under Development',
'IDPC' : 'Under Development',
'Parliament' : 'Under Development',
'UKUS-News' : 'Under Development',
'Mediacorp' : 'Under Development',
'IDPC-Short' : 'Under Development',
'Parliament-Short': 'Under Development',
'UKUS-News-Short' : 'Under Development',
'Mediacorp-Short' : 'Under Development',
'YouTube ASR: English Singapore Content' : 'YouTube Evaluation Dataset for ASR Task: \n This dataset contains English and Singlish audio clips, featuring Singapore-related content. \n It includes approximately 2.5 hours of audio, with individual clips ranging from 2 seconds to 30 seconds in length.',
'YouTube ASR: English with Strong Emotion' : '''YouTube Evaluation Dataset for ASR Task: \n
This dataset contains audio clips in English and some unidentified languages, featuring speech with strong emotional expression. \n
It includes approximately 3.9 hours of audio, with each clip lasting 30 seconds.''',
'YouTube ASR: Malay English Prompt': '''YouTube Evaluation Dataset for ASR Task: \n
This dataset mainly contains Malay audio clips, with some English, paired with English prompts. \n
It includes approximately 2.55 hours of audio, with individual clips ranging from 30 seconds to 95 seconds in length.''',
'YouTube ASR: Malay with Malay Prompt': '''YouTube Evaluation Dataset for ASR Task: \n
This dataset uses the same audio as *YouTube ASR: Malay English Prompt*, but with Malay prompts. \n
It includes approximately 2.55 hours of audio, with individual clips ranging from 30 seconds to 95 seconds in length.''',
'SEAME-Dev-Mandarin' : 'Under Development',
'SEAME-Dev-Singlish' : 'Under Development',
'YouTube SQA: English with Singapore Content': '''YouTube Evaluation Dataset for Speech-QA Task: \n
This dataset uses the same audio as *YouTube ASR: English Singapore Content*, featuring Singapore-related content. \n
It includes approximately 2.5 hours of audio, with individual clips ranging from 2 seconds to 30 seconds in length.''',
'YouTube SDS: English with Singapore Content': '''YouTube Evaluation Dataset for Summarization Task: \n
This dataset uses the same audio as *YouTube ASR: English Singapore Content*, featuring Singapore-related content. \n
It includes approximately 2.5 hours of audio, with individual clips ranging from 2 seconds to 30 seconds in length.''',
'YouTube PQA: English with Singapore Content': '''YouTube Evaluation Dataset for Paralinguistics Task: \n
This dataset uses the same audio as *YouTube ASR: English Singapore Content*, featuring Singapore-related content. \n
It includes approximately 2.5 hours of audio, with individual clips ranging from 2 seconds to 30 seconds in length.''',
}
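# Hypothetical convenience helper (not part of the original data above): a
# minimal sketch showing how the lookups can be combined. The function name
# and return shape are assumptions for illustration only.
def get_dataset_info(display_name: str) -> dict:
    """Resolve a display name to its internal dataset identifier and description."""
    return {
        'display_name': display_name,
        'dataset_name': displayname2datasetname.get(display_name),
        'description' : dataset_display_information.get(display_name, 'Under Development'),
    }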
# Short description of each evaluation metric, keyed by metric identifier.
metrics_info = {
'wer' : 'Word Error Rate (WER) - The lower, the better.',
'llama3_70b_judge_binary': 'Model-as-a-Judge performance with binary judgement, using LLAMA-3-70B. Scale from 0-100. The higher, the better.',
'llama3_70b_judge' : 'Model-as-a-Judge performance, using LLAMA-3-70B. Scale from 0-100. The higher, the better.',
'meteor' : 'METEOR Score. The higher, the better.',
'bleu' : 'BLEU Score. The higher, the better.',
}
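# Example usage (hypothetical; for quick manual inspection only):
if __name__ == '__main__':
    info = get_dataset_info('LibriSpeech-Clean')
    print(info['dataset_name'])   # librispeech_test_clean
    print(info['description'])    # short description from dataset_display_information
    print(metrics_info['wer'])    # Word Error Rate (WER) - The lower, the better.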