zhuohan-7 committed
Commit ec09f03 · Parent: de57059

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete change set.
Files changed (50)
  1. .gitattributes +4 -0
  2. examples/AC/AudioCaps-Test/sample_0.wav +0 -0
  3. examples/AC/AudioCaps-Test/sample_1.wav +0 -0
  4. examples/AC/AudioCaps-Test/sample_2.wav +0 -0
  5. examples/AC/AudioCaps-Test/state.json +1 -1
  6. examples/AC/WavCaps-Test/sample_0.wav +0 -0
  7. examples/AC/WavCaps-Test/sample_1.wav +0 -0
  8. examples/AC/WavCaps-Test/sample_2.wav +0 -0
  9. examples/AC/WavCaps-Test/state.json +1 -1
  10. examples/AQA/AudioCaps-QA-Test/dataset_info.json +18 -0
  11. examples/AQA/AudioCaps-QA-Test/sample_0.wav +0 -0
  12. examples/AQA/AudioCaps-QA-Test/sample_1.wav +0 -0
  13. examples/AQA/AudioCaps-QA-Test/sample_2.wav +0 -0
  14. examples/AQA/AudioCaps-QA-Test/state.json +2 -1
  15. examples/AQA/Clotho-AQA-Test/dataset_info.json +1 -91
  16. examples/AQA/Clotho-AQA-Test/sample_0.wav +0 -0
  17. examples/AQA/Clotho-AQA-Test/sample_1.wav +0 -0
  18. examples/AQA/Clotho-AQA-Test/sample_2.wav +0 -0
  19. examples/AQA/Clotho-AQA-Test/state.json +2 -7
  20. examples/AQA/WavCaps-QA-Test/dataset_info.json +18 -0
  21. examples/AQA/WavCaps-QA-Test/sample_0.wav +0 -0
  22. examples/AQA/WavCaps-QA-Test/sample_1.wav +0 -0
  23. examples/AQA/WavCaps-QA-Test/sample_2.wav +0 -0
  24. examples/AQA/WavCaps-QA-Test/state.json +2 -1
  25. examples/AR/VoxCeleb-Accent-Test/dataset_info.json +18 -0
  26. examples/AR/VoxCeleb-Accent-Test/sample_0.wav +0 -0
  27. examples/AR/VoxCeleb-Accent-Test/sample_1.wav +0 -0
  28. examples/AR/VoxCeleb-Accent-Test/sample_2.wav +0 -0
  29. examples/AR/VoxCeleb-Accent-Test/state.json +2 -1
  30. examples/ASR/Common-Voice-15-En-Test/dataset_info.json +3 -3
  31. examples/ASR/Common-Voice-15-En-Test/sample_0.wav +0 -0
  32. examples/ASR/Common-Voice-15-En-Test/sample_1.wav +0 -0
  33. examples/ASR/Common-Voice-15-En-Test/sample_2.wav +0 -0
  34. examples/ASR/Common-Voice-15-En-Test/state.json +1 -1
  35. examples/ASR/Earnings21-Test/state.json +1 -1
  36. examples/ASR/Earnings22-Test/state.json +1 -1
  37. examples/ASR/GigaSpeech-Test/sample_0.wav +0 -0
  38. examples/ASR/GigaSpeech-Test/sample_1.wav +0 -0
  39. examples/ASR/GigaSpeech-Test/sample_2.wav +0 -0
  40. examples/ASR/GigaSpeech-Test/state.json +1 -1
  41. examples/ASR/IMDA-Part1-ASR-Test/sample_0.wav +0 -0
  42. examples/ASR/IMDA-Part1-ASR-Test/sample_1.wav +0 -0
  43. examples/ASR/IMDA-Part1-ASR-Test/sample_2.wav +0 -0
  44. examples/ASR/IMDA-Part1-ASR-Test/state.json +1 -1
  45. examples/ASR/IMDA-Part2-ASR-Test/sample_0.wav +0 -0
  46. examples/ASR/IMDA-Part2-ASR-Test/sample_1.wav +0 -0
  47. examples/ASR/IMDA-Part2-ASR-Test/sample_2.wav +0 -0
  48. examples/ASR/IMDA-Part2-ASR-Test/state.json +1 -1
  49. examples/ASR/LibriSpeech-Test-Clean/sample_0.wav +0 -0
  50. examples/ASR/LibriSpeech-Test-Clean/sample_1.wav +0 -0
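The commit message ("Upload folder using huggingface_hub") indicates the snapshot was pushed with the huggingface_hub folder-upload API. A minimal sketch of such a push, assuming the standard HfApi client; the repo_id below is a placeholder, since the target repository is not shown in this view:

from huggingface_hub import HfApi

api = HfApi()
# repo_id is hypothetical -- the actual dataset repository is not visible in this diff.
api.upload_folder(
    folder_path="examples",
    path_in_repo="examples",
    repo_id="your-org/your-dataset",
    repo_type="dataset",
    commit_message="Upload folder using huggingface_hub",
)

Files matched by .gitattributes LFS rules (such as the wav samples below) are stored via Git LFS rather than committed inline.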
.gitattributes CHANGED
@@ -52,3 +52,7 @@ examples/SQA/Spoken-Squad-v1/sample_0.wav filter=lfs diff=lfs merge=lfs -text
 examples/SQA/Spoken-Squad-v1/sample_1.wav filter=lfs diff=lfs merge=lfs -text
 examples/SQA/Spoken-Squad-v1/sample_2.wav filter=lfs diff=lfs merge=lfs -text
 examples/SQA/DREAM-TTS-MCQ-Test/sample_2.wav filter=lfs diff=lfs merge=lfs -text
+examples/ASR/Tedlium3-Long-form-Test/sample_0.wav filter=lfs diff=lfs merge=lfs -text
+examples/ASR/Tedlium3-Long-form-Test/sample_1.wav filter=lfs diff=lfs merge=lfs -text
+examples/ASR/Tedlium3-Long-form-Test/sample_2.wav filter=lfs diff=lfs merge=lfs -text
+examples/SQA/Public-SG-Speech-QA-Test/sample_2.wav filter=lfs diff=lfs merge=lfs -text
examples/AC/AudioCaps-Test/sample_0.wav CHANGED
Binary files a/examples/AC/AudioCaps-Test/sample_0.wav and b/examples/AC/AudioCaps-Test/sample_0.wav differ
 
examples/AC/AudioCaps-Test/sample_1.wav CHANGED
Binary files a/examples/AC/AudioCaps-Test/sample_1.wav and b/examples/AC/AudioCaps-Test/sample_1.wav differ
 
examples/AC/AudioCaps-Test/sample_2.wav CHANGED
Binary files a/examples/AC/AudioCaps-Test/sample_2.wav and b/examples/AC/AudioCaps-Test/sample_2.wav differ
 
examples/AC/AudioCaps-Test/state.json CHANGED
@@ -4,7 +4,7 @@
       "filename": "data-00000-of-00001.arrow"
     }
   ],
-  "_fingerprint": "e654a4081bc1365b",
+  "_fingerprint": "3729f6c56764c342",
   "_format_columns": [
     "context",
     "instruction",
examples/AC/WavCaps-Test/sample_0.wav CHANGED
Binary files a/examples/AC/WavCaps-Test/sample_0.wav and b/examples/AC/WavCaps-Test/sample_0.wav differ
 
examples/AC/WavCaps-Test/sample_1.wav CHANGED
Binary files a/examples/AC/WavCaps-Test/sample_1.wav and b/examples/AC/WavCaps-Test/sample_1.wav differ
 
examples/AC/WavCaps-Test/sample_2.wav CHANGED
Binary files a/examples/AC/WavCaps-Test/sample_2.wav and b/examples/AC/WavCaps-Test/sample_2.wav differ
 
examples/AC/WavCaps-Test/state.json CHANGED
@@ -4,7 +4,7 @@
       "filename": "data-00000-of-00001.arrow"
     }
   ],
-  "_fingerprint": "ce408e4cfa3eec8a",
+  "_fingerprint": "6c3c2a5f2db349d8",
   "_format_columns": [
     "context",
     "instruction",
examples/AQA/AudioCaps-QA-Test/dataset_info.json CHANGED
@@ -122,6 +122,24 @@
         "_type": "Value"
       }
     },
+    "mowe_audio": {
+      "answer": {
+        "dtype": "string",
+        "_type": "Value"
+      },
+      "model_prediction": {
+        "dtype": "string",
+        "_type": "Value"
+      },
+      "task_type": {
+        "dtype": "string",
+        "_type": "Value"
+      },
+      "text": {
+        "dtype": "string",
+        "_type": "Value"
+      }
+    },
     "qwen_audio_chat": {
       "answer": {
         "dtype": "string",
examples/AQA/AudioCaps-QA-Test/sample_0.wav CHANGED
Binary files a/examples/AQA/AudioCaps-QA-Test/sample_0.wav and b/examples/AQA/AudioCaps-QA-Test/sample_0.wav differ
 
examples/AQA/AudioCaps-QA-Test/sample_1.wav CHANGED
Binary files a/examples/AQA/AudioCaps-QA-Test/sample_1.wav and b/examples/AQA/AudioCaps-QA-Test/sample_1.wav differ
 
examples/AQA/AudioCaps-QA-Test/sample_2.wav CHANGED
Binary files a/examples/AQA/AudioCaps-QA-Test/sample_2.wav and b/examples/AQA/AudioCaps-QA-Test/sample_2.wav differ
 
examples/AQA/AudioCaps-QA-Test/state.json CHANGED
@@ -4,7 +4,7 @@
       "filename": "data-00000-of-00001.arrow"
     }
   ],
-  "_fingerprint": "b4d0bc420173574a",
+  "_fingerprint": "026dfac674d9ef77",
   "_format_columns": [
     "context",
     "instruction",
@@ -14,6 +14,7 @@
     "wavllm_fairseq",
     "Qwen2-Audio-7B-Instruct",
     "whisper_large_v3_with_llama_3_8b_instruct",
+    "mowe_audio",
     "qwen_audio_chat"
   ],
   "_format_kwargs": {},
examples/AQA/Clotho-AQA-Test/dataset_info.json CHANGED
@@ -32,97 +32,7 @@
         "_type": "Value"
       }
     },
-    "other_attributes": {},
-    "salmonn_7b": {
-      "answer": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "model_prediction": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "task_type": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "text": {
-        "dtype": "string",
-        "_type": "Value"
-      }
-    },
-    "wavllm_fairseq": {
-      "answer": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "model_prediction": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "task_type": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "text": {
-        "dtype": "string",
-        "_type": "Value"
-      }
-    },
-    "Qwen2-Audio-7B-Instruct": {
-      "answer": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "model_prediction": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "task_type": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "text": {
-        "dtype": "string",
-        "_type": "Value"
-      }
-    },
-    "whisper_large_v3_with_llama_3_8b_instruct": {
-      "answer": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "model_prediction": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "task_type": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "text": {
-        "dtype": "string",
-        "_type": "Value"
-      }
-    },
-    "qwen_audio_chat": {
-      "answer": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "model_prediction": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "task_type": {
-        "dtype": "string",
-        "_type": "Value"
-      },
-      "text": {
-        "dtype": "string",
-        "_type": "Value"
-      }
-    }
+    "other_attributes": {}
   },
   "homepage": "",
   "license": ""
examples/AQA/Clotho-AQA-Test/sample_0.wav CHANGED
Binary files a/examples/AQA/Clotho-AQA-Test/sample_0.wav and b/examples/AQA/Clotho-AQA-Test/sample_0.wav differ
 
examples/AQA/Clotho-AQA-Test/sample_1.wav CHANGED
Binary files a/examples/AQA/Clotho-AQA-Test/sample_1.wav and b/examples/AQA/Clotho-AQA-Test/sample_1.wav differ
 
examples/AQA/Clotho-AQA-Test/sample_2.wav CHANGED
Binary files a/examples/AQA/Clotho-AQA-Test/sample_2.wav and b/examples/AQA/Clotho-AQA-Test/sample_2.wav differ
 
examples/AQA/Clotho-AQA-Test/state.json CHANGED
@@ -4,17 +4,12 @@
4
  "filename": "data-00000-of-00001.arrow"
5
  }
6
  ],
7
- "_fingerprint": "3f05c76553bf311d",
8
  "_format_columns": [
9
  "context",
10
  "instruction",
11
  "answer",
12
- "other_attributes",
13
- "salmonn_7b",
14
- "wavllm_fairseq",
15
- "Qwen2-Audio-7B-Instruct",
16
- "whisper_large_v3_with_llama_3_8b_instruct",
17
- "qwen_audio_chat"
18
  ],
19
  "_format_kwargs": {},
20
  "_format_type": null,
 
4
  "filename": "data-00000-of-00001.arrow"
5
  }
6
  ],
7
+ "_fingerprint": "515a1722077187bd",
8
  "_format_columns": [
9
  "context",
10
  "instruction",
11
  "answer",
12
+ "other_attributes"
 
 
 
 
 
13
  ],
14
  "_format_kwargs": {},
15
  "_format_type": null,
examples/AQA/WavCaps-QA-Test/dataset_info.json CHANGED
@@ -118,6 +118,24 @@
         "_type": "Value"
       }
     },
+    "mowe_audio": {
+      "answer": {
+        "dtype": "string",
+        "_type": "Value"
+      },
+      "model_prediction": {
+        "dtype": "string",
+        "_type": "Value"
+      },
+      "task_type": {
+        "dtype": "string",
+        "_type": "Value"
+      },
+      "text": {
+        "dtype": "string",
+        "_type": "Value"
+      }
+    },
     "qwen_audio_chat": {
       "answer": {
         "dtype": "string",
examples/AQA/WavCaps-QA-Test/sample_0.wav CHANGED
Binary files a/examples/AQA/WavCaps-QA-Test/sample_0.wav and b/examples/AQA/WavCaps-QA-Test/sample_0.wav differ
 
examples/AQA/WavCaps-QA-Test/sample_1.wav CHANGED
Binary files a/examples/AQA/WavCaps-QA-Test/sample_1.wav and b/examples/AQA/WavCaps-QA-Test/sample_1.wav differ
 
examples/AQA/WavCaps-QA-Test/sample_2.wav CHANGED
Binary files a/examples/AQA/WavCaps-QA-Test/sample_2.wav and b/examples/AQA/WavCaps-QA-Test/sample_2.wav differ
 
examples/AQA/WavCaps-QA-Test/state.json CHANGED
@@ -4,7 +4,7 @@
       "filename": "data-00000-of-00001.arrow"
     }
   ],
-  "_fingerprint": "f88396310248e252",
+  "_fingerprint": "46b38bc22103a7cd",
   "_format_columns": [
     "context",
     "instruction",
@@ -14,6 +14,7 @@
     "wavllm_fairseq",
     "Qwen2-Audio-7B-Instruct",
     "whisper_large_v3_with_llama_3_8b_instruct",
+    "mowe_audio",
     "qwen_audio_chat"
   ],
   "_format_kwargs": {},
examples/AR/VoxCeleb-Accent-Test/dataset_info.json CHANGED
@@ -126,6 +126,24 @@
         "_type": "Value"
       }
     },
+    "mowe_audio": {
+      "answer": {
+        "dtype": "string",
+        "_type": "Value"
+      },
+      "model_prediction": {
+        "dtype": "string",
+        "_type": "Value"
+      },
+      "task_type": {
+        "dtype": "string",
+        "_type": "Value"
+      },
+      "text": {
+        "dtype": "string",
+        "_type": "Value"
+      }
+    },
     "qwen_audio_chat": {
       "answer": {
         "dtype": "string",
examples/AR/VoxCeleb-Accent-Test/sample_0.wav CHANGED
Binary files a/examples/AR/VoxCeleb-Accent-Test/sample_0.wav and b/examples/AR/VoxCeleb-Accent-Test/sample_0.wav differ
 
examples/AR/VoxCeleb-Accent-Test/sample_1.wav CHANGED
Binary files a/examples/AR/VoxCeleb-Accent-Test/sample_1.wav and b/examples/AR/VoxCeleb-Accent-Test/sample_1.wav differ
 
examples/AR/VoxCeleb-Accent-Test/sample_2.wav CHANGED
Binary files a/examples/AR/VoxCeleb-Accent-Test/sample_2.wav and b/examples/AR/VoxCeleb-Accent-Test/sample_2.wav differ
 
examples/AR/VoxCeleb-Accent-Test/state.json CHANGED
@@ -4,7 +4,7 @@
       "filename": "data-00000-of-00001.arrow"
     }
   ],
-  "_fingerprint": "7017504c8eeb5d71",
+  "_fingerprint": "f1df87f5b3ca8c97",
   "_format_columns": [
     "context",
     "instruction",
@@ -14,6 +14,7 @@
     "wavllm_fairseq",
     "Qwen2-Audio-7B-Instruct",
     "whisper_large_v3_with_llama_3_8b_instruct",
+    "mowe_audio",
     "qwen_audio_chat"
   ],
   "_format_kwargs": {},
examples/ASR/Common-Voice-15-En-Test/dataset_info.json CHANGED
@@ -34,11 +34,11 @@
     },
     "other_attributes": {
       "accents": {
-        "dtype": "string",
+        "dtype": "null",
         "_type": "Value"
       },
       "age": {
-        "dtype": "string",
+        "dtype": "null",
         "_type": "Value"
       },
       "client_id": {
@@ -50,7 +50,7 @@
         "_type": "Value"
       },
       "gender": {
-        "dtype": "string",
+        "dtype": "null",
         "_type": "Value"
       },
       "language": {
examples/ASR/Common-Voice-15-En-Test/sample_0.wav CHANGED
Binary files a/examples/ASR/Common-Voice-15-En-Test/sample_0.wav and b/examples/ASR/Common-Voice-15-En-Test/sample_0.wav differ
 
examples/ASR/Common-Voice-15-En-Test/sample_1.wav CHANGED
Binary files a/examples/ASR/Common-Voice-15-En-Test/sample_1.wav and b/examples/ASR/Common-Voice-15-En-Test/sample_1.wav differ
 
examples/ASR/Common-Voice-15-En-Test/sample_2.wav CHANGED
Binary files a/examples/ASR/Common-Voice-15-En-Test/sample_2.wav and b/examples/ASR/Common-Voice-15-En-Test/sample_2.wav differ
 
examples/ASR/Common-Voice-15-En-Test/state.json CHANGED
@@ -4,7 +4,7 @@
       "filename": "data-00000-of-00001.arrow"
     }
   ],
-  "_fingerprint": "6342d438049fbc7e",
+  "_fingerprint": "5a02a12eee6eb15a",
   "_format_columns": [
     "context",
     "instruction",
examples/ASR/Earnings21-Test/state.json CHANGED
@@ -4,7 +4,7 @@
       "filename": "data-00000-of-00001.arrow"
     }
   ],
-  "_fingerprint": "d0ad1703cbc51418",
+  "_fingerprint": "0d42a0f2cebd16d8",
   "_format_columns": [
     "context",
     "instruction",
examples/ASR/Earnings22-Test/state.json CHANGED
@@ -4,7 +4,7 @@
       "filename": "data-00000-of-00001.arrow"
     }
   ],
-  "_fingerprint": "fb047ff90ed3a443",
+  "_fingerprint": "1427a3866fe2cb1a",
   "_format_columns": [
     "context",
     "instruction",
examples/ASR/GigaSpeech-Test/sample_0.wav CHANGED
Binary files a/examples/ASR/GigaSpeech-Test/sample_0.wav and b/examples/ASR/GigaSpeech-Test/sample_0.wav differ
 
examples/ASR/GigaSpeech-Test/sample_1.wav CHANGED
Binary files a/examples/ASR/GigaSpeech-Test/sample_1.wav and b/examples/ASR/GigaSpeech-Test/sample_1.wav differ
 
examples/ASR/GigaSpeech-Test/sample_2.wav CHANGED
Binary files a/examples/ASR/GigaSpeech-Test/sample_2.wav and b/examples/ASR/GigaSpeech-Test/sample_2.wav differ
 
examples/ASR/GigaSpeech-Test/state.json CHANGED
@@ -4,7 +4,7 @@
       "filename": "data-00000-of-00001.arrow"
     }
   ],
-  "_fingerprint": "84a02614da440215",
+  "_fingerprint": "9527d9b9b39b34c3",
   "_format_columns": [
     "context",
     "instruction",
examples/ASR/IMDA-Part1-ASR-Test/sample_0.wav CHANGED
Binary files a/examples/ASR/IMDA-Part1-ASR-Test/sample_0.wav and b/examples/ASR/IMDA-Part1-ASR-Test/sample_0.wav differ
 
examples/ASR/IMDA-Part1-ASR-Test/sample_1.wav CHANGED
Binary files a/examples/ASR/IMDA-Part1-ASR-Test/sample_1.wav and b/examples/ASR/IMDA-Part1-ASR-Test/sample_1.wav differ
 
examples/ASR/IMDA-Part1-ASR-Test/sample_2.wav CHANGED
Binary files a/examples/ASR/IMDA-Part1-ASR-Test/sample_2.wav and b/examples/ASR/IMDA-Part1-ASR-Test/sample_2.wav differ
 
examples/ASR/IMDA-Part1-ASR-Test/state.json CHANGED
@@ -4,7 +4,7 @@
       "filename": "data-00000-of-00001.arrow"
     }
   ],
-  "_fingerprint": "1514e693988caee7",
+  "_fingerprint": "e4d91fe35602a9e0",
   "_format_columns": [
     "context",
     "instruction",
examples/ASR/IMDA-Part2-ASR-Test/sample_0.wav CHANGED
Binary files a/examples/ASR/IMDA-Part2-ASR-Test/sample_0.wav and b/examples/ASR/IMDA-Part2-ASR-Test/sample_0.wav differ
 
examples/ASR/IMDA-Part2-ASR-Test/sample_1.wav CHANGED
Binary files a/examples/ASR/IMDA-Part2-ASR-Test/sample_1.wav and b/examples/ASR/IMDA-Part2-ASR-Test/sample_1.wav differ
 
examples/ASR/IMDA-Part2-ASR-Test/sample_2.wav CHANGED
Binary files a/examples/ASR/IMDA-Part2-ASR-Test/sample_2.wav and b/examples/ASR/IMDA-Part2-ASR-Test/sample_2.wav differ
 
examples/ASR/IMDA-Part2-ASR-Test/state.json CHANGED
@@ -4,7 +4,7 @@
       "filename": "data-00000-of-00001.arrow"
     }
   ],
-  "_fingerprint": "8a8e117080f24a8b",
+  "_fingerprint": "0ed051d84878e4e9",
   "_format_columns": [
     "context",
     "instruction",
examples/ASR/LibriSpeech-Test-Clean/sample_0.wav CHANGED
Binary files a/examples/ASR/LibriSpeech-Test-Clean/sample_0.wav and b/examples/ASR/LibriSpeech-Test-Clean/sample_0.wav differ
 
examples/ASR/LibriSpeech-Test-Clean/sample_1.wav CHANGED
Binary files a/examples/ASR/LibriSpeech-Test-Clean/sample_1.wav and b/examples/ASR/LibriSpeech-Test-Clean/sample_1.wav differ