cahya committed on
Commit a47c180 · 1 Parent(s): 6423f2a

Update README.md

Files changed (1)
  1. README.md +49 -19
README.md CHANGED
@@ -57,8 +57,8 @@ def load_dataset_sundanese():
 
     dfs = []
 
-    dfs.append(pd.read_csv(filenames[0], sep='\t\t', names=["path", "sentence"]))
-    dfs.append(pd.read_csv(filenames[1], sep='\t\t', names=["path", "sentence"]))
+    dfs.append(pd.read_csv(filenames[0], sep='\\t\\t', names=["path", "sentence"]))
+    dfs.append(pd.read_csv(filenames[1], sep='\\t\\t', names=["path", "sentence"]))
 
     for i, dir in enumerate(data_dirs):
         dfs[i]["path"] = dfs[i].apply(lambda row: str(data_dirs[i]) + "/" + row + ".wav", axis=1)
@@ -78,17 +78,17 @@ model = Wav2Vec2ForCTC.from_pretrained("cahya/wav2vec2-large-xlsr-sundanese")
 resampler = torchaudio.transforms.Resample(48_000, 16_000)
 
 # Preprocessing the datasets.
-# We need to read the aduio files as arrays
+# We need to read the audio files as arrays
 def speech_file_to_array_fn(batch):
-\\tspeech_array, sampling_rate = torchaudio.load(batch["path"])
-\\tbatch["speech"] = resampler(speech_array).squeeze().numpy()
-\\treturn batch
+    speech_array, sampling_rate = torchaudio.load(batch["path"])
+    batch["speech"] = resampler(speech_array).squeeze().numpy()
+    return batch
 
 test_dataset = test_dataset.map(speech_file_to_array_fn)
 inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
 
 with torch.no_grad():
-\\tlogits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
+    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
 
 predicted_ids = torch.argmax(logits, dim=-1)
 
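The hunk above replaces literal `\t` escape artifacts with real indentation. For context, the `Resample(48_000, 16_000)` step assumes the source recordings are 48 kHz and converts them to the 16 kHz input XLSR-Wav2Vec2 expects. A small self-contained sketch of that step (the file path is assumed, not from the commit):

```python
import torchaudio

# Load one recording and resample it to the 16 kHz the model expects.
# Building the resampler from the file's reported rate avoids silently
# mis-resampling files that are not actually 48 kHz.
speech_array, sampling_rate = torchaudio.load("su_id_female_00001.wav")  # hypothetical path
resampler = torchaudio.transforms.Resample(orig_freq=sampling_rate, new_freq=16_000)
speech_16k = resampler(speech_array).squeeze().numpy()
```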
@@ -108,38 +108,68 @@ from datasets import load_dataset, load_metric
 from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
 import re
 
-## TODO: load from OpenSLR https://openslr.org/41/
+def load_dataset_sundanese():
+    root_dir = Path("/dataset/ASR/sundanese")
+    url_sundanese_female = "https://www.openslr.org/resources/44/su_id_female.zip"
+    url_sundanese_male = "https://www.openslr.org/resources/44/su_id_male.zip"
+    data_dirs = [ root_dir/"su_id_female/wavs", root_dir/"su_id_male/wavs" ]
+    filenames = [ root_dir/"su_id_female/line_index.tsv", root_dir/"su_id_male/line_index.tsv" ]
+
+    if not (root_dir/"su_id_female").exists():
+        !wget -P {root_dir} {url_sundanese_female}
+        !unzip {root_dir}/su_id_female.zip -d {root_dir}
+    if not (root_dir/"su_id_male").exists():
+        !wget -P {root_dir} {url_sundanese_male}
+        !unzip {root_dir}/su_id_male.zip -d {root_dir}
+
+    dfs = []
+
+    dfs.append(pd.read_csv(filenames[0], sep='\\t\\t', names=["path", "sentence"]))
+    dfs.append(pd.read_csv(filenames[1], sep='\\t\\t', names=["path", "sentence"]))
+
+    for i, dir in enumerate(data_dirs):
+        dfs[i]["path"] = dfs[i].apply(lambda row: str(data_dirs[i]) + "/" + row + ".wav", axis=1)
+    df = pd.concat(dfs)
+    # df = df.sample(frac=1, random_state=1).reset_index(drop=True)
+    dataset = Dataset.from_pandas(df)
+    dataset = dataset.remove_columns('__index_level_0__')
+
+    return dataset.train_test_split(test_size=0.1, seed=1)
+
+dataset = load_dataset_sundanese()
+test_dataset = dataset['test']
+
 wer = load_metric("wer")
 
 processor = Wav2Vec2Processor.from_pretrained("cahya/wav2vec2-large-xlsr-sundanese")
 model = Wav2Vec2ForCTC.from_pretrained("cahya/wav2vec2-large-xlsr-sundanese")
 model.to("cuda")
 
-chars_to_ignore_regex = '[\\\\,\\\\?\\\\.\\\\!\\\\-\\\\;\\\\:\\\\"\\\\“\\\\%\\\\‘\\\\'\\\\”]'
+chars_to_ignore_regex = '[\\\\\\\\,\\\\\\\\?\\\\\\\\.\\\\\\\\!\\\\\\\\-\\\\\\\\;\\\\\\\\:\\\\\\\\"\\\\\\\\“\\\\\\\\%\\\\\\\\‘\\\\\\\\'\\\\\\\\”]'
 
 resampler = torchaudio.transforms.Resample(48_000, 16_000)
 
 # Preprocessing the datasets.
 # We need to read the aduio files as arrays
 def speech_file_to_array_fn(batch):
-\\tbatch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
-\\tspeech_array, sampling_rate = torchaudio.load(batch["path"])
-\\tbatch["speech"] = resampler(speech_array).squeeze().numpy()
-\\treturn batch
+    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
+    speech_array, sampling_rate = torchaudio.load(batch["path"])
+    batch["speech"] = resampler(speech_array).squeeze().numpy()
+    return batch
 
 test_dataset = test_dataset.map(speech_file_to_array_fn)
 
 # Preprocessing the datasets.
-# We need to read the aduio files as arrays
+# We need to read the audio files as arrays
 def evaluate(batch):
-\\tinputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
+    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
 
-\\twith torch.no_grad():
-\\t\\tlogits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
+    with torch.no_grad():
+        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
 
     pred_ids = torch.argmax(logits, dim=-1)
-\\tbatch["pred_strings"] = processor.batch_decode(pred_ids)
-\\treturn batch
+    batch["pred_strings"] = processor.batch_decode(pred_ids)
+    return batch
 
 result = test_dataset.map(evaluate, batched=True, batch_size=8)
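One caveat about the added `load_dataset_sundanese()`: the `!wget` and `!unzip` lines are IPython shell escapes, so the function only runs inside a notebook, not as a plain script. A plain-Python equivalent of the download step might look like this (a sketch, not part of the commit; paths and URL copied from the added code):

```python
import subprocess
from pathlib import Path

root_dir = Path("/dataset/ASR/sundanese")
url_sundanese_female = "https://www.openslr.org/resources/44/su_id_female.zip"

# Download and unpack only if the directory is not already there,
# mirroring the notebook's `!wget` / `!unzip` logic.
if not (root_dir / "su_id_female").exists():
    subprocess.run(["wget", "-P", str(root_dir), url_sundanese_female], check=True)
    subprocess.run(["unzip", str(root_dir / "su_id_female.zip"), "-d", str(root_dir)], check=True)
```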
 
 
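The hunk ends at the `evaluate` map. In these XLSR evaluation scripts the mapped `result` is then scored against the cleaned transcripts with the `wer` metric loaded earlier; a sketch of that closing step, assuming the rest of the README follows the usual template:

```python
# Score the batched predictions against the normalized reference sentences.
print("WER: {:.2f}".format(
    100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])
))
```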