kjysmu committed (verified)
Commit b218289 · Parent(s): 2c30cf4

Update app.py

Files changed (1)
  1. app.py +35 -35
app.py CHANGED
@@ -157,30 +157,30 @@ def resample_waveform(waveform, original_sample_rate, target_sample_rate):
 
 
 
-def split_audio(waveform, sample_rate):
-    segment_samples = segment_duration * sample_rate
-    total_samples = waveform.size(0)
+# def split_audio(waveform, sample_rate):
+#     segment_samples = segment_duration * sample_rate
+#     total_samples = waveform.size(0)
 
-    segments = []
-    # If the audio is shorter than the segment duration, just use the entire audio
-    if total_samples <= segment_samples:
-        segments.append(waveform)
-    else:
-        # Split the audio into segments of the specified duration
-        for start in range(0, total_samples, segment_samples):
-            end = min(start + segment_samples, total_samples)
-            segment = waveform[start:end]
-            segments.append(segment)
+#     segments = []
+#     # If the audio is shorter than the segment duration, just use the entire audio
+#     if total_samples <= segment_samples:
+#         segments.append(waveform)
+#     else:
+#         # Split the audio into segments of the specified duration
+#         for start in range(0, total_samples, segment_samples):
+#             end = min(start + segment_samples, total_samples)
+#             segment = waveform[start:end]
+#             segments.append(segment)
 
-    # Ensure we have at least one segment with a minimum length
-    if len(segments) == 0 or all(len(segment) < 100 for segment in segments):
-        # Create a padded segment if audio is too short
-        padded_segment = torch.zeros(segment_samples)
-        if total_samples > 0:
-            padded_segment[:total_samples] = waveform
-        segments = [padded_segment]
+#     # Ensure we have at least one segment with a minimum length
+#     if len(segments) == 0 or all(len(segment) < 100 for segment in segments):
+#         # Create a padded segment if audio is too short
+#         padded_segment = torch.zeros(segment_samples)
+#         if total_samples > 0:
+#             padded_segment[:total_samples] = waveform
+#         segments = [padded_segment]
 
-    return segments
+#     return segments
 
 # def split_audio(waveform, sample_rate):
 #     segment_samples = segment_duration * sample_rate
@@ -203,23 +203,23 @@ def split_audio(waveform, sample_rate):
 
 #     return segments
 
-# def split_audio(waveform, sample_rate):
-#     segment_samples = segment_duration * sample_rate
-#     total_samples = waveform.size(0)
+def split_audio(waveform, sample_rate):
+    segment_samples = segment_duration * sample_rate
+    total_samples = waveform.size(0)
 
-#     segments = []
-#     for start in range(0, total_samples, segment_samples):
-#         end = start + segment_samples
-#         if end <= total_samples:
-#             segment = waveform[start:end]
-#             segments.append(segment)
+    segments = []
+    for start in range(0, total_samples, segment_samples):
+        end = start + segment_samples
+        if end <= total_samples:
+            segment = waveform[start:end]
+            segments.append(segment)
 
-#     # In case audio length is shorter than segment length.
-#     if len(segments) == 0:
-#         segment = waveform
-#         segments.append(segment)
+    # In case audio length is shorter than segment length.
+    if len(segments) == 0:
+        segment = waveform
+        segments.append(segment)
 
-#     return segments
+    return segments
 
 
 def safe_remove_dir(directory):
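
In short, the commit swaps which split_audio variant is active: the removed variant zero-padded audio shorter than one segment and kept a trailing partial segment, while the reinstated variant keeps only full-length segments and falls back to the raw waveform when the audio is shorter than one segment. Below is a minimal standalone sketch of the now-active behavior; it assumes segment_duration is the module-level constant defined elsewhere in app.py, and the 10-second duration and 16 kHz sample rate used here are illustrative only, not values taken from the commit.

import torch

segment_duration = 10  # assumption: app.py defines this elsewhere; 10 s is only for illustration

def split_audio(waveform, sample_rate):
    # Now-active variant from the commit: keep only full-length segments.
    segment_samples = segment_duration * sample_rate
    total_samples = waveform.size(0)

    segments = []
    for start in range(0, total_samples, segment_samples):
        end = start + segment_samples
        if end <= total_samples:
            segments.append(waveform[start:end])

    # Fall back to the whole waveform when it is shorter than one segment.
    if len(segments) == 0:
        segments.append(waveform)
    return segments

sr = 16000
short = torch.randn(3 * sr)   # 3 s: shorter than one segment
long = torch.randn(25 * sr)   # 25 s: two full segments plus a 5 s remainder

print(len(split_audio(short, sr)), split_audio(short, sr)[0].shape)  # 1 torch.Size([48000]) -> no zero padding
print(len(split_audio(long, sr)))                                    # 2 -> the 5 s remainder is dropped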