qqwjq1981 committed on
Commit
9b8d377
·
verified ·
1 Parent(s): 3458dd7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -5
app.py CHANGED
@@ -147,7 +147,10 @@ def transcribe_video_with_speakers(video_path):
147
  # Transcribe
148
  result = model.transcribe(audio_path)
149
  logger.info("Audio transcription completed")
150
-
 
 
 
151
  # Alignment
152
  model_a, metadata = whisperx.load_align_model(language_code=result["language"], device=device)
153
  result = whisperx.align(result["segments"], model_a, metadata, audio_path, device)
@@ -196,10 +199,6 @@ def transcribe_video_with_speakers(video_path):
196
  speaker_sample_paths[speaker] = sample_path
197
  logger.info(f"Created sample for {speaker}: {sample_path}")
198
 
199
- # Get the detected language
200
- detected_language = result["language"]
201
- logger.debug(f"Detected language: {detected_language}")
202
-
203
  # Clean up
204
  video.close()
205
  audio_clip.close()
 
147
  # Transcribe
148
  result = model.transcribe(audio_path)
149
  logger.info("Audio transcription completed")
150
+
151
+ # Get the detected language
152
+ detected_language = result["language"]
153
+ logger.debug(f"Detected language: {detected_language}")
154
  # Alignment
155
  model_a, metadata = whisperx.load_align_model(language_code=result["language"], device=device)
156
  result = whisperx.align(result["segments"], model_a, metadata, audio_path, device)
 
199
  speaker_sample_paths[speaker] = sample_path
200
  logger.info(f"Created sample for {speaker}: {sample_path}")
201
 
 
 
 
 
202
  # Clean up
203
  video.close()
204
  audio_clip.close()