Update main.py
main.py CHANGED
@@ -38,25 +38,25 @@ def convert_size(bytes):
     return f"{bytes / 1024**2:.2f} MB"
 
 def transcribe(audio_path):
-    print(f"Transcribing audio from: {audio_path}", flush=True)
+    #print(f" Transcribing audio from: {audio_path}", flush=True)
 
     # Load audio and pad/trim it to fit 30 seconds
-    print(" Loading and processing audio...", flush=True)
+    #print(" Loading and processing audio...", flush=True)
     audio = whisper.load_audio(audio_path)
     audio = whisper.pad_or_trim(audio)
 
     # Make log-Mel spectrogram and move to the same device as the model
-    print(" Creating log-Mel spectrogram...", flush=True)
+    #print(" Creating log-Mel spectrogram...", flush=True)
     mel = whisper.log_mel_spectrogram(audio).to(model.device)
 
     # Detect the spoken language
-    print(" Detecting language...", flush=True)
+    #print(" Detecting language...", flush=True)
     _, probs = model.detect_language(mel)
     language = max(probs, key=probs.get)
-    print(f" Detected language: {language}", flush=True)
+    #print(f" Detected language: {language}", flush=True)
 
     # Decode the audio
-    print(" Decoding audio...", flush=True)
+    #print(" Decoding audio...", flush=True)
     options = whisper.DecodingOptions(fp16=False)
     result = whisper.decode(model, mel, options)
 
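This hunk comments out every per-step progress print in transcribe rather than deleting it, which suggests the output was only needed during debugging. A minimal sketch of the same function with that output routed through the standard logging module instead, so verbosity can be toggled without editing the source; the load_model call and the return value are assumptions, since neither appears in this diff:

import logging

import whisper

logger = logging.getLogger(__name__)

# Assumption: main.py loads the model at module level; the model size is not shown in this diff.
model = whisper.load_model("base")

def transcribe(audio_path):
    logger.debug("Transcribing audio from: %s", audio_path)

    # Load audio and pad/trim it to fit 30 seconds
    audio = whisper.load_audio(audio_path)
    audio = whisper.pad_or_trim(audio)

    # Make log-Mel spectrogram and move to the same device as the model
    mel = whisper.log_mel_spectrogram(audio).to(model.device)

    # Detect the spoken language
    _, probs = model.detect_language(mel)
    language = max(probs, key=probs.get)
    logger.debug("Detected language: %s", language)

    # Decode the audio
    options = whisper.DecodingOptions(fp16=False)
    result = whisper.decode(model, mel, options)

    # Assumption: the original function's return value is not visible in this hunk.
    return result.text, language

Enabling the step-by-step output then becomes a one-line configuration change, e.g. logging.basicConfig(level=logging.DEBUG), instead of uncommenting five print calls.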
@@ -106,7 +106,7 @@ def transcribe_audio():
     response_sent_time, _ = get_time()
 
     # Return the transcription, detected language, and timing information
-    print(f" Transcription: {transcription}, Language: {language}\n", flush=True)
+    print(f" Transcription: {transcription}, Language: {language}, Processing Time: {transcription_duration_seconds}\n", flush=True)
     return jsonify({
         "transcription": transcription,
         "language": language,
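The second hunk only extends the remaining log line with the processing time. For context, a hedged sketch of what the surrounding Flask endpoint plausibly looks like: only the print format and the first two jsonify keys come from this diff, so the route, the upload handling, the timing via time.monotonic(), and the processing_time_seconds key are all assumptions (the real code's get_time() helper and response_sent_time bookkeeping are not reconstructed here):

import time

from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route("/transcribe", methods=["POST"])  # assumed route
def transcribe_audio():
    # Assumption: how the upload arrives is not shown in this diff.
    audio_file = request.files["audio"]
    audio_path = "/tmp/upload.wav"
    audio_file.save(audio_path)

    # Time the transcription; the real code may measure this differently.
    start = time.monotonic()
    transcription, language = transcribe(audio_path)
    transcription_duration_seconds = time.monotonic() - start

    # The print format and the first two response keys match the diff; the third key is assumed.
    print(f" Transcription: {transcription}, Language: {language}, Processing Time: {transcription_duration_seconds}\n", flush=True)
    return jsonify({
        "transcription": transcription,
        "language": language,
        "processing_time_seconds": transcription_duration_seconds,
    })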