Update app.py
app.py
CHANGED
@@ -13,12 +13,23 @@ model = WhisperForConditionalGeneration.from_pretrained(model_id)
 device = "cuda" if torch.cuda.is_available() else "cpu"
 model.to(device)
 
-#
+# Global variable to control stopping
+stop_processing = False
+
+# Function to stop transcription
+def stop():
+    global stop_processing
+    stop_processing = True  # This will break transcription
+
+# Function to process long audio in chunks
 def transcribe(audio):
+    global stop_processing
+    stop_processing = False  # Reset stop flag when new transcription starts
+
     # Load the audio file and convert to 16kHz
     waveform, sr = librosa.load(audio, sr=16000)
 
-    # Set chunk size (~
+    # Set chunk size (~4 min per chunk)
     chunk_duration = 4 * 60  # 4 minutes (240 seconds)
     max_audio_length = 60 * 60  # 60 minutes
     chunks = []
@@ -29,6 +40,9 @@ def transcribe(audio):
 
     # Split audio into ~4-minute chunks
     for i in range(0, len(waveform), sr * chunk_duration):
+        if stop_processing:
+            return "⚠️ Transcription Stopped by User ⚠️"
+
         chunk = waveform[i : i + sr * chunk_duration]
         if len(chunk) < sr * 2:  # Skip chunks shorter than 2 seconds
             continue
@@ -37,8 +51,11 @@
     # Process each chunk and transcribe
     transcriptions = []
     for chunk in chunks:
+        if stop_processing:
+            return "⚠️ Transcription Stopped by User ⚠️"
+
         input_features = processor(chunk, sampling_rate=16000, return_tensors="pt").input_features.to(device)
-
+
         with torch.no_grad():
             predicted_ids = model.generate(
                 input_features,
@@ -57,13 +74,17 @@
     return full_transcription
 
 # Create the Gradio Interface
-
-
-
-
-
-
-)
+with gr.Blocks() as iface:
+    gr.Markdown("# Hebrew Speech-to-Text (Whisper)")
+
+    audio_input = gr.Audio(type="filepath", label="Upload Hebrew Audio")
+    output_text = gr.Textbox(label="Transcription Output")
+
+    start_btn = gr.Button("Start Transcription")
+    stop_btn = gr.Button("Stop Processing", variant="stop")
+
+    start_btn.click(transcribe, inputs=audio_input, outputs=output_text)
+    stop_btn.click(stop)  # Calls the stop function when clicked
 
 # Launch the Gradio app
 iface.launch()
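For reference, the arithmetic behind the 4-minute chunks: at `sr = 16000`, each chunk spans `sr * chunk_duration` = 16000 × 240 = 3,840,000 samples, the final partial chunk is kept, and anything shorter than 2 seconds (32,000 samples) is skipped. A minimal self-contained sketch of that slicing logic (numpy stands in for the librosa-loaded waveform; the 10-minute length is illustrative):

```python
import numpy as np

sr = 16000
waveform = np.zeros(10 * 60 * sr, dtype=np.float32)  # hypothetical 10-minute signal

chunk_duration = 4 * 60     # 240 s
step = sr * chunk_duration  # 3,840,000 samples per chunk

chunks = [waveform[i : i + step] for i in range(0, len(waveform), step)]
chunks = [c for c in chunks if len(c) >= sr * 2]  # skip chunks shorter than 2 s

print([len(c) / sr for c in chunks])  # [240.0, 240.0, 120.0]
```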
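One behavioral note on the stop mechanism this commit adds: `stop_processing` is a module-level flag, and `transcribe` only checks it at the top of each loop iteration, so a chunk whose `model.generate` call is already running completes before the stop takes effect. A minimal sketch of the same cooperative-stop pattern outside Gradio (the worker thread and timings here are illustrative, not part of app.py):

```python
import threading
import time

stop_processing = False

def stop():
    global stop_processing
    stop_processing = True

def transcribe_chunks(n_chunks):
    global stop_processing
    stop_processing = False
    parts = []
    for i in range(n_chunks):
        if stop_processing:   # checked only between chunks, so a chunk
            return "stopped"  # already in flight still runs to completion
        time.sleep(0.2)       # stand-in for the per-chunk model.generate(...)
        parts.append(f"chunk {i}")
    return " | ".join(parts)

worker = threading.Thread(target=lambda: print(transcribe_chunks(10)))
worker.start()
time.sleep(0.5)
stop()         # analogous to stop_btn.click(stop) firing mid-run
worker.join()  # prints: stopped
```

This works in a typical single-process Space because both button handlers run in the same process and share module globals. The flag is also shared across visitors, so in a multi-user Space one person's Stop button would halt another's transcription; a per-session value (e.g. `gr.State`) would avoid that.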