Update app.py
app.py
CHANGED
@@ -4,6 +4,7 @@ import requests
 import threading
 import torch
 import librosa
+import psutil

 from flask import Flask, request, jsonify
 from transformers import WhisperProcessor, WhisperForConditionalGeneration
@@ -25,38 +26,31 @@ model = WhisperForConditionalGeneration.from_pretrained(model_id)
 device = "cuda" if torch.cuda.is_available() else "cpu"
 model.to(device)

-# Force Hebrew transcription
+# Force Hebrew transcription
 forced_decoder_ids = processor.get_decoder_prompt_ids(language="he", task="transcribe")

-# Where we send the final transcription
 WEBHOOK_URL = "https://hook.eu1.make.com/86zogci73u394k2uqpulp5yjjwgm8b9x"

 ###############################################################################
 # 2) Background transcription function
 ###############################################################################
-def transcribe_in_background(audio_url, file_id, company, user):
-    """
-    Called by a background thread. Downloads & transcribes audio,
-    then sends results to your Make.com webhook.
-    """
+def transcribe_in_background(audio_url, file_id, company, user, file_name):
     try:
-        #
+        # Download audio
         r = requests.get(audio_url)
         audio_path = "/tmp/temp_audio.wav"
         with open(audio_path, "wb") as f:
             f.write(r.content)

-        #
+        # Load with librosa, limit to 1 hour
         waveform, sr = librosa.load(audio_path, sr=16000)
-
-        # Optional: limit to ~1 hour
         max_sec = 3600
         waveform = waveform[: sr * max_sec]

-        # Calculate
-        call_duration = int(len(waveform) / sr)
+        # Calculate callDuration
+        call_duration = int(len(waveform) / sr)

-        #
+        # Split audio into 25-second chunks
         chunk_sec = 25
         chunk_size = sr * chunk_sec
         chunks = [waveform[i : i + chunk_size] for i in range(0, len(waveform), chunk_size)]
@@ -75,58 +69,64 @@ def transcribe_in_background(audio_url, file_id, company, user):
             transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
             partial_text += transcription + "\n"

-        #
+        # Post final transcription back to Make.com
         payload = {
             "Transcription": partial_text.strip(),
             "callDuration": call_duration,
             "fileId": file_id,
             "company": company,
-            "user": user
+            "user": user,
+            "fileName": file_name
         }
         requests.post(WEBHOOK_URL, json=payload)

     except Exception as e:
-        # In case of errors, notify the webhook
         error_payload = {
             "error": str(e),
             "fileId": file_id,
             "company": company,
-            "user": user
-
+            "user": user,
+            "fileName": file_name
         }
         requests.post(WEBHOOK_URL, json=error_payload)

 ###############################################################################
-# 3) Flask route:
+# 3) Flask route: CPU check and transcription
 ###############################################################################
 @app.route("/transcribe", methods=["POST"])
 def transcribe_endpoint():
-    # 1)
+    # 1) Check CPU usage
+    cpu_usage = psutil.cpu_percent(interval=0.1)  # measure CPU usage over 0.1s
+    if cpu_usage > 1.0:
+        return jsonify({"error": "CPU is busy", "cpuUsage": cpu_usage}), 503
+
+    # 2) Get JSON data
     data = request.get_json()
    audio_url = data.get("audio_url")
     if not audio_url:
         return jsonify({"error": "Missing 'audio_url' in request"}), 400

-    #
+    # 3) Read headers
     file_id = request.headers.get("fileId", "")
     company = request.headers.get("company", "")
     user = request.headers.get("user", "")
+    file_name = request.headers.get("fileName", "")

-    #
+    # 4) Spawn a background thread
     thread = threading.Thread(
         target=transcribe_in_background,
-        args=(audio_url, file_id, company, user)
+        args=(audio_url, file_id, company, user, file_name)
     )
     thread.start()

-    #
+    # 5) Return immediate response
     return jsonify({
         "status": "Received. Transcription in progress.",
         "note": "Results will be sent via webhook once done."
     }), 202

 ###############################################################################
-# 4) Run app
+# 4) Run app locally; HF Spaces uses gunicorn
 ###############################################################################
 if __name__ == "__main__":
     app.run(host="0.0.0.0", port=7860)
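
The per-chunk transcription loop sits in the collapsed context between the second and third hunks (new lines 57-68) and is not touched by this commit. For orientation only, here is a minimal sketch of what that loop most likely looks like, assuming standard WhisperProcessor and model.generate usage; the inputs and input_features names are assumptions, while chunks, predicted_ids, transcription, and partial_text do appear in the diff context.

# Hypothetical reconstruction of the collapsed loop, not the file's actual hidden lines
partial_text = ""
for chunk in chunks:
    # Turn raw 16 kHz samples into log-mel input features
    inputs = processor(chunk, sampling_rate=sr, return_tensors="pt")
    input_features = inputs.input_features.to(device)

    # Decode with Hebrew forced, as configured above
    predicted_ids = model.generate(input_features, forced_decoder_ids=forced_decoder_ids)

    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
    partial_text += transcription + "\n"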
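
After this change, a caller sends the audio URL in the JSON body and passes the metadata, including the new fileName, as request headers; the transcription itself is posted later to WEBHOOK_URL rather than returned in the HTTP response. A sketch of a client call follows; the Space URL and all header values are placeholders, not taken from the repo.

import requests

SPACE_URL = "https://<your-space>.hf.space/transcribe"  # placeholder endpoint

resp = requests.post(
    SPACE_URL,
    json={"audio_url": "https://example.com/recording.wav"},  # placeholder audio
    headers={
        "fileId": "12345",            # echoed back in the webhook payload
        "company": "acme",
        "user": "alice",
        "fileName": "recording.wav",  # new header introduced by this commit
    },
)

if resp.status_code == 202:
    print(resp.json())   # transcription will be sent to the webhook when done
elif resp.status_code == 503:
    print("Busy:", resp.json())  # the CPU check added here rejected the request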