EladSpamson committed on
Commit
1c7c059
·
verified ·
1 Parent(s): 03d3d29

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -11
app.py CHANGED
@@ -1,6 +1,6 @@
1
  import os
2
 
3
- # Set environment variables VERY early, before HF or Transformers are imported:
4
  os.environ["HF_HOME"] = "/tmp/hf_cache"
5
  os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf_cache"
6
  os.environ["HF_DATASETS_CACHE"] = "/tmp/hf_cache"
@@ -14,7 +14,7 @@ from transformers import WhisperProcessor, WhisperForConditionalGeneration
14
 
15
  app = Flask(__name__)
16
 
17
- # Use a smaller model for CPU
18
  model_id = "openai/whisper-base"
19
  processor = WhisperProcessor.from_pretrained(model_id)
20
  model = WhisperForConditionalGeneration.from_pretrained(model_id)
@@ -22,8 +22,6 @@ model = WhisperForConditionalGeneration.from_pretrained(model_id)
22
  device = "cuda" if torch.cuda.is_available() else "cpu"
23
  model.to(device)
24
 
25
- forced_decoder_ids = processor.get_decoder_prompt_ids(language="he", task="transcribe")
26
-
27
  def transcribe_audio(audio_url):
28
  # 1) Download audio file to /tmp
29
  response = requests.get(audio_url)
@@ -31,14 +29,14 @@ def transcribe_audio(audio_url):
31
  with open(audio_path, "wb") as f:
32
  f.write(response.content)
33
 
34
- # 2) Load with librosa
35
  waveform, sr = librosa.load(audio_path, sr=16000)
36
 
37
- # 3) Truncate to 1 hour
38
  max_duration_sec = 3600
39
  waveform = waveform[:sr * max_duration_sec]
40
 
41
- # 4) Split into 25-second chunks
42
  chunk_duration_sec = 25
43
  chunk_size = sr * chunk_duration_sec
44
  chunks = [waveform[i : i + chunk_size] for i in range(0, len(waveform), chunk_size)]
@@ -48,11 +46,9 @@ def transcribe_audio(audio_url):
48
  inputs = processor(chunk, sampling_rate=16000, return_tensors="pt", padding=True)
49
  input_features = inputs.input_features.to(device)
50
 
 
51
  with torch.no_grad():
52
- predicted_ids = model.generate(
53
- input_features,
54
- forced_decoder_ids=forced_decoder_ids
55
- )
56
 
57
  transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
58
  partial_text += transcription + "\n"
 
1
  import os
2
 
3
+ # Ensure environment variables are set before Transformers are imported
4
  os.environ["HF_HOME"] = "/tmp/hf_cache"
5
  os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf_cache"
6
  os.environ["HF_DATASETS_CACHE"] = "/tmp/hf_cache"
 
14
 
15
  app = Flask(__name__)
16
 
17
+ # Using a smaller model for faster CPU loading
18
  model_id = "openai/whisper-base"
19
  processor = WhisperProcessor.from_pretrained(model_id)
20
  model = WhisperForConditionalGeneration.from_pretrained(model_id)
 
22
  device = "cuda" if torch.cuda.is_available() else "cpu"
23
  model.to(device)
24
 
 
 
25
  def transcribe_audio(audio_url):
26
  # 1) Download audio file to /tmp
27
  response = requests.get(audio_url)
 
29
  with open(audio_path, "wb") as f:
30
  f.write(response.content)
31
 
32
+ # 2) Load audio with librosa
33
  waveform, sr = librosa.load(audio_path, sr=16000)
34
 
35
+ # 3) Optional safety limit (1 hour)
36
  max_duration_sec = 3600
37
  waveform = waveform[:sr * max_duration_sec]
38
 
39
+ # 4) Split into smaller chunks (25s)
40
  chunk_duration_sec = 25
41
  chunk_size = sr * chunk_duration_sec
42
  chunks = [waveform[i : i + chunk_size] for i in range(0, len(waveform), chunk_size)]
 
46
  inputs = processor(chunk, sampling_rate=16000, return_tensors="pt", padding=True)
47
  input_features = inputs.input_features.to(device)
48
 
49
+ # **No** forced_decoder_ids => Whisper auto-detects language
50
  with torch.no_grad():
51
+ predicted_ids = model.generate(input_features)
 
 
 
52
 
53
  transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
54
  partial_text += transcription + "\n"