palbha committed on
Commit
59d13b4
·
verified ·
1 Parent(s): 2077863

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -12
app.py CHANGED
@@ -8,20 +8,10 @@ import os
8
  token = os.getenv("HF_TOKEN")
9
  login(token=token)
10
 
11
- # Whisper Model Optimization
12
- model = "openai/whisper-tiny"
13
- DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
14
 
15
- processor = AutoProcessor.from_pretrained(model)
16
 
17
 
18
- transcriber = pipeline(
19
- "automatic-speech-recognition",
20
- model=model,
21
- tokenizer=processor.tokenizer,
22
- feature_extractor=processor.feature_extractor,
23
- device=0 if torch.cuda.is_available() else "cpu",
24
- )
25
 
26
 
27
 
@@ -29,7 +19,18 @@ transcriber = pipeline(
29
  def process_audio(audio_file):
30
  if audio_file is None:
31
  return "Error: No audio provided!"
32
-
 
 
 
 
 
 
 
 
 
 
 
33
  # Transcribe audio
34
  transcript = transcriber(audio_file,return_timestamps=True)["text"]
35
  del transcriber
 
8
  token = os.getenv("HF_TOKEN")
9
  login(token=token)
10
 
 
 
 
11
 
12
+ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
13
 
14
 
 
 
 
 
 
 
 
15
 
16
 
17
 
 
19
  def process_audio(audio_file):
20
  if audio_file is None:
21
  return "Error: No audio provided!"
22
+
23
+ # Whisper Model Optimization
24
+ model = "openai/whisper-tiny"
25
+ processor = AutoProcessor.from_pretrained(model)
26
+
27
+ transcriber = pipeline(
28
+ "automatic-speech-recognition",
29
+ model=model,
30
+ tokenizer=processor.tokenizer,
31
+ feature_extractor=processor.feature_extractor,
32
+ device=0 if torch.cuda.is_available() else "cpu",
33
+ )
34
  # Transcribe audio
35
  transcript = transcriber(audio_file,return_timestamps=True)["text"]
36
  del transcriber