pere committed
Commit 13c82c0 · 1 Parent(s): 85fc302

update test

Files changed (1)
  1. app.py +0 -8
app.py CHANGED
@@ -25,16 +25,9 @@ print(f"Using device: {device}")
 
 @spaces.GPU(duration=60 * 2)
 def pipe(file, return_timestamps=False):
-    # model = WhisperForConditionalGeneration.from_pretrained(MODEL_NAME, torch_dtype=torch.float16, low_cpu_mem_usage=True)
-    # model.to(device)
-    # processor = WhisperProcessor.from_pretrained(MODEL_NAME)
-    # model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)
-    # model.generation_config.cache_implementation = "static"
     asr = pipeline(
         task="automatic-speech-recognition",
         model=MODEL_NAME,
-        # tokenizer=AutoTokenizer.from_pretrained(MODEL_NAME),
-        # feature_extractor=AutoFeatureExtractor.from_pretrained(MODEL_NAME),
         chunk_length_s=30,
         device=device,
         token=auth_token,
@@ -46,7 +39,6 @@ def pipe(file, return_timestamps=False):
         task="transcribe",
         no_timestamps=not return_timestamps,
     )
-    # asr.model.config.no_timestamps_token_id = asr.tokenizer.encode("<|notimestamps|>", add_special_tokens=False)[0]
     return asr(file, return_timestamps=return_timestamps, batch_size=24)
 
 def transcribe(file, return_timestamps=False):
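
With the commented-out manual WhisperForConditionalGeneration / WhisperProcessor setup removed, pipe() now goes entirely through the high-level transformers pipeline. The snippet below is a minimal, self-contained sketch of that call path, not the app itself: the checkpoint name, device, token, and audio path are placeholders standing in for MODEL_NAME, device, auth_token, and the uploaded file, which app.py defines outside this diff.

from transformers import pipeline

# Placeholders (assumptions, not values from app.py): a small public Whisper
# checkpoint, CPU execution, and no Hub auth token.
MODEL_NAME = "openai/whisper-tiny"
device = "cpu"
auth_token = None

# Build the ASR pipeline the same way the code kept by this commit does.
asr = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,   # split long audio into 30-second chunks
    device=device,
    token=auth_token,
)

# "audio.mp3" is a placeholder path; batch_size mirrors the value in app.py.
result = asr("audio.mp3", return_timestamps=True, batch_size=24)
print(result["text"])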
 