Kr08 committed on
Commit fd470bd · verified · 1 Parent(s): b792cce

Update app.py

Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -15,7 +15,7 @@ summarizer_tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
 qa_model = AutoModelForQuestionAnswering.from_pretrained("distilbert-base-cased-distilled-squad").to(device)
 qa_tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased-distilled-squad")
 
-@spaces.GPU
+@spaces.GPU(duration=120)
 def transcribe_audio(audio_file, translate, model_size):
     language_segments, final_segments = process_audio(audio_file, translate=translate, model_size=model_size)
 
@@ -38,14 +38,14 @@ def transcribe_audio(audio_file, translate, model_size):
 
     return output, full_text
 
-@spaces.GPU
+@spaces.GPU(duration=120)
 def summarize_text(text):
     inputs = summarizer_tokenizer(text, max_length=1024, truncation=True, return_tensors="pt").to(device)
     summary_ids = summarizer_model.generate(inputs["input_ids"], max_length=150, min_length=50, do_sample=False)
     summary = summarizer_tokenizer.decode(summary_ids[0], skip_special_tokens=True)
     return summary
 
-@spaces.GPU
+@spaces.GPU(duration=120)
 def answer_question(context, question):
     inputs = qa_tokenizer(question, context, return_tensors="pt").to(device)
     outputs = qa_model(**inputs)
@@ -54,13 +54,13 @@ def answer_question(context, question):
     answer = qa_tokenizer.decode(inputs["input_ids"][0][answer_start:answer_end])
     return answer
 
-@spaces.GPU
+@spaces.GPU(duration=120)
 def process_and_summarize(audio_file, translate, model_size):
     transcription, full_text = transcribe_audio(audio_file, translate, model_size)
     summary = summarize_text(full_text)
     return transcription, summary
 
-@spaces.GPU
+@spaces.GPU(duration=120)
 def qa_interface(audio_file, translate, model_size, question):
     _, full_text = transcribe_audio(audio_file, translate, model_size)
     answer = answer_question(full_text, question)
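
Note on the change: on Hugging Face ZeroGPU Spaces, the spaces.GPU decorator attaches a GPU to the process only while the decorated function is running, and the duration argument requests a longer allocation window per call (120 seconds here, instead of the shorter default). A minimal sketch of the pattern, assuming the standard spaces package and using a hypothetical run_inference function as a stand-in for the app's real transcription, summarization, and QA calls:

    import spaces   # Hugging Face ZeroGPU helper package
    import torch

    device = "cuda"  # on ZeroGPU, the GPU is only attached while a @spaces.GPU function runs

    @spaces.GPU(duration=120)   # request up to ~120 s of GPU time per invocation
    def run_inference(values):  # hypothetical stand-in for the app's model functions
        tensor = torch.tensor(values, dtype=torch.float32).to(device)
        return (tensor * 2).cpu().tolist()

Outside a ZeroGPU Space the decorator is documented to have no effect, so the same functions should still run on regular GPU hardware; the longer duration mainly matters for calls like whole-file transcription that can exceed the default window.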