Kr08 committed on
Commit
ed2ee59
·
verified ·
1 Parent(s): e1d8168

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -45,7 +45,7 @@ def load_qa_model():
45
  qa_tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased-distilled-squad")
46
  logger.info("QA model loaded.")
47
 
48
- @spaces.GPU
49
  def transcribe_audio(audio_file, translate, model_size, use_diarization):
50
  language_segments, final_segments = process_audio(audio_file, translate=translate, model_size=model_size, use_diarization=use_diarization)
51
 
@@ -70,7 +70,7 @@ def transcribe_audio(audio_file, translate, model_size, use_diarization):
70
 
71
  return output, full_text
72
 
73
- @spaces.GPU
74
  def summarize_text(text):
75
  load_summarization_model()
76
  inputs = summarizer_tokenizer(text, max_length=1024, truncation=True, return_tensors="pt").to(device)
@@ -78,7 +78,7 @@ def summarize_text(text):
78
  summary = summarizer_tokenizer.decode(summary_ids[0], skip_special_tokens=True)
79
  return summary
80
 
81
- @spaces.GPU
82
  def answer_question(context, question):
83
  load_qa_model()
84
  inputs = qa_tokenizer(question, context, return_tensors="pt").to(device)
@@ -88,13 +88,13 @@ def answer_question(context, question):
88
  answer = qa_tokenizer.decode(inputs["input_ids"][0][answer_start:answer_end])
89
  return answer
90
 
91
@spaces.GPU
def process_and_summarize(audio_file, translate, model_size, use_diarization):
    """Run the full transcribe-then-summarize pipeline on one audio file.

    Transcribes (and optionally translates/diarizes) the audio, then
    summarizes the resulting plain-text transcript.

    Returns a ``(transcription, summary)`` tuple.
    """
    transcript_output, transcript_text = transcribe_audio(
        audio_file, translate, model_size, use_diarization
    )
    return transcript_output, summarize_text(transcript_text)
96
 
97
- @spaces.GPU
98
  def qa_interface(audio_file, translate, model_size, use_diarization, question):
99
  _, full_text = transcribe_audio(audio_file, translate, model_size, use_diarization)
100
  answer = answer_question(full_text, question)
 
45
  qa_tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased-distilled-squad")
46
  logger.info("QA model loaded.")
47
 
48
+ @spaces.GPU(duration=120)
49
  def transcribe_audio(audio_file, translate, model_size, use_diarization):
50
  language_segments, final_segments = process_audio(audio_file, translate=translate, model_size=model_size, use_diarization=use_diarization)
51
 
 
70
 
71
  return output, full_text
72
 
73
+ @spaces.GPU(duration=120)
74
  def summarize_text(text):
75
  load_summarization_model()
76
  inputs = summarizer_tokenizer(text, max_length=1024, truncation=True, return_tensors="pt").to(device)
 
78
  summary = summarizer_tokenizer.decode(summary_ids[0], skip_special_tokens=True)
79
  return summary
80
 
81
+ @spaces.GPU(duration=120)
82
  def answer_question(context, question):
83
  load_qa_model()
84
  inputs = qa_tokenizer(question, context, return_tensors="pt").to(device)
 
88
  answer = qa_tokenizer.decode(inputs["input_ids"][0][answer_start:answer_end])
89
  return answer
90
 
91
@spaces.GPU(duration=120)
def process_and_summarize(audio_file, translate, model_size, use_diarization):
    """Run the full transcribe-then-summarize pipeline on one audio file.

    Transcribes (and optionally translates/diarizes) the audio, then
    summarizes the resulting plain-text transcript. Decorated for a
    120-second ZeroGPU allocation window.

    Returns a ``(transcription, summary)`` tuple.
    """
    transcript_output, transcript_text = transcribe_audio(
        audio_file, translate, model_size, use_diarization
    )
    return transcript_output, summarize_text(transcript_text)
96
 
97
+ @spaces.GPU(duration=120)
98
  def qa_interface(audio_file, translate, model_size, use_diarization, question):
99
  _, full_text = transcribe_audio(audio_file, translate, model_size, use_diarization)
100
  answer = answer_question(full_text, question)