KaiChen1998 committed on
Commit
19605cf
·
1 Parent(s): d264d58

adjust time limit

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -51,11 +51,11 @@ asr_format = "Please recognize the text corresponding to the follwing speech.\n"
51
  tts_format = "Please synthesize the speech corresponding to the follwing text.\n"
52
  chat_format = r'Please recognize the texts, emotion and pitch from the user question speech units and provide the texts, emotion, pitch and speech units for the assistant response. \nEmotion should be chosen from ["neutral", "happy", "sad", "angry", "surprised", "disgusted", "fearful"]. \nPitch should be chosen from ["low", "normal", "high"].\nYour output should be in json format.\nAn output example is:\n{"user question text": "", "user question emotion": "", "user question pitch": "", "assistant response text": "", "assistant response emotion": "", "assistant response pitch": "","assistant response speech": ""}\n\nuser question speech:'
53
 
54
- @spaces.GPU(duration=10)
55
  def s2u_asr(text, audio_file):
56
  return asr_format + s2u_extract_unit_demo(s2u_model, audio_file, model_name=s2u_model_name, reduced=reduced)
57
 
58
- @spaces.GPU(duration=10)
59
  def s2u_chat(text, audio_file):
60
  return chat_format + s2u_extract_unit_demo(s2u_model, audio_file, model_name=s2u_model_name, reduced=reduced)
61
 
@@ -186,7 +186,7 @@ def add_text(state, text, image, image_process_mode, audio_input, audio_mode):
186
  # Input: [state, temperature, top_p, max_output_tokens, speaker]
187
  # Return: [state, chatbot] + btn_list
188
  ############
189
- @spaces.GPU
190
  def http_bot(state, temperature, top_p, max_new_tokens, speaker):
191
  print(f"http_bot.")
192
 
 
51
  tts_format = "Please synthesize the speech corresponding to the follwing text.\n"
52
  chat_format = r'Please recognize the texts, emotion and pitch from the user question speech units and provide the texts, emotion, pitch and speech units for the assistant response. \nEmotion should be chosen from ["neutral", "happy", "sad", "angry", "surprised", "disgusted", "fearful"]. \nPitch should be chosen from ["low", "normal", "high"].\nYour output should be in json format.\nAn output example is:\n{"user question text": "", "user question emotion": "", "user question pitch": "", "assistant response text": "", "assistant response emotion": "", "assistant response pitch": "","assistant response speech": ""}\n\nuser question speech:'
53
 
54
+ @spaces.GPU(duration=20)
55
  def s2u_asr(text, audio_file):
56
  return asr_format + s2u_extract_unit_demo(s2u_model, audio_file, model_name=s2u_model_name, reduced=reduced)
57
 
58
+ @spaces.GPU(duration=20)
59
  def s2u_chat(text, audio_file):
60
  return chat_format + s2u_extract_unit_demo(s2u_model, audio_file, model_name=s2u_model_name, reduced=reduced)
61
 
 
186
  # Input: [state, temperature, top_p, max_output_tokens, speaker]
187
  # Return: [state, chatbot] + btn_list
188
  ############
189
+ @spaces.GPU(duration=120)
190
  def http_bot(state, temperature, top_p, max_new_tokens, speaker):
191
  print(f"http_bot.")
192