jozzy committed
Commit 621e7ea · 1 Parent(s): 314105e

Update app.py

Files changed (1)
  1. app.py +7 -7
app.py CHANGED
@@ -20,7 +20,7 @@ def roleChoice(role):
     return "role:" + role
 
 
-def audioGPT(audio):
+"""def audioGPT(audio):
     global messages
 
     audio_file = open(audio, "rb")
@@ -38,7 +38,7 @@ def audioGPT(audio):
         if msg['role'] != 'system':
             chats += msg['role'] + ": " + msg['content'] + "\n\n"
 
-    return chats
+    return chats"""
 
 
 def textGPT(text):
@@ -59,7 +59,7 @@ def textGPT(text):
     return chats
 
 
-def siriGPT(audio):
+"""def siriGPT(audio):
     global messages
 
     audio_file = open(audio, "rb")
@@ -77,7 +77,7 @@ def siriGPT(audio):
     narrate_ans = gTTS(text=system_message['content'], lang=lang, slow=False)
     narrate_ans.save("narrate.wav")
 
-    return "narrate.wav"
+    return "narrate.wav" """
 
 
 def fileGPT(prompt, file_obj):
@@ -131,10 +131,10 @@ with gr.Blocks() as chatHistory:
 
 role = gr.Interface(fn=roleChoice, inputs="text", outputs="text", description = "Choose your GPT roles, e.g. You are a helpful technology assistant. 你是一位 IT 架构师。 你是一位开发者关系顾问。你是一位机器学习工程师。你是一位高级 C++ 开发人员 ")
 text = gr.Interface(fn=textGPT, inputs="text", outputs="text")
-audio = gr.Interface(fn=audioGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs="text")
-siri = gr.Interface(fn=siriGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs = "audio")
+#audio = gr.Interface(fn=audioGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs="text")
+#siri = gr.Interface(fn=siriGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs = "audio")
 file = gr.Interface(fn=fileGPT, inputs=["text", "file"], outputs="text", description = "Enter prompt sentences and your PDF. e.g. lets think step by step, summarize this following text: 或者 让我们一步一步地思考,总结以下的内容:")
-demo = gr.TabbedInterface([role, text, audio, siri, file, chatHistory], [ "roleChoice", "chatGPT", "audioGPT", "siriGPT", "fileGPT", "ChatHistory"])
+demo = gr.TabbedInterface([role, text, file, chatHistory], ["roleChoice", "chatGPT", "fileGPT", "ChatHistory"])
 
 if __name__ == "__main__":
     demo.launch(enable_queue=False, auth=lambda u, p: user_db.get(u) == p,
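
For context, after this commit the Space keeps only the text, file, and history tabs: the audio-based audioGPT and siriGPT interfaces are commented out, and the remaining gr.Interface objects plus the chatHistory Blocks are composed into a gr.TabbedInterface launched behind a username/password check. The sketch below shows that wiring in isolation, assuming Gradio 3.x; echoGPT and the user_db contents here are illustrative placeholders, not code from this repository.

import gradio as gr

# Illustrative stand-in for textGPT; the real function updates the global
# messages list and returns the accumulated chat transcript.
def echoGPT(text):
    return "user: " + text + "\n\nassistant: ..."

# Illustrative credential store; the real user_db is defined elsewhere in app.py.
user_db = {"demo": "demo"}

text = gr.Interface(fn=echoGPT, inputs="text", outputs="text")

with gr.Blocks() as chatHistory:
    gr.Markdown("Chat history would be rendered here.")

# Same composition as the updated demo line: one tab name per interface.
demo = gr.TabbedInterface([text, chatHistory], ["chatGPT", "ChatHistory"])

if __name__ == "__main__":
    # launch(auth=...) accepts a (username, password) -> bool callable, as in the diff.
    demo.launch(auth=lambda u, p: user_db.get(u) == p)

The original call also passes enable_queue=False; that launch argument existed in older Gradio releases but has since been removed, so the sketch leaves it out.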