jozzy committed · Commit 4f8729c · 1 Parent(s): dce545a

Update app.py

Files changed (1)
  1. app.py +28 -1
app.py CHANGED
@@ -1,6 +1,10 @@
 import os
 import gradio as gr
 import openai
+
+from textblob import TextBlob
+from gtts import gTTS
+
 openai.api_key = os.environ['OPENAI_API_KEY']

 user_db = {os.environ['username']: os.environ['password']}
@@ -46,6 +50,28 @@ def textGPT(text):
     return chats


+def siriGPT(audio):
+    global messages
+
+    audio_file = open(audio, "rb")
+    transcript = openai.Audio.transcribe("whisper-1", audio_file)
+
+    messages.append({"role": "user", "content": transcript["text"]})
+
+    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
+
+    system_message = response["choices"][0]["message"]
+    messages.append(system_message)
+
+    txblob = TextBlob(system_message)
+    lang = txblob.detect_language()
+
+    narrate_ans = gTTS(text=system_message, lang=lang, slow=False)
+    narrate_ans.save("narrate.wav")
+
+    return "narrate.wav"
+
+


 def clear():
@@ -77,7 +103,8 @@ with gr.Blocks() as chatHistory:

 text = gr.Interface(fn=textGPT, inputs="text", outputs="text")
 audio = gr.Interface(fn=audioGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs="text")
-demo = gr.TabbedInterface([text, audio, chatHistory], [ "chatGPT", "audioGPT", "ChatHistory"])
+siri = gr.Interface(fn=siriGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs = "audio")
+demo = gr.TabbedInterface([text, audio, siri, chatHistory], [ "chatGPT", "audioGPT", "siriGPT", "ChatHistory"])

 if __name__ == "__main__":
     demo.launch(enable_queue=False, auth=lambda u, p: user_db.get(u) == p,
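For context, a minimal standalone sketch of the voice round-trip the new siriGPT function implements: record audio, transcribe it with Whisper, get a chat reply, and narrate it back with gTTS. It assumes the pre-1.0 openai Python SDK used elsewhere in app.py and a module-level messages list seeded before the call; the seed message shown is an assumption, not taken from this diff. Unlike the committed code, the sketch extracts the reply's content string before handing it to gTTS (TextBlob and gTTS expect plain text, not the message dict), and it fixes the language to English, since TextBlob's detect_language() relies on a Google Translate endpoint that recent TextBlob releases no longer support.

import os
import openai
from gtts import gTTS

openai.api_key = os.environ["OPENAI_API_KEY"]

# Assumed module-level history, mirroring how the other handlers in app.py use `messages`.
messages = [{"role": "system", "content": "You are a helpful assistant."}]

def siri_gpt_sketch(audio_path):
    # Transcribe the recorded clip with Whisper (pre-1.0 openai SDK call).
    with open(audio_path, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)

    messages.append({"role": "user", "content": transcript["text"]})

    # Ask the chat model for a reply and keep it in the running history.
    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
    reply = response["choices"][0]["message"]
    messages.append(reply)

    # gTTS needs the reply text itself, not the whole message dict.
    answer_text = reply["content"]

    # Language fixed to English here as an assumption; the commit detects it
    # with TextBlob, which no longer supports detect_language() in current releases.
    narration = gTTS(text=answer_text, lang="en", slow=False)
    narration.save("narrate.wav")  # gTTS writes MP3 data regardless of the file extension
    return "narrate.wav"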