jozzy committed on
Commit
394e7c0
·
1 Parent(s): 48adc09

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -1
app.py CHANGED
@@ -4,6 +4,7 @@ import openai
4
 
5
  from textblob import TextBlob
6
  from gtts import gTTS
 
7
 
8
  openai.api_key = os.environ['OPENAI_API_KEY']
9
 
@@ -70,7 +71,27 @@ def siriGPT(audio):
70
  narrate_ans.save("narrate.wav")
71
 
72
  return "narrate.wav"
 
 
 
 
73
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
 
75
 
76
 
@@ -104,7 +125,8 @@ with gr.Blocks() as chatHistory:
104
  text = gr.Interface(fn=textGPT, inputs="text", outputs="text")
105
  audio = gr.Interface(fn=audioGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs="text")
106
  siri = gr.Interface(fn=siriGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs = "audio")
107
- demo = gr.TabbedInterface([text, audio, siri, chatHistory], [ "chatGPT", "audioGPT", "siriGPT", "ChatHistory"])
 
108
 
109
  if __name__ == "__main__":
110
  demo.launch(enable_queue=False, auth=lambda u, p: user_db.get(u) == p,
 
4
 
5
  from textblob import TextBlob
6
  from gtts import gTTS
7
+ from pdfminer.high_level import extract_text
8
 
9
  openai.api_key = os.environ['OPENAI_API_KEY']
10
 
 
71
  narrate_ans.save("narrate.wav")
72
 
73
  return "narrate.wav"
74
+
75
+
76
def fileGPT(prompt, file_obj):
    """Answer *prompt* against the text of an uploaded PDF via gpt-3.5-turbo.

    Extracts the PDF's text, appends prompt+text as a user turn to the
    module-level ``messages`` history, sends the history to the chat API,
    records the reply, and returns the running transcript as one string
    (system messages excluded).

    Args:
        prompt: User instruction to apply to the document (a str).
        file_obj: Uploaded file wrapper from gradio; ``.name`` is the
            on-disk path of the temp file.

    Returns:
        str: "role: content" lines for every non-system message, each
        followed by a blank line.
    """
    global messages

    # Pull the raw text out of the uploaded PDF via pdfminer.
    file_text = extract_text(file_obj.name)

    # BUG FIX: original did `text = prompt.append(file_text)` — str has no
    # .append, and even on a list .append returns None, so `text` would be
    # None. Concatenate prompt and document text instead.
    text = f"{prompt}\n\n{file_text}"

    messages.append({"role": "user", "content": text})

    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)

    # Keep the assistant's reply in the history for follow-up turns.
    system_message = response["choices"][0]["message"]
    messages.append(system_message)

    # Render the conversation, hiding the system prompt from the user.
    chats = ""
    for msg in messages:
        if msg['role'] != 'system':
            chats += msg['role'] + ": " + msg['content'] + "\n\n"

    return chats
95
 
96
 
97
 
 
125
# Build one gr.Interface per mode and group them under a tabbed demo.
text = gr.Interface(fn=textGPT, inputs="text", outputs="text")
audio = gr.Interface(fn=audioGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs="text")
siri = gr.Interface(fn=siriGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs="audio")
# BUG FIX: the description was a single-quoted string containing an
# apostrophe ("let's"), which is a SyntaxError. Double quotes fix it
# without changing the displayed text.
file = gr.Interface(fn=fileGPT, inputs=["text", "file"], outputs="text",
                    description="Enter prompt and your PDF, e.g. let's think step by step, summarize this following text:")
demo = gr.TabbedInterface([text, audio, siri, file, chatHistory],
                          ["chatGPT", "audioGPT", "siriGPT", "fileGPT", "ChatHistory"])
130
 
131
  if __name__ == "__main__":
132
  demo.launch(enable_queue=False, auth=lambda u, p: user_db.get(u) == p,