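# Gradio app exposing the OpenAI chat API through four tabs: text chat,
# voice chat (Whisper transcription), voice-to-voice chat narrated with gTTS,
# and PDF-based prompts, plus a tab to show or clear the shared chat history.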
import os
import gradio as gr
import openai

from textblob import TextBlob
from gtts import gTTS
from pdfminer.high_level import extract_text

# The OpenAI key and the app's login credentials are read from environment variables.
openai.api_key = os.environ['OPENAI_API_KEY']

user_db = {os.environ['username']: os.environ['password']}

# Single shared conversation history; the system prompt sets the assistant persona.
messages = [{"role": "system", "content": 'You are a helpful technology assistant.'}]

def audioGPT(audio):
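    """Transcribe the recorded audio with Whisper, send it to the chat model, and return the chat log."""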
    global messages 

    with open(audio, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)

    messages.append({"role": "user", "content": transcript["text"]})

    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)

    system_message = response["choices"][0]["message"]
    messages.append(system_message)

    chats = ""
    for msg in messages:
        if msg['role'] != 'system':
            chats += msg['role'] + ": " + msg['content'] + "\n\n"

    return chats


def textGPT(text):
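    """Send the typed prompt to the chat model and return the chat log."""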
    global messages

    messages.append({"role": "user", "content": text})

    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)

    system_message = response["choices"][0]["message"]
    messages.append(system_message)

    chats = ""
    for msg in messages:
        if msg['role'] != 'system':
            chats += msg['role'] + ": " + msg['content'] + "\n\n"

    return chats


def siriGPT(audio):
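    """Transcribe the recorded audio, get a chat reply, and narrate it as an audio file with gTTS."""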
    global messages 

    with open(audio, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)

    messages.append({"role": "user", "content": transcript["text"]})

    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)

    system_message = response["choices"][0]["message"]
    messages.append(system_message)

    # Detect the reply's language so gTTS reads it back with a matching voice.
    answer_text = system_message["content"]
    lang = TextBlob(answer_text).detect_language()

    narrate_ans = gTTS(text=answer_text, lang=lang, slow=False)
    narrate_ans.save("narrate.wav")

    return "narrate.wav"
    

def fileGPT(prompt, file_obj):
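    """Combine the prompt with text extracted from the uploaded PDF and return the chat log."""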
    global messages 

    file_text = extract_text(file_obj.name)
    # Concatenate the prompt and the extracted PDF text into one user message.
    text = prompt + "\n\n" + file_text
    
    messages.append({"role": "user", "content": text})
    
    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)

    system_message = response["choices"][0]["message"]
    messages.append(system_message)

    chats = ""
    for msg in messages:
        if msg['role'] != 'system':
            chats += msg['role'] + ": " + msg['content'] + "\n\n"

    return chats



def clear():
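    """Reset the shared conversation history to just the system prompt."""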
    global messages
    messages = [{"role": "system", "content": 'You are a helpful technology assistant.'}]
    return
    
def show():
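    """Return the current conversation as role-prefixed lines, skipping the system prompt."""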
    global messages
    chats = ""
    for msg in messages:
        if msg['role'] != 'system':
            chats += msg['role'] + ": " + msg['content'] + "\n\n"

    return chats


with gr.Blocks() as chatHistory:
    gr.Markdown("Click the Clear button below to remove all the chat history.")
    clear_btn = gr.Button("Clear")
    clear_btn.click(fn=clear, inputs=None, outputs=None, queue=False)

    gr.Markdown("Click the Display button below to show all the chat history.")
    show_out = gr.Textbox()
    show_btn = gr.Button("Display")
    show_btn.click(fn=show, inputs=None, outputs=show_out, queue=False)



# One gr.Interface per interaction mode, gathered into a single tabbed app.
text = gr.Interface(fn=textGPT, inputs="text", outputs="text")
audio = gr.Interface(fn=audioGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs="text")
siri = gr.Interface(fn=siriGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs="audio")
file = gr.Interface(fn=fileGPT, inputs=["text", "file"], outputs="text", description="Enter a prompt and your PDF, e.g. let's think step by step, summarize the following text:")
demo = gr.TabbedInterface([text, audio, siri, file, chatHistory], ["chatGPT", "audioGPT", "siriGPT", "fileGPT", "ChatHistory"])

if __name__ == "__main__":
    demo.launch(enable_queue=False, auth=lambda u, p: user_db.get(u) == p,
        auth_message="Welcome to Yichuan GPT!")
    #demo.launch()