import os
import gradio as gr
import openai

# Read the OpenAI API key from the environment.
openai.api_key = os.environ['OPENAI_API_KEY']

# Shared conversation history, seeded with the system prompt.
messages = [{"role": "system", "content": 'You are a helpful technology assistant.'}]

def audioGPT(audio):
    global messages

    # Transcribe the recorded audio with Whisper; close the file when done.
    with open(audio, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)

    messages.append({"role": "user", "content": transcript["text"]})

    # Ask the chat model for a reply to the whole conversation and remember it.
    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)

    system_message = response["choices"][0]["message"]
    messages.append(system_message)

    # Format the conversation for display, skipping the system prompt.
    chats = ""
    for msg in messages:
        if msg['role'] != 'system':
            chats += msg['role'] + ": " + msg['content'] + "\n\n"

    return chats


def textGPT(text):
    global messages

    messages.append({"role": "user", "content": text})

    # Ask the chat model for a reply to the whole conversation and remember it.
    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)

    system_message = response["choices"][0]["message"]
    messages.append(system_message)

    # Format the conversation for display, skipping the system prompt.
    chats = ""
    for msg in messages:
        if msg['role'] != 'system':
            chats += msg['role'] + ": " + msg['content'] + "\n\n"

    return chats


# Build one tab for typed chat and one for voice chat, then combine them.
text = gr.Interface(fn=textGPT, inputs="text", outputs="text")
audio = gr.Interface(fn=audioGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs="text")
demo = gr.TabbedInterface([text, audio], ["chatGPT", "audioGPT"])

if __name__ == "__main__":
    # Pass the credential values from the environment, not literal strings.
    demo.launch(share=True, auth=(os.environ['username'], os.environ['password']))
    #demo.launch()