Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Pull the OpenAI API key from the environment; set `openai_appkey` before launching.
openai.api_key = os.getenv('openai_appkey')
# Chat API reference: https://platform.openai.com/docs/guides/chat/chat-vs-completions
def get_chatgpt(input_text, model="gpt-3.5-turbo"):
    """Send *input_text* to the OpenAI chat API and return the assistant's reply.

    Args:
        input_text: The user's prompt string.
        model: Chat model to query; defaults to "gpt-3.5-turbo" so existing
            callers are unaffected.

    Returns:
        The text content of the first reply choice.
    """
    chat_completion = openai.ChatCompletion.create(
        model=model,
        messages=[
            # System message first — it sets the assistant's behavior.
            {"role": "system", "content": "You are a helpful assistant."},
            # The user's prompt.
            {"role": "user", "content": input_text},
            # Earlier turns could be appended here to carry conversation context:
            # {"role": "assistant", "content": "Episode III."},
        ],
    )
    # The API returns a list of candidate replies; use the first one.
    return chat_completion.choices[0].message.content
# Gradio UI: an image + question go in, a chat-model answer comes out.
with gr.Blocks(
    css="""
    .message.svelte-w6rprc.svelte-w6rprc.svelte-w6rprc {font-size: 20px; margin-top: 20px}
    #component-21 > div.wrap.svelte-w6rprc {height: 600px;}
    """
) as iface:
    # Per-session conversation state (currently only reset, never appended to).
    state = gr.State([])
    #caption_output = None
    #gr.Markdown(title)
    #gr.Markdown(description)
    #gr.Markdown(article)

    with gr.Row():
        with gr.Column(scale=1):
            # FIX: the original referenced `image_input` in the .change() handler
            # and in gr.Examples without ever defining it (NameError at import).
            image_input = gr.Image(type="filepath", label="Image Input")
            with gr.Row():
                with gr.Column(scale=1):
                    chat_input = gr.Textbox(lines=1, label="VQA Question Input")
                    with gr.Row():
                        clear_button = gr.Button(value="Clear", interactive=True)
                        submit_button = gr.Button(
                            value="VQA", interactive=True, variant="primary"
                        )

        with gr.Column():
            caption_output_v1 = gr.Textbox(lines=0, label="CAP+LLM")

    # Choosing a new image resets the conversation state and clears the answer box.
    image_input.change(
        lambda: ([], ""),
        [],
        [state, caption_output_v1],
        queue=False,
    )

    # FIX: get_chatgpt returns a single string, so exactly one output component is
    # wired here; the original listed the undefined names `caption_output` and
    # `gpt3_output_v1`, which would raise NameError and mismatch the return arity.
    submit_button.click(
        get_chatgpt,
        [
            chat_input,
        ],
        [caption_output_v1],
    )

    examples = [['bird.jpeg', "How many birds are there in the tree?"]]
    examples = gr.Examples(
        examples=examples,
        inputs=[image_input, chat_input],
    )
# Serialize inference: a single worker, queue capped at 10 waiting requests,
# and the queue API not exposed publicly.
iface.queue(concurrency_count=1, api_open=False, max_size=10)
# NOTE(review): `enable_queue` in launch() is deprecated in newer Gradio releases
# (queue() above already enables queuing) — confirm against the pinned version.
iface.launch(enable_queue=True)