update layout and fixed chatgpt inference code
app.py
CHANGED
@@ -33,7 +33,7 @@ def predict_chatgpt(inputs, top_p_chatgpt, temperature_chatgpt, openai_api_key,
     #Handling the different roles for ChatGPT
     if chat_counter_chatgpt != 0 :
         messages=[]
-        for data in
+        for data in chatbot_chatgpt:
             temp1 = {}
             temp1["role"] = "user"
             temp1["content"] = data[0]
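For context on this hunk: the loop above walks the Gradio chat history, where each entry is a (user, bot) pair, and builds the role-tagged message list the ChatGPT API expects. A minimal sketch of that conversion, using a hypothetical helper name (history_to_messages is not in app.py) and assuming the same (user, bot) pair layout:

    # Hypothetical helper illustrating the conversion done in predict_chatgpt.
    def history_to_messages(history, new_input):
        messages = []
        for user_msg, bot_msg in history:   # each history entry is a (user, bot) pair
            messages.append({"role": "user", "content": user_msg})
            messages.append({"role": "assistant", "content": bot_msg})
        messages.append({"role": "user", "content": new_input})  # current prompt goes last
        return messages

    # history_to_messages([("Hi", "Hello!")], "How are you?") ->
    # [{'role': 'user', 'content': 'Hi'}, {'role': 'assistant', 'content': 'Hello!'},
    #  {'role': 'user', 'content': 'How are you?'}]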
@@ -75,7 +75,7 @@ def predict_chatgpt(inputs, top_p_chatgpt, temperature_chatgpt, openai_api_key,
         if chunk.decode() :
             chunk = chunk.decode()
             # decode each line as response data is in bytes
-            if len(chunk) > 12 and "
+            if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]["delta"]:
                 partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
                 if token_counter == 0:
                     history.append(" " + partial_words)
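The added "content" in ...["delta"] check is the inference fix: OpenAI's streaming endpoint emits server-sent-event lines whose delta object does not always carry a content key (the first chunk typically holds only the role, and the stream ends with a data: [DONE] line), so indexing ["content"] unconditionally raises a KeyError. A self-contained sketch of the same idea, with illustrative names (extract_token is not part of app.py), assuming raw SSE byte lines like those iterated above:

    import json

    def extract_token(raw_line):
        """Return the streamed text fragment from one SSE line, or None.

        Lines look like: b'data: {"choices":[{"delta":{"content":"Hi"}}]}'.
        Role-only deltas and the final b'data: [DONE]' carry no 'content'.
        """
        line = raw_line.decode()
        if not line.startswith("data: ") or line.strip() == "data: [DONE]":
            return None
        delta = json.loads(line[len("data: "):])["choices"][0]["delta"]
        return delta.get("content")   # None when the key is absent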
@@ -163,7 +163,8 @@ def reset_chat(chatbot, state):
     return None, []
 
 
-title = """<h1 align="center"
+title = """<h1 align="center">🔥🔥Comparison: ChatGPT & OpenChatKit </h1><br>
+<h3 align="center">🚀A Gradio Streaming Demo</h3>"""
 description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form:
 ```
 User: <utterance>
@@ -186,10 +187,10 @@ with gr.Blocks(css="""#col_container {width: 1000px; margin-left: auto; margin-r
     with gr.Row():
         with gr.Column(scale=13):
             openai_api_key = gr.Textbox(type='password', label="Enter your OpenAI API key here for ChatGPT")
-            inputs = gr.Textbox(placeholder="Hi there!", label="Type an input and press Enter" )
+            inputs = gr.Textbox(placeholder="Hi there!", label="Type an input and press Enter ⤵️ " )
         with gr.Column(scale=1):
-
-            b2 = gr.Button('Clear up Chatbots!', elem_id = 'clear')
+            b1 = gr.Button(elem_id = '🏃Run')
+            b2 = gr.Button('🔄Clear up Chatbots!', elem_id = 'clear') #.style(full_width=True)
     state_chatgpt = gr.State([])
     state_together = gr.State([])
 
@@ -200,7 +201,7 @@ with gr.Blocks(css="""#col_container {width: 1000px; margin-left: auto; margin-r
 
     with gr.Column(scale=2, elem_id='parameters'):
         with gr.Box():
-            gr.HTML("Parameters for #
+            gr.HTML("Parameters for #OpenCHAtKit")
             top_p = gr.Slider(minimum=-0, maximum=1.0,value=0.95, step=0.05,interactive=True, label="Top-p",)
             temperature = gr.Slider(minimum=-0, maximum=5.0, value=0.5, step=0.1, interactive=True, label="Temperature", )
             top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
@@ -225,9 +226,15 @@ with gr.Blocks(css="""#col_container {width: 1000px; margin-left: auto; margin-r
     inputs.submit( predict_together,
                   [temp_textbox_together, inputs, top_p, temperature, top_k, repetition_penalty, watermark, chatbot_together, state_together, ],
                   [chatbot_together, state_together],)
+    b1.click( predict_chatgpt,
+             [inputs, top_p_chatgpt, temperature_chatgpt, openai_api_key, chat_counter_chatgpt, chatbot_chatgpt, state_chatgpt],
+             [chatbot_chatgpt, state_chatgpt, chat_counter_chatgpt],)
+    b1.click( predict_together,
+             [temp_textbox_together, inputs, top_p, temperature, top_k, repetition_penalty, watermark, chatbot_together, state_together, ],
+             [chatbot_together, state_together],)
 
     b2.click(reset_chat, [chatbot_chatgpt, state_chatgpt], [chatbot_chatgpt, state_chatgpt])
     b2.click(reset_chat, [chatbot_together, state_together], [chatbot_together, state_together])
 
     gr.Markdown(description)
-demo.queue(concurrency_count=16).launch(height= 2500, debug=True)
+demo.queue(concurrency_count=16).launch(height= 2500, debug=True)
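The new b1.click calls are the layout fix: registering two click handlers on the one 🏃Run button makes a single click fire both predict_chatgpt and predict_together, each updating its own chatbot and state, mirroring what inputs.submit already does for Enter; demo.queue(...) is what lets the generator-based streaming handlers update the UI incrementally. A stripped-down sketch of that wiring pattern with toy stand-in functions (not the app's real predict functions):

    import gradio as gr

    def echo_a(message, history):                  # stands in for predict_chatgpt
        history = history + [(message, "model A: " + message)]
        return history, history                    # update chatbot and state

    def echo_b(message, history):                  # stands in for predict_together
        history = history + [(message, "model B: " + message)]
        return history, history

    with gr.Blocks() as demo:
        inputs = gr.Textbox(label="Type an input")
        chatbot_a, chatbot_b = gr.Chatbot(), gr.Chatbot()
        state_a, state_b = gr.State([]), gr.State([])
        b1 = gr.Button("Run")
        # Two handlers on the same button: one click triggers both models.
        b1.click(echo_a, [inputs, state_a], [chatbot_a, state_a])
        b1.click(echo_b, [inputs, state_b], [chatbot_b, state_b])

    demo.queue().launch()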