Update app.py
app.py CHANGED
@@ -15,12 +15,6 @@ def make_prediction(prompt, max_tokens=None, temperature=None, top_p=None, top_k
     for chunk in completion:
         yield chunk["choices"][0]["text"]
 
-def delay_typer(words, delay=0.8):
-    tokens = re.findall(r'\s*\S+\s*', words)
-    for s in tokens:
-        yield s
-        sleep(delay)
-
 
 def clear_chat(chat_history_state, chat_message):
     chat_history_state = []
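This hunk removes the delay_typer helper, which re-split the finished reply with a regex and slept between word-sized tokens to fake a typing effect; make_prediction already yields streamed chunks as they arrive, so the artificial pause is presumably no longer wanted. For reference, a self-contained sketch of the dropped behaviour, with the re and sleep imports it depended on:

import re
from time import sleep

def delay_typer(words, delay=0.8):
    # Split the finished string into word-sized pieces (keeping surrounding
    # whitespace) and emit them one at a time with a fixed pause between them.
    tokens = re.findall(r'\s*\S+\s*', words)
    for s in tokens:
        yield s
        sleep(delay)

# Example: prints one word roughly every 0.8 seconds.
for piece in delay_typer("Hello there, how are you?"):
    print(piece, end="", flush=True)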
@@ -94,16 +88,17 @@ with gr.Blocks(css=CSS) as demo:
             submit = gr.Button(value="Send message", variant="secondary").style(full_width=True)
             clear = gr.Button(value="New topic", variant="secondary").style(full_width=False)
             stop = gr.Button(value="Stop", variant="secondary").style(full_width=False)
-        with gr.
-            with gr.
-
-
-
-
-
-
-
-
+        with gr.Accordion("Show Model Parameters"):
+            with gr.Row():
+                with gr.Column():
+                    max_tokens = gr.Slider(20, 1000, label="Max Tokens", step=20, value=500)
+                    temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=0.8)
+                    top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.95)
+                    top_k = gr.Slider(0, 100, label="Top K", step=1, value=40)
+                    repetition_penalty = gr.Slider(0.0, 2.0, label="Repetition Penalty", step=0.1, value=1.1)
+
+            system_msg = gr.Textbox(
+                start_message, label="System Message", interactive=True, visible=True, placeholder="System prompt. Provide instructions which you want the model to remember.", lines=5)
 
     chat_history_state = gr.State()
     clear.click(clear_chat, inputs=[chat_history_state, message], outputs=[chat_history_state, message], queue=False)
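The second hunk replaces the truncated "with gr." block with an accordion of generation controls (Max Tokens, Temperature, Top P, Top K, Repetition Penalty) plus an editable system-message box seeded from start_message. The diff does not show how those components reach make_prediction, so the following is only a sketch of the usual Gradio wiring for such sliders; the chat callback name and the chatbot output are assumptions, not part of this commit:

# Hypothetical wiring (not shown in this diff): pass the new controls into the
# generation callback so their values reach
# make_prediction(prompt, max_tokens, temperature, top_p, top_k, repetition_penalty).
submit_event = submit.click(
    fn=chat,  # assumed generator callback that calls make_prediction internally
    inputs=[message, chat_history_state, system_msg,
            max_tokens, temperature, top_p, top_k, repetition_penalty],
    outputs=[chatbot, chat_history_state, message],
    queue=True,
)
# The existing Stop button can then cancel the streaming run it started.
stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_event], queue=False)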