winglian committed on
Commit ed16925 · 1 Parent(s): 2b019dd

cleanup instruct app. add queue so UI notifies users

Files changed (2):
  1. chat.py +3 -5
  2. app.py → instruct.py +7 -8
chat.py CHANGED
@@ -65,9 +65,7 @@ start_message = """
 """
 
 
-blocks = gr.Blocks()
-
-with blocks:
+with gr.Blocks() as blocks:
     gr.Markdown("# GGML Spaces Demo")
 
     chatbot = gr.Chatbot()
@@ -107,12 +105,12 @@ with blocks:
     clear.click(lambda: None, None, chatbot, queue=False)
 
     submit_click_event = submit.click(
-        fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=False
+        fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True
     ).then(
         fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True
     )
     message_submit_event = message.submit(
-        fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=False
+        fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True
     ).then(
         fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True
     )
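
Note on the queue=True change: in Gradio, only events routed through the queue get the built-in "queue position" feedback in the UI, which is what the commit message refers to. A minimal sketch of the pattern these hunks converge on, assuming Gradio 3.x; the bot function below is a hypothetical echo stand-in for the app's real chat() call:

import gradio as gr

def user(message, history):
    # Append the user's turn to the history and clear the textbox.
    return "", history + [[message, None]]

def bot(history):
    # Hypothetical stand-in for the real call into the GGML model.
    history[-1][1] = "Echo: " + history[-1][0]
    return history

with gr.Blocks() as blocks:
    gr.Markdown("# GGML Spaces Demo")
    chatbot = gr.Chatbot()
    message = gr.Textbox()

    # Both steps run through the queue, so waiting users see their position.
    message.submit(
        fn=user, inputs=[message, chatbot], outputs=[message, chatbot], queue=True
    ).then(fn=bot, inputs=[chatbot], outputs=[chatbot], queue=True)

# .queue() must be called on the Blocks for queued events to be served.
blocks.queue().launch()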
app.py → instruct.py RENAMED
@@ -23,11 +23,10 @@ description = f"""llama.cpp implementation in python [https://github.com/abetlen
 This is the {config["repo"]}/{config["file"]} model.
 """
 
-examples = [
-    ["Tell me a joke about old houses.", "Why did the old house break up with the new house? Because it was too modern!"],
-    ["What is the square root of 64?", "The square root of 64 is 8."],
-    ["Insult me", ""],
-]
-
-gr.Interface(fn=generate_text, inputs=input_text, outputs=output_text, title="Llama Language Model", description=description, examples=examples).launch()
-
+gr.Interface(
+    fn=generate_text,
+    inputs=input_text,
+    outputs=output_text,
+    title="Llama Language Model",
+    description=description,
+).queue(max_size=16, concurrency_count=1).launch()
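
In Gradio 3.x, queue(max_size=16, concurrency_count=1) caps the number of waiting requests at 16 and processes one request at a time, so queued visitors see their position in the UI instead of a silent hang. A self-contained sketch of this pattern, with generate_text as a hypothetical placeholder for the app's llama.cpp call:

import gradio as gr

def generate_text(prompt: str) -> str:
    # Hypothetical placeholder; the real app generates text via llama.cpp.
    return "(model output for: " + prompt + ")"

demo = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Prompt"),
    outputs=gr.Textbox(label="Response"),
    title="Llama Language Model",
)

# One generation at a time; up to 16 users wait in line with UI feedback.
demo.queue(max_size=16, concurrency_count=1).launch()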