winglian committed
Commit ce7dde7 · Parent: 988bc04

add prediction settings to ui

Files changed (2):
  1. chat.py    +30 -13
  2. config.yml  +0 -1
chat.py CHANGED

```diff
@@ -28,7 +28,7 @@ def user(message, history):
     return "", history
 
 
-def chat(history, system_message):
+def chat(history, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
     history = history or []
 
     messages = system_message + \
@@ -36,7 +36,17 @@ def chat(history, system_message):
                    for item in history])
 
     history[-1][1] = ""
-    for output in llm(messages, echo=False, stream=True, **config['chat']):
+    for output in llm(
+        messages,
+        echo=False,
+        stream=True,
+        max_tokens=max_tokens,
+        temperature=temperature,
+        top_p=top_p,
+        top_k=top_k,
+        repeat_penalty=repeat_penalty,
+        **config['chat']
+    ):
         answer = output['choices'][0]['text']
         history[-1][1] += answer
 
```
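For reference, here is a minimal sketch of how llama-cpp-python consumes these sampling arguments when streaming. The model path and prompt below are placeholders rather than values from this Space; only the parameter names and the slider defaults correspond to this commit.

```python
# Minimal streaming sketch; model path and prompt are hypothetical placeholders.
from llama_cpp import Llama

llm = Llama(model_path="models/example.ggml.q4_0.bin", n_ctx=2048)  # hypothetical path

prompt = "USER: Say hello in one sentence.\nASSISTANT:"
for output in llm(
    prompt,
    echo=False,
    stream=True,              # yield partial completions one chunk at a time
    max_tokens=300,           # UI default of the Max Tokens slider
    temperature=0.2,          # UI default of the Temperature slider
    top_p=0.95,
    top_k=40,
    repeat_penalty=1.1,
    stop=["</s>", "<unk>"],   # still supplied through config['chat'] after this change
):
    print(output["choices"][0]["text"], end="", flush=True)
```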
 
 
```diff
@@ -70,6 +80,22 @@ with blocks:
         submit = gr.Button(value="Send message", variant="secondary").style(full_width=True)
         clear = gr.Button(value="New topic", variant="secondary").style(full_width=False)
         stop = gr.Button(value="Stop", variant="secondary").style(full_width=False)
+    with gr.Row():
+        with gr.Column():
+            gr.Markdown(f"""
+                    - This is the [{config["repo"]}](https://huggingface.co/{config["repo"]}) model file [{config["file"]}](https://huggingface.co/{config["repo"]}/blob/main/{config["file"]})
+                    - This Space uses GGML with GPU support, so it can run larger models on smaller GPUs & VRAM quickly.
+                    - This is running on a smaller, shared GPU, so it may take a few seconds to respond.
+                    - [Duplicate the Space](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui?duplicate=true) to skip the queue and run in a private space or to use your own GGML models.
+                    - When using your own models, simply update the [config.yml](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui/blob/main/config.yml)
+                    - Contribute at [https://github.com/OpenAccess-AI-Collective/ggml-webui](https://github.com/OpenAccess-AI-Collective/ggml-webui)
+            """)
+        with gr.Column():
+            max_tokens = gr.Slider(20, 1000, label="Max Tokens", step=20, value=300)
+            temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=0.2)
+            top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.95)
+            top_k = gr.Slider(0, 100, label="Top K", step=1, value=40)
+            repeat_penalty = gr.Slider(0.0, 2.0, label="Repetition Penalty", step=0.1, value=1.1)
 
     system_msg = gr.Textbox(
         start_message, label="System Message", interactive=False, visible=False)
@@ -81,22 +107,13 @@ with blocks:
     submit_click_event = submit.click(
         fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=False
     ).then(
-        fn=chat, inputs=[chat_history_state, system_msg], outputs=[chatbot, chat_history_state], queue=True
+        fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True
     )
     message_submit_event = message.submit(
         fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=False
     ).then(
-        fn=chat, inputs=[chat_history_state, system_msg], outputs=[chatbot, chat_history_state], queue=True
+        fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True
     )
     stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_click_event, message_submit_event], queue=False)
 
-    gr.Markdown(f"""
-            - This is the [{config["repo"]}](https://huggingface.co/{config["repo"]}) model file [{config["file"]}](https://huggingface.co/{config["repo"]}/blob/main/{config["file"]})
-            - This Space uses GGML with GPU support, so it can run larger models on smaller GPUs & VRAM quickly.
-            - This is running on a smaller, shared GPU, so it may take a few seconds to respond.
-            - [Duplicate the Space](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui?duplicate=true) to skip the queue and run in a private space or to use your own GGML models.
-            - When using your own models, simply update the [config.yml](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui/blob/main/config.yml)")
-            - Contribute at [https://github.com/OpenAccess-AI-Collective/ggml-webui](https://github.com/OpenAccess-AI-Collective/ggml-webui)
-    """)
-
 blocks.queue(max_size=32, concurrency_count=4).launch(debug=True, server_name="0.0.0.0", server_port=7860)
```
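The new sliders reach the chat handler purely through the `inputs` lists above: Gradio passes each listed component's value as a positional argument, in order. Below is a stripped-down sketch of that wiring pattern, with a stub standing in for the real chat() generator; the slider defaults are copied from the diff, everything else is illustrative.

```python
# Sketch of the inputs-list wiring pattern; `echo_settings` is a stub handler,
# not the Space's real chat() function.
import gradio as gr

def echo_settings(history, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
    # Each slider value arrives as a plain number, in the order given in `inputs`.
    history = history or []
    history.append(("settings", f"max_tokens={max_tokens}, temperature={temperature}, "
                                f"top_p={top_p}, top_k={top_k}, repeat_penalty={repeat_penalty}"))
    return history, history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    chat_history_state = gr.State()
    system_msg = gr.Textbox("You are a helpful assistant.", visible=False)
    with gr.Row():
        max_tokens = gr.Slider(20, 1000, label="Max Tokens", step=20, value=300)
        temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=0.2)
        top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.95)
        top_k = gr.Slider(0, 100, label="Top K", step=1, value=40)
        repeat_penalty = gr.Slider(0.0, 2.0, label="Repetition Penalty", step=0.1, value=1.1)
    send = gr.Button("Send")

    send.click(
        fn=echo_settings,
        inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty],
        outputs=[chatbot, chat_history_state],
    )

demo.launch()
```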
config.yml CHANGED

```diff
@@ -7,7 +7,6 @@ llama_cpp:
   n_ctx: 2048
   n_gpu_layers: 40 # llama 13b has 40 layers
 chat:
-  max_tokens: 1024
   stop:
     - "</s>"
     - "<unk>"
```