Update app.py
app.py CHANGED
@@ -30,7 +30,12 @@ def get_messages_formatter_type(model_name):
 def chat_fn(message, history, model, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
     history_list = history or []
     response_generator = respond(message, history_list, model, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty)
-
+
+    full_response = ""
+    for current_history in response_generator:
+        full_response = current_history[-1][1]  # get the latest reply
+        yield current_history, history  # return the current history and the history state on each iteration
+
 
 def respond(
     message,
@@ -136,7 +141,7 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="violet", secondary_hue="violet"
     code_background_fill_dark="#292733",
 )) as demo:
 
-    chatbot = gr.Chatbot(scale=1, show_copy_button=True)
+    chatbot = gr.Chatbot(scale=1, show_copy_button=True, type='messages')  # change the chatbot type
     message = gr.Textbox(label="Your message")
     model_dropdown = gr.Dropdown(
         ["openbuddy-llama3.2-3b-v23.2-131k-q5_k_m-imat.gguf"],
@@ -150,12 +155,8 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="violet", secondary_hue="violet"
     top_p = gr.Slider(minimum=0.1, maximum=2.0, value=0.9, step=0.05, label="Top-p")
     top_k = gr.Slider(minimum=0, maximum=100, value=1, step=1, label="Top-k")
     repeat_penalty = gr.Slider(minimum=0.0, maximum=2.0, value=1.1, step=0.1, label="Repetition penalty")
-
     history = gr.State([])
 
-    def chat_fn(message, history, model, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
-        return respond(message, history, model, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty)
-
     message.submit(chat_fn, [message, history, model_dropdown, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty], [chatbot, history])
 
     gr.Markdown(description)
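For context, the sketch below shows the streaming pattern the updated chat_fn follows, rewritten as a self-contained example using the messages-format history that gr.Chatbot(type='messages') expects. It is a minimal sketch, not the Space's implementation: fake_respond is a hypothetical stand-in for the app's llama-cpp-backed respond() generator, and keeping the State output in sync with the streamed history is an assumption of this sketch rather than something the diff itself does.

# Minimal streaming sketch (assumptions: fake_respond stands in for the
# Space's respond() generator; history entries are messages-format dicts).
import time
import gradio as gr

def fake_respond(message, history):
    partial = ""
    for token in ["Hel", "lo ", "there", "!"]:
        partial += token
        time.sleep(0.05)
        # Yield the full conversation with the growing assistant reply appended.
        yield history + [
            {"role": "user", "content": message},
            {"role": "assistant", "content": partial},
        ]

def chat_fn(message, history):
    history = history or []
    for current_history in fake_respond(message, history):
        # Stream to the Chatbot and keep the State in sync (this sketch's choice).
        yield current_history, current_history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type='messages', show_copy_button=True)
    state = gr.State([])
    message = gr.Textbox(label="Your message")
    message.submit(chat_fn, [message, state], [chatbot, state])

if __name__ == "__main__":
    demo.launch()

Because chat_fn is a generator, Gradio streams each yielded (chatbot, state) pair to the UI as it is produced, which is what lets the reply grow in place.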
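One detail to keep in mind when combining the two changes: with type='messages' the history entries are role/content dicts, so the tuple-style index in the new chat_fn (current_history[-1][1]) assumes pair-style history. A hypothetical helper that reads the latest reply from messages-format history instead:

def latest_reply(history):
    # Assumes messages-format entries: {"role": ..., "content": ...}
    return history[-1]["content"] if history else ""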