Tuchuanhuhuhu committed
Commit: 12a17d8 · 1 parent: 3d66128
为XMChat加入点赞/点踩的功能 (Add like/dislike functionality to XMChat)
Files changed:
- ChuanhuChatbot.py     +21 -1
- modules/base_model.py +10 -0
- modules/models.py     +25 -0
- modules/utils.py      +14 -2
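Taken together, the change wires two new Gradio buttons through a model-agnostic dispatcher in modules/utils.py to a like()/dislike() method on the active model object. Below is a minimal, self-contained sketch of that dispatch pattern, not code from the commit; the DummyModel class and the launch call are illustrative assumptions.

import gradio as gr

class DummyModel:
    # stand-in for BaseLLMModel: any object exposing like()/dislike() works here
    def like(self):
        return "👍 feedback recorded"

def like(current_model, *args):
    # same shape as the new modules/utils.like: forward to whichever model is active
    return current_model.like(*args)

with gr.Blocks() as demo:
    current_model = gr.State(DummyModel())   # the app keeps the live model object in state
    status_display = gr.Markdown("")
    with gr.Row() as like_dislike_area:      # the commit creates this row with visible=False
        likeBtn = gr.Button("👍")
    likeBtn.click(like, [current_model], [status_display], show_progress=False)

demo.launch()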
ChuanhuChatbot.py
CHANGED
@@ -67,6 +67,11 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
                 retryBtn = gr.Button(i18n("🔄 重新生成"))
                 delFirstBtn = gr.Button(i18n("🗑️ 删除最旧对话"))
                 delLastBtn = gr.Button(i18n("🗑️ 删除最新对话"))
+            with gr.Row(visible=False) as like_dislike_area:
+                with gr.Column(min_width=20, scale=1):
+                    likeBtn = gr.Button(i18n("👍"))
+                with gr.Column(min_width=20, scale=1):
+                    dislikeBtn = gr.Button(i18n("👎"))

         with gr.Column():
             with gr.Column(min_width=50, scale=1):

@@ -269,7 +274,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:

     gr.Markdown(CHUANHU_DESCRIPTION, elem_id="description")
     gr.HTML(FOOTER.format(versions=versions_html()), elem_id="footer")
-    demo.load(refresh_ui_elements_on_load, [current_model], [usageTxt], show_progress=False)
+    demo.load(refresh_ui_elements_on_load, [current_model, model_select_dropdown], [usageTxt, like_dislike_area], show_progress=False)
     chatgpt_predict_args = dict(
         fn=predict,
         inputs=[

@@ -361,6 +366,20 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
         show_progress=False
     )

+    likeBtn.click(
+        like,
+        [current_model],
+        [status_display],
+        show_progress=False
+    )
+
+    dislikeBtn.click(
+        dislike,
+        [current_model],
+        [status_display],
+        show_progress=False
+    )
+
     two_column.change(update_doc_config, [two_column], None)

     # LLM Models

@@ -368,6 +387,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
     keyTxt.submit(**get_usage_args)
     single_turn_checkbox.change(set_single_turn, [current_model, single_turn_checkbox], None)
     model_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt], [current_model, status_display, lora_select_dropdown], show_progress=True)
+    model_select_dropdown.change(toggle_like_btn_visibility, [model_select_dropdown], [like_dislike_area], show_progress=False)
     lora_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt], [current_model, status_display], show_progress=True)

     # Template
modules/base_model.py
CHANGED
@@ -549,3 +549,13 @@ class BaseLLMModel:
         except FileNotFoundError:
             logging.warning(f"{user_name} 没有找到对话历史文件,不执行任何操作")
         return filename, self.system_prompt, chatbot
+
+    def like(self):
+        """like the last response, implement if needed
+        """
+        return gr.update()
+
+    def dislike(self):
+        """dislike the last response, implement if needed
+        """
+        return gr.update()
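A bare gr.update() is Gradio's "change nothing" value, so the two base-class defaults above leave status_display untouched for models that never override them; only subclasses that implement feedback return a real message. A tiny illustrative sketch of that override pattern (FeedbackModel is a made-up name, not a class in the repository):

import gradio as gr

class BaseLLMModel:
    def like(self):
        return gr.update()          # no-op: the status display keeps its current text

class FeedbackModel(BaseLLMModel):
    def like(self):
        return "👍 feedback sent"   # a plain string replaces the status text instead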
modules/models.py
CHANGED
@@ -99,6 +99,8 @@ class OpenAIClient(BaseLLMModel):
             status_text = STANDARD_ERROR_MSG + READ_TIMEOUT_MSG + ERROR_RETRIEVE_MSG
             return status_text
         except Exception as e:
+            import traceback
+            traceback.print_exc()
             logging.error(i18n("获取API使用情况失败:") + str(e))
             return STANDARD_ERROR_MSG + ERROR_RETRIEVE_MSG

@@ -395,9 +397,11 @@ class XMChat(BaseLLMModel):
         self.image_path = None
         self.xm_history = []
         self.url = "https://xmbot.net/web"
+        self.last_conv_id = None

     def reset(self):
         self.session_id = str(uuid.uuid4())
+        self.last_conv_id = None
         return [], "已重置"

     def image_to_base64(self, image_path):

@@ -444,6 +448,26 @@ class XMChat(BaseLLMModel):
         self.image_bytes = None
         self.image_path = None

+    def like(self):
+        if self.last_conv_id is None:
+            return "点赞失败,你还没发送过消息"
+        data = {
+            "uuid": self.last_conv_id,
+            "appraise": "good"
+        }
+        response = requests.post(self.url, json=data)
+        return "👍点赞成功,感谢反馈~"
+
+    def dislike(self):
+        if self.last_conv_id is None:
+            return "点踩失败,你还没发送过消息"
+        data = {
+            "uuid": self.last_conv_id,
+            "appraise": "bad"
+        }
+        response = requests.post(self.url, json=data)
+        return "👎点踩成功,感谢反馈~"
+
     def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot):
         fake_inputs = real_inputs
         display_append = ""

@@ -479,6 +503,7 @@ class XMChat(BaseLLMModel):
     def get_answer_at_once(self):
         question = self.history[-1]["content"]
         conv_id = str(uuid.uuid4())
+        self.last_conv_id = conv_id
         data = {
             "user_id": self.api_key,
             "session_id": self.session_id,
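The new XMChat.like()/dislike() methods reuse the chat endpoint stored in self.url and send a small JSON body carrying the uuid of the last conversation turn plus an appraise verdict. A hedged, standalone sketch of just that HTTP call, with a made-up helper name and return value (the endpoint's response format is not shown in the commit, so nothing is assumed about it):

import requests

def send_xmchat_feedback(conv_id: str, positive: bool, url: str = "https://xmbot.net/web") -> int:
    # conv_id corresponds to the uuid generated per turn in get_answer_at_once
    # and cached in last_conv_id; "appraise" takes "good" or "bad" per the diff
    payload = {"uuid": conv_id, "appraise": "good" if positive else "bad"}
    resp = requests.post(url, json=payload)
    return resp.status_code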
modules/utils.py
CHANGED
@@ -113,6 +113,12 @@ def set_single_turn(current_model, *args):
 def handle_file_upload(current_model, *args):
     return current_model.handle_file_upload(*args)

+def like(current_model, *args):
+    return current_model.like(*args)
+
+def dislike(current_model, *args):
+    return current_model.dislike(*args)
+

 def count_token(message):
     encoding = tiktoken.get_encoding("cl100k_base")

@@ -532,5 +538,11 @@ def get_model_source(model_name, alternative_source):
     if model_name == "gpt2-medium":
         return "https://huggingface.co/gpt2-medium"

-def refresh_ui_elements_on_load(current_model):
-    return current_model.billing_info()
+def refresh_ui_elements_on_load(current_model, selected_model_name):
+    return current_model.billing_info(), toggle_like_btn_visibility(selected_model_name)
+
+def toggle_like_btn_visibility(selected_model_name):
+    if selected_model_name == "xmchat":
+        return gr.update(visible=True)
+    else:
+        return gr.update(visible=False)