Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -70,14 +70,58 @@ def generate_text(message, history):
|
|
70 |
history = ["init", input_prompt]
|
71 |
|
72 |
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
83 |
demo.launch(max_threads=10)
|
|
|
70 |
history = ["init", input_prompt]
|
71 |
|
72 |
|
73 |
# User message function
def user(message, history):
    """Queue the user's message into the chat history.

    Appends a ``[message, None]`` pair (the ``None`` slot is filled in later
    by the bot callback) and clears the input textbox by returning an empty
    string as its new value.
    """
    pending = history + [[message, None]]
    return "", pending
# Bot message function
def bot(history, system_prompt):
    """Generate the assistant's reply for the last user message in *history*.

    Tokenizes the system prompt followed by every prior exchange, streams
    tokens from the model, and writes the accumulated text into the last
    history entry. Returns the updated history.

    NOTE(review): relies on module-level ``model`` and ``max_new_tokens``
    defined elsewhere in the file — confirm both exist before this runs.
    Assumes ``history`` is non-empty (the ``user`` callback always appends
    an entry first).
    """
    # Seed the context with the system prompt.
    tokens = model.tokenize(system_prompt.encode("utf-8"))

    # Replay all completed exchanges (everything but the pending last entry).
    for user_message, bot_message in history[:-1]:
        tokens.extend(model.tokenize(user_message.encode("utf-8")))
        # bot_message is None for an unanswered turn; skip it.
        if bot_message:
            tokens.extend(model.tokenize(bot_message.encode("utf-8")))

    # Append the user message awaiting a reply.
    last_user_message = history[-1][0]
    tokens.extend(model.tokenize(last_user_message.encode("utf-8")))

    generator = model.generate(tokens)

    # BUG FIX: the original decoded each token's bytes in isolation with
    # errors="ignore", which silently drops any character whose UTF-8
    # encoding spans multiple tokens (common for non-ASCII text and emoji).
    # Accumulate the raw bytes and decode the whole buffer instead — for a
    # growing prefix this yields correct text, with at most a trailing
    # partial character temporarily suppressed.
    partial_bytes = b""
    for i, token in enumerate(generator):
        # Stop at end-of-sequence or once the generation budget is spent.
        if token == model.token_eos() or (max_new_tokens is not None and i >= max_new_tokens):
            break
        partial_bytes += model.detokenize([token])
        history[-1][1] = partial_bytes.decode("utf-8", "ignore")

    # Robustness: ensure the reply slot is a string even if zero tokens
    # were generated (the original left it as None in that case).
    history[-1][1] = partial_bytes.decode("utf-8", "ignore")
    return history
# Gradio UI
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # Inline <img> HTML used as a logo inside the Markdown title header.
    favicon = '<img src="https://cdn-uploads.huggingface.co/production/uploads/64740cf7485a7c8e1bd51ac9/seIR5ErFdX5Snr4O7r7tY.png" width="68px" style="display: inline">'
    gr.Markdown(f"""<h1><center>{favicon}A N I M A</center></h1>ANIMA is an expert in various scientific disciplines.""")
    with gr.Row():
        with gr.Column(scale=5):

            # NOTE(review): .style(...) is the legacy Gradio 3.x API —
            # removed in Gradio 4; confirm the pinned gradio version.
            chatbot = gr.Chatbot(label="Dialogue").style(height=400)
            with gr.Row():
                with gr.Column():
                    msg = gr.Textbox(label="Send Message", placeholder="Send Message", show_label=False).style(container=False)
                with gr.Column():
                    with gr.Row():
                        submit = gr.Button("Send")
                        stop = gr.Button("Stop")
                        clear = gr.Button("Clear")
            with gr.Row():
                gr.Markdown("""WARNING: The model may generate factually or ethically incorrect texts. We are not responsible for this.""")

    # Event wiring: the user step runs unqueued (instant echo + textbox
    # clear); on success the bot step runs queued (it streams generation).
    # NOTE(review): `system_prompt` is not defined in this chunk — presumably
    # a component or string created earlier in the file; verify.
    submit_event = msg.submit(fn=user, inputs=[msg, chatbot], outputs=[msg, chatbot], queue=False).success(fn=bot, inputs=[chatbot, system_prompt], outputs=chatbot, queue=True)
    submit_click_event = submit.click(fn=user, inputs=[msg, chatbot], outputs=[msg, chatbot], queue=False).success(fn=bot, inputs=[chatbot, system_prompt], outputs=chatbot, queue=True)
    # Stop cancels any in-flight generation started by either trigger above.
    stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_event, submit_click_event], queue=False)
    # Clear resets the chatbot component to empty.
    clear.click(lambda: None, None, chatbot, queue=False)
demo.launch(max_threads=10)