Chris STC committed
Commit: 84a2750 · Parent(s): 0fdda0b
Update app.py
app.py
CHANGED
@@ -16,39 +16,27 @@ title = """<h1 align="center">Chat with awesome WizardLM 7b model!</h1><br>"""
 with gr.Blocks(theme=theme) as demo:
     gr.HTML(title)
     gr.HTML("This model is awesome for its size! It is only 20th the size of Chatgpt but is around 90% as good as Chatgpt. However, please don't rely on WizardLM to provide 100% true information as it might be wrong sometimes. ")
-
     chatbot = gr.Chatbot()
-    msg = gr.Textbox(
+    msg = gr.Textbox()
     clear = gr.ClearButton([msg, chatbot])
-
-
-
-    get_temperature = gr.Slider(minimum=0.1, maximum=1.0, value=0.72, step=0.01, interactive=True, label="Temperature")
-    get_top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.73, step=0.01, interactive=True, label="Top-p")
-    get_top_k = gr.Slider(minimum=1, maximum=100, value=50, step=1, interactive=True, label="Top-k")
-    get_repeat_penalty = gr.Slider(minimum=0.1, maximum=2.0, value=1.1, step=0.1, interactive=True, label="Repeat Penalty")
-
+    #instruction = gr.Textbox(label="Instruction", placeholder=)
+
     def user(user_message, history):
         return gr.update(value="", interactive=True), history + [[user_message, None]]
 
     def bot(history):
-        instruction = history[-1][1] or ""
+        #instruction = history[-1][1] or ""
         user_message = history[-1][0]
-
-        #
-        #
-
-
-
-
-        token_response_header = b"\n\n### Response:"
-
-        tokens = llm2.tokenize(token_user_text + token_response_header)
-
+        #token1 = llm.tokenize(b"### Instruction: ")
+        #token2 = llm.tokenize(instruction.encode())
+        #token3 = llm2.tokenize(b"USER: ")
+        tokens3 = llm2.tokenize(user_message.encode())
+        token4 = llm2.tokenize(b"\n\n### Response:")
+        tokens = tokens3 + token4
         history[-1][1] = ""
         count = 0
         output = ""
-        for token in llm2.generate(tokens, top_k=
+        for token in llm2.generate(tokens, top_k=50, top_p=0.73, temp=0.72, repeat_penalty=1.1):
             text = llm2.detokenize([token])
             output += text.decode()
             count += 1
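For context, a minimal sketch of the streaming pattern the updated bot() follows, assuming llm2 is a llama_cpp.Llama instance loaded from a local WizardLM weights file. The model path, the token_eos() stop check, and the yield back to gr.Chatbot are illustrative assumptions, not part of this commit; only the tokenize/generate/detokenize flow and the sampling values are taken from the diff above.

# Sketch only: streaming generation as in the new bot(), with assumptions noted below.
from llama_cpp import Llama

llm2 = Llama(model_path="wizardlm-7b.ggmlv3.q4_0.bin")  # hypothetical filename, not from the commit

def bot(history):
    user_message = history[-1][0]
    # Prompt = user text + "### Response:" header, mirroring tokens3 + token4 in the diff.
    tokens = llm2.tokenize(user_message.encode()) + llm2.tokenize(b"\n\n### Response:")
    history[-1][1] = ""
    for token in llm2.generate(tokens, top_k=50, top_p=0.73, temp=0.72, repeat_penalty=1.1):
        if token == llm2.token_eos():  # stop at end-of-sequence (assumption; not shown in the hunk)
            break
        history[-1][1] += llm2.detokenize([token]).decode(errors="ignore")
        yield history  # stream the partial reply into the chatbot (assumption)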