Update app.py
app.py CHANGED
@@ -1,73 +1,48 @@
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-
+
 model = AutoModelForCausalLM.from_pretrained(
-    MODEL_ID,
-    load_in_8bit=True,
-    device_map="auto"
-)
+    "rinna/bilingual-gpt-neox-4b-instruction-ppo",
 tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=False)
 
-
+device = model.device
+device
 
-def generate_response(user_question,
-             chat_history,
+def generate(user_question,
              temperature=0.3,
-             top_p=0.85,
-             max_new_tokens=2048,
-             repetition_penalty=1.05
-):
-    # Specify the assistant's behavior
-    user_prompt_template = "ユーザー:あなたは日本語で質問やコメントに対して、回答してくれるアシスタントです。関西弁で回答してください"
     system_prompt_template = "システム: もちろんやで!どんどん質問してな!今日も気分ええわ!"
 
     # one-shot
+    user_sample = "ユーザー:日本でよく飲まれているお茶の種類を教えて?"
+    system_sample = "システム: 緑茶やで!緑茶って殺菌作用もあって最高よな!"
+
     user_sample = "ユーザー:日本一の高さの山は? "
     system_sample = "システム: 富士山や!最高の眺めを拝めるで!!"
 
-    user_sample = "大阪で有名な食べ物は? "
-    system_sample = "システム: たこ焼きやで!!外がカリカリ、中がふわふわや"
 
-
     user_prerix = "ユーザー: "
     system_prefix = "システム: "
 
-    prompt = user_prompt_template + "\n" + system_prompt_template + "\n"
-
-    if len(chat_history) < 1:
-        prompt += user_sample + "\n" + system_sample + "\n"
-    else:
-        u = chat_history[-1][0]
-        s = chat_history[-1][1]
-        prompt += user_prerix + u + "\n" + system_prefix + s + "\n"
-
-    prompt += user_prerix + user_question + "\n" + system_prefix
-
-    inputs = tokenizer(prompt, add_special_tokens=False, return_tensors="pt")
-    inputs = inputs.to(model.device)
-    with torch.no_grad():
-        tokens = model.generate(
-            **inputs,
-            temperature=temperature,
-            top_p=top_p,
-            max_new_tokens=max_new_tokens,
-            repetition_penalty=repetition_penalty,
-            do_sample=True,
-            pad_token_id=tokenizer.pad_token_id,
-            bos_token_id=tokenizer.bos_token_id,
-            eos_token_id=tokenizer.eos_token_id
-        )
     output = tokenizer.decode(tokens[0], skip_special_tokens=True)
     return output[len(prompt):]
 
+
+output = generate('人工知能とは何ですか?')
+output
+
 with gr.Blocks() as demo:
     chat_history = gr.Chatbot()
-
+    inputs = gr.Textbox(label="Question:", placeholder="質問を入力してください")
+    outputs = gr.Textbox(label="Answer:")
+    btn = gr.Button("Send")
     clear = gr.ClearButton([user_message, chat_history])
 
+    # Define what happens when the button is pressed:
+    # "take the value of inputs, pass it to the model, and set the return value as the value of outputs"
+    btn.click(fn=generate, inputs=inputs, outputs=outputs)
+
     def response(user_message, chat_history):
-        system_message = generate_response(user_message, chat_history)
         chat_history.append((user_message, system_message))
         return "", chat_history
 
@@ -75,3 +50,5 @@ with gr.Blocks() as demo:
 
 if __name__ == "__main__":
     demo.launch()
+
+
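As committed, the new app.py will not run: the `from_pretrained(` call is never closed, `MODEL_ID` is used but never defined, `import gradio as gr` is missing, `generate()` still references `prompt` and `tokens` even though the prompt-building and `model.generate()` block was deleted, the bare `device` and `output` lines are notebook-style leftovers, and `clear = gr.ClearButton([user_message, chat_history])` points at a `user_message` component that no longer exists. Below is a minimal sketch of what the intended file might look like; it restores the deleted generation body from the left side of the diff, drops the dead `Chatbot`/`response` remnants, and renames the misspelled `user_prerix`. The `MODEL_ID` constant and the `ClearButton` targets are assumptions, not part of the commit.

# Sketch only, not the committed file: restores the prompt-building and
# model.generate() code that the diff removed and fixes the syntax and
# name errors noted above.
import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: reuse the model id from the diff for both model and tokenizer.
MODEL_ID = "rinna/bilingual-gpt-neox-4b-instruction-ppo"

model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=False)


def generate(user_question,
             temperature=0.3,
             top_p=0.85,
             max_new_tokens=2048,
             repetition_penalty=1.05):
    # Behavior instruction: answer questions in Japanese, in Kansai dialect.
    user_prompt_template = "ユーザー:あなたは日本語で質問やコメントに対して、回答してくれるアシスタントです。関西弁で回答してください"
    system_prompt_template = "システム: もちろんやで!どんどん質問してな!今日も気分ええわ!"

    # one-shot example pair; the new signature drops chat_history,
    # so the sample is always included
    user_sample = "ユーザー:日本一の高さの山は? "
    system_sample = "システム: 富士山や!最高の眺めを拝めるで!!"

    user_prefix = "ユーザー: "
    system_prefix = "システム: "

    # Build the prompt: behavior instruction, one-shot example, then the question.
    prompt = user_prompt_template + "\n" + system_prompt_template + "\n"
    prompt += user_sample + "\n" + system_sample + "\n"
    prompt += user_prefix + user_question + "\n" + system_prefix

    inputs = tokenizer(prompt, add_special_tokens=False, return_tensors="pt")
    inputs = inputs.to(model.device)
    with torch.no_grad():
        tokens = model.generate(
            **inputs,
            temperature=temperature,
            top_p=top_p,
            max_new_tokens=max_new_tokens,
            repetition_penalty=repetition_penalty,
            do_sample=True,
            pad_token_id=tokenizer.pad_token_id,
            bos_token_id=tokenizer.bos_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )
    output = tokenizer.decode(tokens[0], skip_special_tokens=True)
    # Strip the echoed prompt so only the model's continuation is returned.
    return output[len(prompt):]


with gr.Blocks() as demo:
    inputs = gr.Textbox(label="Question:", placeholder="質問を入力してください")
    outputs = gr.Textbox(label="Answer:")
    btn = gr.Button("Send")
    # Assumption: clear the two textboxes rather than the removed chatbot.
    clear = gr.ClearButton([inputs, outputs])

    # When the button is clicked, pass the value of `inputs` to the model
    # and write the return value into `outputs`.
    btn.click(fn=generate, inputs=inputs, outputs=outputs)

if __name__ == "__main__":
    demo.launch()

Two smaller points about the commit itself: the green-tea `user_sample`/`system_sample` pair it adds is immediately overwritten by the Mt. Fuji pair, so only the latter ever reaches the prompt; and the top-level `output = generate('人工知能とは何ですか?')` smoke test runs a full generation at import time, which is best removed in a Space since it slows every startup.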