abobonbobo13 commited on
Commit
e8fc194
·
verified ·
1 Parent(s): eb84408

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -105
app.py CHANGED
@@ -8,111 +8,6 @@ model = AutoModelForCausalLM.from_pretrained(
8
  device_map="auto"
9
  )
10
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=False)
11
# --- One-shot prompting demo (script form of the original notebook cells) ---
# Builds a few-shot prompt in the "ユーザー:/システム:" dialogue format the model
# expects, samples one completion, and prints the decoded output.
# Relies on module-level `model`, `tokenizer`, and `torch` defined above.
# NOTE(review): removed the bare expression statements (`device`, `tokens`,
# `output[len(prompt):]`) — they are notebook residue and do nothing in a script.

user_prompt_template = "ユーザー: Hello, you are an assistant that helps me learn Japanese. I am going to ask you a question, so please answer *briefly*."
system_prompt_template = "システム: Sure, I will answer briefly. What can I do for you?"

# one-shot example pair
user_sample = "ユーザー: 日本で一番高い山は何ですか?"
system_sample = "システム: 富士山です。高さは3776メートルです。"

# the actual question
user_prefix = "ユーザー: "  # fixed typo: was `user_prerix`
user_question = "人工知能とは何ですか?"
system_prefix = "システム: "

# assemble the prompt: persona exchange, one-shot example, then the question
prompt = user_prompt_template + "\n" + system_prompt_template + "\n"
prompt += user_sample + "\n" + system_sample + "\n"
prompt += user_prefix + user_question + "\n" + system_prefix

inputs = tokenizer(
    prompt,
    add_special_tokens=False,  # keep extra special tokens out of the prompt
    return_tensors="pt",
)
inputs = inputs.to(model.device)
with torch.no_grad():
    tokens = model.generate(
        **inputs,
        temperature=0.3,
        top_p=0.85,
        max_new_tokens=2048,
        repetition_penalty=1.05,
        do_sample=True,
        pad_token_id=tokenizer.pad_token_id,
        bos_token_id=tokenizer.bos_token_id,
        eos_token_id=tokenizer.eos_token_id,
    )

output = tokenizer.decode(
    tokens[0],
    skip_special_tokens=True,  # drop special tokens from the decoded text
)
print(output)
61
def generate(user_question,
             temperature=0.3,
             top_p=0.85,
             max_new_tokens=2048,
             repetition_penalty=1.05
             ):
    """Generate a brief Japanese answer to *user_question*.

    Wraps the question in a fixed few-shot "ユーザー:/システム:" prompt,
    samples one completion from the module-level ``model``/``tokenizer``,
    and returns only the text generated after the prompt.

    Args:
        user_question: Question text to ask the assistant.
        temperature: Sampling temperature forwarded to ``model.generate``.
        top_p: Nucleus-sampling threshold.
        max_new_tokens: Cap on newly generated tokens.
        repetition_penalty: Penalty forwarded to ``model.generate``.

    Returns:
        The decoded completion with the prompt prefix sliced off.
        Sampling is always enabled (``do_sample=True``), so output is
        nondeterministic.
    """
    user_prompt_template = "ユーザー: Hello, you are an assistant that helps me learn Japanese. I am going to ask you a question, so please answer *briefly*."
    system_prompt_template = "システム: Sure, I will answer briefly. What can I do for you?"

    # one-shot example pair
    user_sample = "ユーザー: 日本で一番高い山は何ですか?"
    system_sample = "システム: 富士山です。高さは3776メートルです。"

    user_prefix = "ユーザー: "  # fixed typo: was `user_prerix`
    system_prefix = "システム: "

    # persona exchange, one-shot example, then the user's question
    prompt = user_prompt_template + "\n" + system_prompt_template + "\n"
    prompt += user_sample + "\n" + system_sample + "\n"
    prompt += user_prefix + user_question + "\n" + system_prefix

    inputs = tokenizer(prompt, add_special_tokens=False, return_tensors="pt")
    inputs = inputs.to(model.device)
    with torch.no_grad():
        tokens = model.generate(
            **inputs,
            temperature=temperature,
            top_p=top_p,
            max_new_tokens=max_new_tokens,
            repetition_penalty=repetition_penalty,
            do_sample=True,
            pad_token_id=tokenizer.pad_token_id,
            bos_token_id=tokenizer.bos_token_id,
            eos_token_id=tokenizer.eos_token_id
        )
    output = tokenizer.decode(tokens[0], skip_special_tokens=True)
    # NOTE(review): assumes decode() reproduces the prompt verbatim so that
    # slicing by len(prompt) strips exactly the prompt — confirm this holds
    # for this tokenizer (normalization could shift the offset).
    return output[len(prompt):]
97
import gradio as gr  # conventional alias

# Single-turn Q&A interface: one question box, one answer box, one button.
with gr.Blocks() as demo:
    question_box = gr.Textbox(label="Question:", placeholder="人工知能とは何ですか?")
    answer_box = gr.Textbox(label="Answer:")
    send_button = gr.Button("Send")

    # On click, pass the question box's value to generate() and show the
    # returned answer in the answer box.
    send_button.click(fn=generate, inputs=question_box, outputs=answer_box)

if __name__ == "__main__":
    demo.launch()
116
  def generate_response(user_question,
117
  chat_history,
118
  temperature=0.3,
@@ -158,7 +53,9 @@ def generate_response(user_question,
158
  output = tokenizer.decode(tokens[0], skip_special_tokens=True)
159
  return output[len(prompt):]
160
 
 
161
 
 
162
  with gr.Blocks() as demo:
163
  chat_history = gr.Chatbot()
164
  user_message = gr.Textbox(label="Question:", placeholder="人工知能とは何ですか?")
@@ -174,3 +71,4 @@ with gr.Blocks() as demo:
174
  if __name__ == "__main__":
175
  demo.launch()
176
 
 
 
8
  device_map="auto"
9
  )
10
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  def generate_response(user_question,
12
  chat_history,
13
  temperature=0.3,
 
53
  output = tokenizer.decode(tokens[0], skip_special_tokens=True)
54
  return output[len(prompt):]
55
 
56
+ import gradio as gr # 慣習としてgrと略記
57
 
58
+
59
  with gr.Blocks() as demo:
60
  chat_history = gr.Chatbot()
61
  user_message = gr.Textbox(label="Question:", placeholder="人工知能とは何ですか?")
 
71
  if __name__ == "__main__":
72
  demo.launch()
73
 
74
+