Update app.py
app.py CHANGED
@@ -1,21 +1,21 @@
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
+import gradio as gr
 
-MODEL_ID = "rinna/bilingual-gpt-neox-4b-instruction-ppo"
 model = AutoModelForCausalLM.from_pretrained(
     "rinna/bilingual-gpt-neox-4b-instruction-ppo",
-tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
+tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=False)
 
 device = model.device
-
+
 
 def generate(user_question,
              temperature=0.3,
     system_prompt_template = "システム: もちろんやで!どんどん質問してな!今日も気分ええわ!"
 
     # one-shot
-
-
+
+
 
     user_sample = "ユーザー:日本一の高さの山は? "
     system_sample = "システム: 富士山や!最高の眺めを拝めるで!!"
@@ -28,27 +28,25 @@ def generate(user_question,
     return output[len(prompt):]
 
 
-
-
+
+
 
 with gr.Blocks() as demo:
     chat_history = gr.Chatbot()
     inputs = gr.Textbox(label="Question:", placeholder="質問を入力してください")
     outputs = gr.Textbox(label="Answer:")
     btn = gr.Button("Send")
-    clear = gr.ClearButton([
+    clear = gr.ClearButton([inputs, chat_history])
+
+    # ボタンが押された時の動作を以下のように定義する:
 
-    # ボタンが押された時の動作を以下のように定義する:
-    # 「inputs内の値を入力としてモデルに渡し、その戻り値をoutputsの値として設定する」
     btn.click(fn=generate, inputs=inputs, outputs=outputs)
 
     def response(user_message, chat_history):
         chat_history.append((user_message, system_message))
         return "", chat_history
 
-
+    inputs.submit(response, inputs=[inputs, chat_history], outputs=[inputs, chat_history])
 
 if __name__ == "__main__":
-    demo.launch()
-
-
+    demo.launch()
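The diff view above truncates several long lines (the from_pretrained(...) call, the generate(...) signature) and hides the content of some changed lines, so the code cannot simply be copied out of it. As a point of reference, here is a minimal sketch of how this model is typically loaded and prompted. The model ID, use_fast=False, the temperature=0.3 default, the ユーザー:/システム: one-shot sample, and the idea of returning only the text that follows the prompt are taken from the diff; the prompt assembly, max_new_tokens, and the exact generation call are assumptions, not the Space's actual code.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Model ID as it appears in the diff; the rest of this block is an illustrative sketch.
MODEL_ID = "rinna/bilingual-gpt-neox-4b-instruction-ppo"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
if torch.cuda.is_available():
    model = model.to("cuda")
device = model.device


def generate(user_question, temperature=0.3):
    # One-shot example in the ユーザー:/システム: turn format, copied from the diff.
    user_sample = "ユーザー:日本一の高さの山は? "
    system_sample = "システム: 富士山や!最高の眺めを拝めるで!!"

    prompt = "\n".join([
        user_sample,
        system_sample,
        "ユーザー: " + user_question,
        "システム: ",
    ])

    token_ids = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
    with torch.no_grad():
        output_ids = model.generate(
            token_ids.to(device),
            do_sample=True,
            temperature=temperature,
            max_new_tokens=256,  # assumed value; not visible in the diff
            pad_token_id=tokenizer.pad_token_id,
        )
    # The diff's generate() returns output[len(prompt):]; slicing off the prompt tokens
    # before decoding achieves the same result a little more robustly.
    answer_ids = output_ids[0][token_ids.shape[1]:]
    return tokenizer.decode(answer_ids, skip_special_tokens=True)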
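The second hunk wires the model into a Gradio Blocks interface: a Chatbot for the history, a question Textbox, an answer Textbox, a Send button hooked up with btn.click(), a ClearButton, and an inputs.submit() handler. A hedged sketch of that wiring follows. The placeholder generate() and the way response() builds a (user, answer) pair are illustrative assumptions; in the diff, response() appends a system_message variable whose definition is not shown.

import gradio as gr


def generate(user_question):
    # Stand-in for the model call sketched above; swap in the real generate().
    return "(model answer to: " + user_question + ")"


with gr.Blocks() as demo:
    chat_history = gr.Chatbot()
    inputs = gr.Textbox(label="Question:", placeholder="質問を入力してください")
    outputs = gr.Textbox(label="Answer:")
    btn = gr.Button("Send")
    clear = gr.ClearButton([inputs, chat_history])

    # When the Send button is pressed, pass the question to the model and
    # show the return value in the Answer box, as the diff's btn.click() does.
    btn.click(fn=generate, inputs=inputs, outputs=outputs)

    def response(user_message, chat_history):
        # Append a (user, assistant) pair to the chatbot history and clear the input box.
        answer = generate(user_message)
        chat_history.append((user_message, answer))
        return "", chat_history

    # Pressing Enter in the question box updates the chat history instead.
    inputs.submit(response, inputs=[inputs, chat_history], outputs=[inputs, chat_history])

if __name__ == "__main__":
    demo.launch()

With this layout the button path writes a single answer string into the Answer box, while the submit path keeps a running conversation in the Chatbot, which matches the two separate event hooks visible in the diff.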