Update app.py
app.py CHANGED
@@ -1,17 +1,15 @@
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
-import gradio as gr
 
+MODEL_ID = "rinna/bilingual-gpt-neox-4b-instruction-ppo"
 model = AutoModelForCausalLM.from_pretrained(
-
-
-    device_map="
+    MODEL_ID,
+    load_in_8bit=True,
+    device_map="auto"
 )
-
-MODEL_ID = "rinna/bilingual-gpt-neox-4b-instruction-ppo"
 tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=False)
 
-
 
 def generate_response(user_question,
                       chat_history,
@@ -63,4 +61,17 @@ def generate_response(user_question,
     output = tokenizer.decode(tokens[0], skip_special_tokens=True)
     return output[len(prompt):]
 
+with gr.Blocks() as demo:
+    chat_history = gr.Chatbot()
+    user_message = gr.Textbox(label="Question:", placeholder="人工知能とは何ですか?")
+    clear = gr.ClearButton([user_message, chat_history])
+
+    def response(user_message, chat_history):
+        system_message = generate_response(user_message, chat_history)
+        chat_history.append((user_message, system_message))
+        return "", chat_history
+
+    user_message.submit(response, inputs=[user_message, chat_history], outputs=[user_message, chat_history])
 
+if __name__ == "__main__":
+    demo.launch()
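The substance of the change: MODEL_ID is now defined before its first use, the model is loaded in 8-bit (load_in_8bit=True needs the bitsandbytes package and a CUDA device), and a Gradio Blocks chat UI is appended after generate_response. On recent transformers releases the 8-bit flag is passed through BitsAndBytesConfig rather than as a bare kwarg; a minimal sketch of that alternative spelling, not what the commit itself uses:

from transformers import AutoModelForCausalLM, BitsAndBytesConfig

MODEL_ID = "rinna/bilingual-gpt-neox-4b-instruction-ppo"
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),  # replaces load_in_8bit=True
    device_map="auto",  # place layers on available devices automatically
)

At 8 bits per weight the ~4B-parameter model needs roughly half the memory of an fp16 load, which is presumably what lets it fit the Space's hardware.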
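The diff elides the body of generate_response (old lines 16-62); only the first two parameters of the signature, the final decode, and return output[len(prompt):] are visible. A minimal sketch of what such a body typically looks like for this model, assuming the ユーザー:/システム: prompt format and the <NL> newline convention documented on the rinna model card; the extra defaulted parameters and all generation settings below are illustrative guesses, not the Space's actual values:

def generate_response(user_question,
                      chat_history,
                      max_new_tokens=512,  # hypothetical defaults standing in
                      temperature=1.0,     # for the parameters the diff elides
                      top_p=0.85):
    # Fold prior turns into the conversation, then append the new question.
    turns = []
    for user_msg, system_msg in chat_history:
        turns.append(f"ユーザー: {user_msg}")
        turns.append(f"システム: {system_msg}")
    turns.append(f"ユーザー: {user_question}")
    turns.append("システム: ")
    prompt = "\n".join(turns).replace("\n", "<NL>")

    token_ids = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
    with torch.no_grad():
        tokens = model.generate(
            token_ids.to(model.device),
            max_new_tokens=max_new_tokens,
            do_sample=True,
            temperature=temperature,
            top_p=top_p,
            pad_token_id=tokenizer.pad_token_id,
        )
    # These last two lines are the ones the diff actually shows.
    output = tokenizer.decode(tokens[0], skip_special_tokens=True)
    return output[len(prompt):]

The added response callback then only has to pair the question with this answer, append the tuple to the Chatbot history, and return an empty string to clear the textbox, which is exactly what the new lines 69-74 do.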