File size: 1,503 Bytes
92c62ca
 
059c42c
92c62ca
d0ea771
e87cafc
059c42c
92c62ca
e87cafc
059c42c
92c62ca
e87cafc
92c62ca
 
 
 
059c42c
 
e87cafc
92c62ca
 
 
faafa45
92c62ca
 
 
 
 
 
e87cafc
059c42c
 
e87cafc
86938fd
 
e87cafc
 
 
059c42c
 
 
86938fd
e87cafc
 
86938fd
 
 
 
059c42c
92c62ca
86938fd
059c42c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr

# Model / tokenizer setup.
# Fix: the original `from_pretrained(` call was never closed (SyntaxError) and
# MODEL_ID was used before being defined (NameError on L112) — define the
# constant once and use it for both loads.
MODEL_ID = "rinna/bilingual-gpt-neox-4b-instruction-ppo"

model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
# rinna's GPT-NeoX tokenizer requires the slow (sentencepiece) implementation.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=False)

# Run generation on whichever device the model was loaded onto.
device = model.device


def generate(user_question, temperature=0.3):
    """Generate an answer to *user_question* with the rinna instruct model.

    The prompt follows rinna's "ユーザー:/システム:" dialogue format, with a
    persona line and a one-shot example prepended before the real question.

    Args:
        user_question: The question text typed by the user.
        temperature: Sampling temperature passed to ``model.generate``.

    Returns:
        The decoded model output with the prompt prefix stripped, i.e. only
        the newly generated system reply.
    """
    # Persona line: a cheerful Kansai-dialect assistant.
    system_prompt_template = "システム: もちろんやで!どんどん質問してな!今日も気分ええわ!"

    # one-shot example showing the expected question/answer style
    user_sample = "ユーザー:日本一の高さの山は? "
    system_sample = "システム: 富士山や!最高の眺めを拝めるで!!"

    # Prefixes for the real turn. (Fixes the original `user_prerix` typo.)
    user_prefix = "ユーザー: "
    system_prefix = "システム: "

    # Assemble the full prompt; the trailing system_prefix cues the model to
    # answer as the assistant. NOTE(review): the original block never built
    # `prompt` or `tokens` — this is the reconstructed missing middle.
    prompt = "\n".join(
        [
            system_prompt_template,
            user_sample,
            system_sample,
            user_prefix + user_question,
            system_prefix,
        ]
    )

    token_ids = tokenizer.encode(
        prompt, add_special_tokens=False, return_tensors="pt"
    )
    with torch.no_grad():
        tokens = model.generate(
            token_ids.to(device),
            max_new_tokens=256,
            do_sample=True,
            temperature=temperature,
            pad_token_id=tokenizer.pad_token_id,
        )

    output = tokenizer.decode(tokens[0], skip_special_tokens=True)
    # Strip the echoed prompt so only the new reply is returned.
    return output[len(prompt):]





# Gradio UI: a chatbot pane plus a plain-answer textbox, both fed by generate().
with gr.Blocks() as demo:
    chat_history = gr.Chatbot()
    inputs = gr.Textbox(label="Question:", placeholder="質問を入力してください")
    outputs = gr.Textbox(label="Answer:")
    btn = gr.Button("Send")
    clear = gr.ClearButton([inputs, chat_history])

    # ボタンが押された時の動作を以下のように定義する:
    # (Button writes the raw answer into the Answer textbox.)
    btn.click(fn=generate, inputs=inputs, outputs=outputs)

    def response(user_message, chat_history):
        """Handle a submitted question: append (question, answer) to the chat.

        Fix: the original referenced an undefined ``system_message`` (NameError
        on every submit) — the answer must come from generate().
        """
        system_message = generate(user_message)
        chat_history.append((user_message, system_message))
        # Clear the input box and return the updated history.
        return "", chat_history

    # Pressing Enter in the textbox drives the chatbot pane.
    inputs.submit(response, inputs=[inputs, chat_history], outputs=[inputs, chat_history])

if __name__ == "__main__":
    demo.launch()