import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
# import spaces  # needed only when running on ZeroGPU hardware

"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
# client = InferenceClient("BatsResearch/bonito-v1")
model = AutoModelForCausalLM.from_pretrained("BatsResearch/bonito-v1")
tokenizer = AutoTokenizer.from_pretrained("BatsResearch/bonito-v1")

# Move the model to the GPU when one is available; fall back to CPU so the
# demo still runs (slowly) on CPU-only hardware.
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

def respond(
    context: str,
    task_type: str = "extractive question answering",
    max_tokens: int = 256,
    temperature: float = 0.5,
    top_p: float = 0.95,
):
    # Build the Bonito prompt: the task type, the context, and a trailing
    # <|task|> marker that cues the model to generate a task.
    input_text = "<|tasktype|>\n" + task_type.strip()
    input_text += "\n<|context|>\n" + context.strip() + "\n<|task|>\n"

    input_ids = tokenizer.encode(input_text, return_tensors="pt").to(device)
    outputs = model.generate(
        input_ids,
        max_new_tokens=max_tokens,
        temperature=temperature,
        do_sample=True,
        top_p=top_p,
    )
    # Decode only the newly generated tokens: outputs has shape
    # (batch, sequence), so take the first sequence and skip the prompt.
    pred_start = int(input_ids.shape[-1])
    pred = tokenizer.decode(outputs[0][pred_start:], skip_special_tokens=True)

    # The raw Bonito output is an instruction-response pair; the instruction
    # may reference the passage via a "{{context}}" placeholder that
    # post-processing can replace with the original context.
    return pred
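
# A minimal post-processing sketch, assuming (as in the BatsResearch bonito
# reference implementation) that the generated pair separates instruction
# and response with a "<|pipe|>" marker; if the marker is absent, the raw
# text is returned as the instruction with an empty response.
def split_pair(pred: str) -> tuple[str, str]:
    if "<|pipe|>" in pred:
        instruction, response = pred.split("<|pipe|>", 1)
        return instruction.strip(), response.strip()
    return pred.strip(), ""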


    # for token in client.text_generation(input_text, max_tokens=max_tokens, temperature=temperature, top_p=top_p, stream=True):
    #     yield token

    # messages = [{"role": "system", "content": system_message}]

    # for val in history:
    #     if val[0]:
    #         messages.append({"role": "user", "content": val[0]})
    #     if val[1]:
    #         messages.append({"role": "assistant", "content": val[1]})

    # messages.append({"role": "user", "content": message})

    # response = ""

    # for message in client.chat_completion(
    #     messages,
    #     max_tokens=max_tokens,
    #     stream=True,
    #     temperature=temperature,
    #     top_p=top_p,
    # ):
    #     token = message.choices[0].delta.content

    #     response += token
    #     yield response


"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""

description = """
Bonito (BatsResearch/bonito-v1) converts unannotated text into task-specific training data for instruction tuning.
Paste a context below to generate an instruction-response pair for extractive question answering.
"""


examples = [
    "Hugging Face, Inc. is a French-American company incorporated under the Delaware General Corporation Law[1] and based in New York City that develops computation tools for building applications using machine learning. It is most notable for its transformers library built for natural language processing applications and its platform that allows users to share machine learning models and datasets and showcase their work.",
    "In order to make your Space work with ZeroGPU you need to decorate the Python functions that actually require a GPU with @spaces.GPU \n During the time when a decorated function is invoked, the Space will be attributed a GPU, and it will release it upon completion of the function.",
    "A spectre is haunting Europe – the spectre of communism. All the powers of old Europe have entered into a holy alliance to exorcise this spectre: Pope and Tsar, Metternich and Guizot, French Radicals and German police-spies"
]

demo = gr.Interface(
    respond,
    inputs=gr.Textbox(lines=5, label="Enter context here"),
    outputs=gr.Textbox(lines=20, label="Generated Instruction-Response Pairs"),
    examples=examples,
    description=description,
)


if __name__ == "__main__":
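    # On a busy Space, demo.queue().launch() would queue concurrent requests
    # for the single GPU; plain launch() keeps the demo minimal.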
    demo.launch()