File size: 1,002 Bytes
3965925
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

# Load model
# Loads a locally fine-tuned seq2seq model (directory "./mtpe-model" must exist
# next to this script — e.g. a checkpoint exported by a training run) and wraps
# it in a text2text-generation pipeline used by predict() below.
tokenizer = AutoTokenizer.from_pretrained("./mtpe-model")
model = AutoModelForSeq2SeqLM.from_pretrained("./mtpe-model")
pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)

def predict(task, prompt, context, auto_cot):
    """Run the seq2seq pipeline on a task-tagged prompt.

    Builds an input of the form ``[TASK: <TASK>] <prompt>``, optionally
    appending a ``Context:`` section and a chain-of-thought trigger line,
    then returns the pipeline's generated text (capped at 128 new tokens).

    Args:
        task: Task name; upper-cased into the ``[TASK: ...]`` tag.
        prompt: The user's prompt text.
        context: Optional extra context; appended only when truthy.
        auto_cot: When true, appends "Let's think step by step." on a new
            line to elicit chain-of-thought style generation.

    Returns:
        The generated text string from the pipeline.
    """
    # Accumulate the pieces and join once, rather than repeated +=.
    segments = [f"[TASK: {task.upper()}] {prompt}"]
    if context:
        segments.append(f" Context: {context}")
    if auto_cot:
        segments.append("\nLet's think step by step.")
    model_input = "".join(segments)

    generations = pipe(model_input, max_new_tokens=128)
    return generations[0]["generated_text"]

# Wire predict() into a simple Gradio UI: three text inputs (task tag, prompt,
# optional context) plus a checkbox that toggles the Auto-CoT suffix.
iface = gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(label="Task", value="qa"),  # default task tag is "qa"
        gr.Textbox(label="Prompt"),
        gr.Textbox(label="Context (optional)", lines=2),
        gr.Checkbox(label="Enable Auto-CoT")
    ],
    outputs="text",
    title="Prompt Playground Inference API",
    description="Runs your trained mtpe-model from HF Spaces"
)

# Start the Gradio server (blocking call; runs at import/script execution time).
iface.launch()