import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

# Load the fine-tuned seq2seq model from the local ./mtpe-model directory
tokenizer = AutoTokenizer.from_pretrained("./mtpe-model")
model = AutoModelForSeq2SeqLM.from_pretrained("./mtpe-model")
pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)

def predict(task, prompt, context, auto_cot):
    # Build the model input: a task tag, the user prompt, optional context,
    # and an optional chain-of-thought trigger phrase.
    # Example: "[TASK: QA] Who wrote Hamlet? Context: ...\nLet's think step by step."
    input_str = f"[TASK: {task.upper()}] {prompt}"
    if context:
        input_str += f" Context: {context}"
    if auto_cot:
        input_str += "\nLet's think step by step."
    # Generate and return the model's text output
    output = pipe(input_str, max_new_tokens=128)[0]["generated_text"]
    return output
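
# A minimal local smoke test (a sketch: assumes the ./mtpe-model directory is
# present, e.g. when running this file outside of Spaces with `python app.py`):
#
#   print(predict("qa", "What does MTPE stand for?", "", False))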

iface = gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(label="Task", value="qa"),
        gr.Textbox(label="Prompt"),
        gr.Textbox(label="Context (optional)", lines=2),
        gr.Checkbox(label="Enable Auto-CoT"),
    ],
    outputs="text",
    title="Prompt Playground Inference API",
    description="Runs your trained mtpe-model from HF Spaces",
)

iface.launch()
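
# Usage sketch for querying the deployed Space over its API via gradio_client
# (assumptions: the Space is public, and "jayanth7111/mtpe-playground" is a
# hypothetical Space ID -- the actual ID is not given in this file):
#
#   from gradio_client import Client
#
#   client = Client("jayanth7111/mtpe-playground")  # hypothetical Space ID
#   result = client.predict(
#       "qa",                  # task
#       "Who wrote Hamlet?",   # prompt
#       "",                    # context (optional)
#       False,                 # auto_cot
#       api_name="/predict",
#   )
#   print(result)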