jayanth7111 commited on
Commit
3965925
·
verified ·
1 Parent(s): eb8bcde

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -0
app.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

# Load the fine-tuned seq2seq model and its tokenizer from the local
# "./mtpe-model" directory (expected to be bundled with the Space).
tokenizer = AutoTokenizer.from_pretrained("./mtpe-model")
model = AutoModelForSeq2SeqLM.from_pretrained("./mtpe-model")
# Wrap tokenize -> generate -> decode in a single text2text pipeline;
# reused by every request handled below.
pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
def predict(task, prompt, context, auto_cot):
    """Assemble a tagged prompt and run it through the seq2seq pipeline.

    Args:
        task: Short task name (e.g. "qa"); upper-cased into a [TASK: ...] tag.
        prompt: The user's main prompt text.
        context: Optional supporting context; appended only when non-empty.
        auto_cot: When truthy, appends a chain-of-thought trigger line.

    Returns:
        The pipeline's generated text for the assembled input.
    """
    segments = [f"[TASK: {task.upper()}] {prompt}"]
    if context:
        segments.append(f" Context: {context}")
    if auto_cot:
        segments.append("\nLet's think step by step.")
    model_input = "".join(segments)
    return pipe(model_input, max_new_tokens=128)[0]["generated_text"]
# UI widgets, in the same order as predict()'s parameters.
input_widgets = [
    gr.Textbox(label="Task", value="qa"),
    gr.Textbox(label="Prompt"),
    gr.Textbox(label="Context (optional)", lines=2),
    gr.Checkbox(label="Enable Auto-CoT"),
]

# Simple single-function Gradio app around predict().
iface = gr.Interface(
    fn=predict,
    inputs=input_widgets,
    outputs="text",
    title="Prompt Playground Inference API",
    description="Runs your trained mtpe-model from HF Spaces",
)

iface.launch()