jayanth7111 committed on
Commit
d9a9784
·
verified ·
1 Parent(s): 18ba46f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -16
app.py CHANGED
@@ -1,32 +1,22 @@
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
3
 
4
# Load model
# Load the locally bundled seq2seq checkpoint ("./mtpe-model") and wrap it
# in a text2text-generation pipeline; `pipe` is the single inference entry
# point used by predict() below.  Runs once at import time.
tokenizer = AutoTokenizer.from_pretrained("./mtpe-model")
model = AutoModelForSeq2SeqLM.from_pretrained("./mtpe-model")
pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
8
 
9
def predict(task, prompt, context, auto_cot):
    """Run the mtpe pipeline on a task-tagged prompt and return generated text.

    Builds an input of the form "[TASK: <TASK>] <prompt>", optionally
    appending " Context: <context>" and a chain-of-thought trigger line,
    then decodes up to 128 new tokens.

    Args:
        task: Short task keyword (e.g. "qa"); upper-cased into the tag.
        prompt: The user prompt to run.
        context: Optional extra context; skipped when falsy.
        auto_cot: When truthy, appends "Let's think step by step.".

    Returns:
        The pipeline's generated_text string for the first candidate.
    """
    input_str = f"[TASK: {task.upper()}] {prompt}"
    if context:
        input_str += f" Context: {context}"
    if auto_cot:
        input_str += "\nLet's think step by step."
    output = pipe(input_str, max_new_tokens=128)[0]["generated_text"]
    return output
18
-
19
# Gradio UI: labeled inputs mirror predict()'s positional parameters
# (task, prompt, context, auto_cot) in order.
iface = gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(label="Task", value="qa"),
        gr.Textbox(label="Prompt"),
        gr.Textbox(label="Context (optional)", lines=2),
        gr.Checkbox(label="Enable Auto-CoT")
    ],
    outputs="text",
    title="Prompt Playground Inference API",
    description="Runs your trained mtpe-model from HF Spaces"
)

# Start the Gradio server (blocks until shut down).
iface.launch()
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
3
 
 
4
# Load the locally bundled seq2seq checkpoint ("./mtpe-model") and wrap it
# in a text2text-generation pipeline; `pipe` is the single inference entry
# point used by predict() below.  Runs once at import time.
tokenizer = AutoTokenizer.from_pretrained("./mtpe-model")
model = AutoModelForSeq2SeqLM.from_pretrained("./mtpe-model")
pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
7
 
8
+ def predict(task, prompt, context="", auto_cot=False):
9
  input_str = f"[TASK: {task.upper()}] {prompt}"
10
  if context:
11
  input_str += f" Context: {context}"
12
  if auto_cot:
13
  input_str += "\nLet's think step by step."
14
+ return pipe(input_str, max_new_tokens=128)[0]["generated_text"]
15
 
16
# Gradio UI wiring for predict().
#
# Fix: the bare `inputs=["text", "text", "text", "checkbox"]` spec left three
# indistinguishable, unlabeled textboxes even though predict() requires a task
# keyword (e.g. "qa") in the first field — restore the labeled components,
# the "qa" default, and the title/description so the UI is self-explanatory.
# Same variable name, same fn, same number/order/types of inputs and output.
app = gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(label="Task", value="qa"),
        gr.Textbox(label="Prompt"),
        gr.Textbox(label="Context (optional)", lines=2),
        gr.Checkbox(label="Enable Auto-CoT"),
    ],
    outputs="text",
    title="Prompt Playground Inference API",
    description="Runs your trained mtpe-model from HF Spaces",
)

# Start the Gradio server (blocks until shut down).
app.launch()