jayanth7111 committed · Commit ad63f3a · verified · 1 Parent(s): 8e19006

Update app.py

Files changed (1):
  1. app.py +8 -10
app.py CHANGED
@@ -1,26 +1,24 @@
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
 
-# Load model from local files
 tokenizer = AutoTokenizer.from_pretrained(".", local_files_only=True)
 model = AutoModelForSeq2SeqLM.from_pretrained(".", local_files_only=True)
 pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
 
 def predict(task, prompt, context="", auto_cot=False):
-    full_prompt = f"[TASK: {task.upper()}] {prompt}"
+    input_str = f"[TASK: {task.upper()}] {prompt}"
     if context:
-        full_prompt += f" Context: {context}"
+        input_str += f" Context: {context}"
     if auto_cot:
-        full_prompt += "\nLet's think step by step."
+        input_str += "\nLet's think step by step."
 
-    output = pipe(full_prompt, max_new_tokens=128)[0]["generated_text"]
-    return output
+    result = pipe(input_str, max_new_tokens=128)[0]["generated_text"]
+    return result
 
-# API-only launch
-app = gr.Interface(
+demo = gr.Interface(
     fn=predict,
     inputs=["text", "text", "text", "checkbox"],
-    outputs="text",
+    outputs="text"
 )
 
-app.launch(share=True, inline=False, show_api=True)
+demo.launch()
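
For context, the updated version drops share=True and show_api=True in favor of Gradio's default launch behavior; the endpoint remains callable programmatically once the Space is running, since gr.Interface names its API route after the wrapped function (/predict here). A minimal sketch using gradio_client — the Space ID below is a placeholder, not the real one, and the positional arguments follow predict(task, prompt, context, auto_cot):

# Minimal sketch, assuming a published Space; the ID is a placeholder.
from gradio_client import Client

client = Client("jayanth7111/SPACE_NAME")  # placeholder Space ID
result = client.predict(
    "summarize",   # task -> becomes the "[TASK: SUMMARIZE]" prefix
    "Photosynthesis converts light into chemical energy.",  # prompt
    "",            # context (optional, appended as " Context: ...")
    False,         # auto_cot (True appends "Let's think step by step.")
    api_name="/predict",  # gr.Interface's default route for fn=predict
)
print(result)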