jayanth7111 committed on
Commit
8e19006
·
verified ·
1 Parent(s): 8318b40

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -6
app.py CHANGED
@@ -1,22 +1,26 @@
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
3
 
 
4
  tokenizer = AutoTokenizer.from_pretrained(".", local_files_only=True)
5
  model = AutoModelForSeq2SeqLM.from_pretrained(".", local_files_only=True)
6
  pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
7
 
8
  def predict(task, prompt, context="", auto_cot=False):
9
- input_str = f"[TASK: {task.upper()}] {prompt}"
10
  if context:
11
- input_str += f" Context: {context}"
12
  if auto_cot:
13
- input_str += "\nLet's think step by step."
14
- return pipe(input_str, max_new_tokens=128)[0]["generated_text"]
15
 
 
 
 
 
16
  app = gr.Interface(
17
  fn=predict,
18
  inputs=["text", "text", "text", "checkbox"],
19
- outputs="text"
20
  )
21
 
22
- app.launch()
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
3
 
4
+ # Load model from local files
5
  tokenizer = AutoTokenizer.from_pretrained(".", local_files_only=True)
6
  model = AutoModelForSeq2SeqLM.from_pretrained(".", local_files_only=True)
7
  pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
8
 
9
  def predict(task, prompt, context="", auto_cot=False):
10
+ full_prompt = f"[TASK: {task.upper()}] {prompt}"
11
  if context:
12
+ full_prompt += f" Context: {context}"
13
  if auto_cot:
14
+ full_prompt += "\nLet's think step by step."
 
15
 
16
+ output = pipe(full_prompt, max_new_tokens=128)[0]["generated_text"]
17
+ return output
18
+
19
+ # ✅ API-only launch
20
  app = gr.Interface(
21
  fn=predict,
22
  inputs=["text", "text", "text", "checkbox"],
23
+ outputs="text",
24
  )
25
 
26
+ app.launch(share=True, inline=False, show_api=True)