jayanth7111 committed
Commit 1c6ad86 · verified · 1 Parent(s): 9d2acd8

Update app.py

Files changed (1)
  1. app.py +6 -2
app.py CHANGED
@@ -1,11 +1,13 @@
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
 
+# Load model and tokenizer from local files in the root (already uploaded)
 tokenizer = AutoTokenizer.from_pretrained(".", local_files_only=True)
 model = AutoModelForSeq2SeqLM.from_pretrained(".", local_files_only=True)
 pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
 
-def predict(task, prompt, context="", auto_cot=False):
+# Inference function
+def predict(task, prompt, context, auto_cot):
     full_prompt = f"[TASK: {task.upper()}] {prompt}"
     if context:
         full_prompt += f" Context: {context}"
@@ -14,10 +16,12 @@ def predict(task, prompt, context="", auto_cot=False):
     output = pipe(full_prompt, max_new_tokens=128)[0]["generated_text"]
     return output
 
+# Create Interface
 demo = gr.Interface(
     fn=predict,
     inputs=["text", "text", "text", "checkbox"],
     outputs="text"
 )
 
-demo.launch()
+# ⚠️ Do NOT set share=True or inbrowser=True
+demo.launch()
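
Note on the signature change: the new predict(task, prompt, context, auto_cot) drops the default values, so all four arguments must now be supplied — which matches how gr.Interface invokes fn with one positional argument per input component. For reference, a standalone sketch of the prompt format the function builds (build_prompt is a hypothetical helper, not part of the commit; the lines elided between the two hunks may add further handling, e.g. for auto_cot):

def build_prompt(task: str, prompt: str, context: str = "") -> str:
    # Mirrors the construction in predict(): task tag, then prompt, then optional context
    full_prompt = f"[TASK: {task.upper()}] {prompt}"
    if context:
        full_prompt += f" Context: {context}"
    return full_prompt

print(build_prompt("qa", "Who wrote Hamlet?", "A question about English drama."))
# -> [TASK: QA] Who wrote Hamlet? Context: A question about English drama.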