jayanth7111 committed on
Commit 9d2acd8 · verified · 1 Parent(s): ad63f3a

Update app.py

Files changed (1)
  1. app.py +5 -6
app.py CHANGED
@@ -6,14 +6,13 @@ model = AutoModelForSeq2SeqLM.from_pretrained(".", local_files_only=True)
 pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
 
 def predict(task, prompt, context="", auto_cot=False):
-    input_str = f"[TASK: {task.upper()}] {prompt}"
+    full_prompt = f"[TASK: {task.upper()}] {prompt}"
     if context:
-        input_str += f" Context: {context}"
+        full_prompt += f" Context: {context}"
     if auto_cot:
-        input_str += "\nLet's think step by step."
-
-    result = pipe(input_str, max_new_tokens=128)[0]["generated_text"]
-    return result
+        full_prompt += "\nLet's think step by step."
+    output = pipe(full_prompt, max_new_tokens=128)[0]["generated_text"]
+    return output
 
 demo = gr.Interface(
     fn=predict,
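
For reference, a minimal sketch of the prompt-assembly step from the updated predict(), isolated so it can be run without the model, pipeline, or Gradio app. The build_prompt name and the sample inputs are illustrative assumptions, not part of the commit.

# Minimal sketch: the prompt-assembly logic of the updated predict(),
# extracted as a standalone helper. build_prompt and the example inputs
# are illustrative; the model/pipeline setup from app.py is omitted.
def build_prompt(task, prompt, context="", auto_cot=False):
    full_prompt = f"[TASK: {task.upper()}] {prompt}"
    if context:
        full_prompt += f" Context: {context}"
    if auto_cot:
        full_prompt += "\nLet's think step by step."
    return full_prompt

# Example of the string that would be handed to the text2text pipeline:
print(build_prompt("qa", "Who wrote Hamlet?", context="Elizabethan drama", auto_cot=True))
# [TASK: QA] Who wrote Hamlet? Context: Elizabethan drama
# Let's think step by step.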