research14 committed on
Commit
97f5365
·
1 Parent(s): 488b0ed
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -94,7 +94,7 @@ with open('demonstration_3_42_parse.txt', 'r') as f:
94
  theme = gr.themes.Soft()
95
 
96
 
97
- gpt_pipeline = pipeline("fill-mask", model="gpt2")
98
  #vicuna7b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-7b-v1.3")
99
  #vicuna13b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-13b-v1.3")
100
  #vicuna33b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-33b-v1.3")
@@ -126,9 +126,9 @@ def process_text(model_name, task, text):
126
  result2 = gpt_pipeline(strategy2_format)
127
  result3 = gpt_pipeline(strategy3_format)
128
 
129
- generated_text1 = result1[0]['sequence']
130
- generated_text2 = result2[0]['sequence']
131
- generated_text3 = result3[0]['sequence']
132
 
133
  return (generated_text1, generated_text2, generated_text3)
134
  # elif task == 'Chunking':
 
94
  theme = gr.themes.Soft()
95
 
96
 
97
+ gpt_pipeline = pipeline(task="text2text-generation", model="gpt2")
98
  #vicuna7b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-7b-v1.3")
99
  #vicuna13b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-13b-v1.3")
100
  #vicuna33b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-33b-v1.3")
 
126
  result2 = gpt_pipeline(strategy2_format)
127
  result3 = gpt_pipeline(strategy3_format)
128
 
129
+ generated_text1 = result1[0]['generated_text']
130
+ generated_text2 = result2[0]['generated_text']
131
+ generated_text3 = result3[0]['generated_text']
132
 
133
  return (generated_text1, generated_text2, generated_text3)
134
  # elif task == 'Chunking':