research14 committed on
Commit
795f139
·
1 Parent(s): 143f8e6
Files changed (1) hide show
  1. app.py +18 -18
app.py CHANGED
@@ -94,7 +94,7 @@ with open('demonstration_3_42_parse.txt', 'r') as f:
94
  theme = gr.themes.Soft()
95
 
96
 
97
- gpt_pipeline = pipeline(task="text2text-generation", model="gpt2")
98
  #vicuna7b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-7b-v1.3")
99
  #vicuna13b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-13b-v1.3")
100
  #vicuna33b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-33b-v1.3")
@@ -131,24 +131,24 @@ def process_text(model_name, task, text):
131
  generated_text3 = result3[0]['sequence']
132
 
133
  return (generated_text1, generated_text2, generated_text3)
134
- elif task == 'Chunking':
135
- strategy1_format = template_all.format(text)
136
- strategy2_format = prompt2_chunk.format(text)
137
- strategy3_format = demon_chunk
138
-
139
- result1 = gpt_pipeline(strategy1_format)[0]['generated_text']
140
- result2 = gpt_pipeline(strategy2_format)[0]['generated_text']
141
- result3 = gpt_pipeline(strategy3_format)[0]['generated_text']
142
- return (result1, result2, result3)
143
- elif task == 'Parsing':
144
- strategy1_format = template_all.format(text)
145
- strategy2_format = prompt2_parse.format(text)
146
- strategy3_format = demon_parse
147
 
148
- result1 = gpt_pipeline(strategy1_format)[0]['generated_text']
149
- result2 = gpt_pipeline(strategy2_format)[0]['generated_text']
150
- result3 = gpt_pipeline(strategy3_format)[0]['generated_text']
151
- return (result1, result2, result3)
152
 
153
  # Gradio interface
154
  iface = gr.Interface(
 
94
  theme = gr.themes.Soft()
95
 
96
 
97
+ gpt_pipeline = pipeline("fill-mask", model="gpt2")
98
  #vicuna7b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-7b-v1.3")
99
  #vicuna13b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-13b-v1.3")
100
  #vicuna33b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-33b-v1.3")
 
131
  generated_text3 = result3[0]['sequence']
132
 
133
  return (generated_text1, generated_text2, generated_text3)
134
+ # elif task == 'Chunking':
135
+ # strategy1_format = template_all.format(text)
136
+ # strategy2_format = prompt2_chunk.format(text)
137
+ # strategy3_format = demon_chunk
138
+
139
+ # result1 = gpt_pipeline(strategy1_format)[0]['generated_text']
140
+ # result2 = gpt_pipeline(strategy2_format)[0]['generated_text']
141
+ # result3 = gpt_pipeline(strategy3_format)[0]['generated_text']
142
+ # return (result1, result2, result3)
143
+ # elif task == 'Parsing':
144
+ # strategy1_format = template_all.format(text)
145
+ # strategy2_format = prompt2_parse.format(text)
146
+ # strategy3_format = demon_parse
147
 
148
+ # result1 = gpt_pipeline(strategy1_format)[0]['generated_text']
149
+ # result2 = gpt_pipeline(strategy2_format)[0]['generated_text']
150
+ # result3 = gpt_pipeline(strategy3_format)[0]['generated_text']
151
+ # return (result1, result2, result3)
152
 
153
  # Gradio interface
154
  iface = gr.Interface(