BigSalmon committed on
Commit
a9cf166
·
1 Parent(s): 1ad8bb9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +139 -98
app.py CHANGED
@@ -78,47 +78,110 @@ def get_model():
78
  #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/DefinitionsSynonyms1")
79
  #model = AutoModelForCausalLM.from_pretrained("BigSalmon/DefinitionsSynonyms1")
80
 
81
- tokenizer = AutoTokenizer.from_pretrained("BigSalmon/DefinitionsSynonyms2")
82
- model = AutoModelForCausalLM.from_pretrained("BigSalmon/DefinitionsSynonyms2")
83
- tokenizer2 = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincolnMedium")
84
- model2 = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincolnMedium")
85
- return model, model2, tokenizer, tokenizer2
 
 
 
86
 
87
- model, model2, tokenizer, tokenizer2 = get_model()
88
 
89
- st.text('''For Prompt Templates: https://huggingface.co/BigSalmon/InformalToFormalLincoln82Paraphrase''')
 
 
 
 
 
 
 
 
 
 
90
 
91
- temp = st.sidebar.slider("Temperature", 0.7, 1.5)
92
- number_of_outputs = st.sidebar.slider("Number of Outputs", 5, 50)
93
- lengths = st.sidebar.slider("Length", 3, 500)
94
- bad_words = st.text_input("Words You Do Not Want Generated", " core lemon height time ")
95
- logs_outputs = st.sidebar.slider("Logit Outputs", 50, 300)
96
 
97
- def run_generate(text, bad_words):
98
- yo = []
99
- input_ids = tokenizer.encode(text, return_tensors='pt')
100
- res = len(tokenizer.encode(text))
101
- bad_words = bad_words.split()
102
- bad_word_ids = []
103
- for bad_word in bad_words:
104
- bad_word = " " + bad_word
105
- ids = tokenizer(bad_word).input_ids
106
- bad_word_ids.append(ids)
107
- sample_outputs = model.generate(
108
- input_ids,
109
- do_sample=True,
110
- max_length= res + lengths,
111
- min_length = res + lengths,
112
- top_k=50,
113
- temperature=temp,
114
- num_return_sequences=number_of_outputs,
115
- bad_words_ids=bad_word_ids
116
- )
117
- for i in range(number_of_outputs):
118
- e = tokenizer.decode(sample_outputs[i])
119
- e = e.replace(text, "")
120
- yo.append(e)
121
- return yo
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
 
123
  def BestProbs5(prompt):
124
  prompt = prompt.strip()
@@ -137,88 +200,66 @@ def BestProbs5(prompt):
137
  st.write(g)
138
  l = run_generate(g, "hey")
139
  st.write(l)
140
-
141
- def run_generate2(text, bad_words):
142
  yo = []
143
- input_ids = tokenizer2.encode(text, return_tensors='pt')
144
- res = len(tokenizer2.encode(text))
145
  bad_words = bad_words.split()
146
- bad_word_ids = []
147
  for bad_word in bad_words:
148
  bad_word = " " + bad_word
149
- ids = tokenizer2(bad_word).input_ids
150
  bad_word_ids.append(ids)
151
- sample_outputs = model2.generate(
152
  input_ids,
153
  do_sample=True,
154
- max_length= res + lengths,
155
- min_length = res + lengths,
156
  top_k=50,
157
- temperature=temp,
158
- num_return_sequences=number_of_outputs,
159
  bad_words_ids=bad_word_ids
160
  )
161
- for i in range(number_of_outputs):
162
- e = tokenizer2.decode(sample_outputs[i])
163
  e = e.replace(text, "")
164
  yo.append(e)
 
165
  return yo
166
-
167
- def prefix_format(sentence):
168
- words = sentence.split()
169
- if "[MASK]" in sentence:
170
- words2 = words.index("[MASK]")
171
- #print(words2)
172
- output = ("<Prefix> " + ' '.join(words[:words2]) + " <Prefix> " + "<Suffix> " + ' '.join(words[words2+1:]) + " <Suffix>" + " <Middle>")
173
- st.write(output)
174
- else:
175
- st.write("Add [MASK] to sentence")
176
-
177
  with st.form(key='my_form'):
178
- text = st.text_area(label='Enter sentence', value=first)
179
  submit_button = st.form_submit_button(label='Submit')
180
- submit_button2 = st.form_submit_button(label='Submit Log Probs')
181
-
182
- submit_button3 = st.form_submit_button(label='Submit Other Model')
183
- submit_button4 = st.form_submit_button(label='Submit Log Probs Other Model')
184
-
185
- submit_button5 = st.form_submit_button(label='Most Prob')
186
-
187
- submit_button6 = st.form_submit_button(label='Turn Sentence with [MASK] into <Prefix> Format')
188
-
189
  if submit_button:
190
- translated_text = run_generate(text, bad_words)
191
- st.write(translated_text if translated_text else "No translation found")
192
- if submit_button2:
193
  with torch.no_grad():
194
- text2 = str(text)
195
- print(text2)
196
- text3 = tokenizer.encode(text2)
197
- myinput, past_key_values = torch.tensor([text3]), None
198
  myinput = myinput
 
199
  logits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False)
200
  logits = logits[0,-1]
201
  probabilities = torch.nn.functional.softmax(logits)
202
- best_logits, best_indices = logits.topk(logs_outputs)
203
- best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
 
 
 
204
  st.write(best_words)
 
 
 
 
 
 
 
205
  if submit_button3:
206
- translated_text = run_generate2(text, bad_words)
207
- st.write(translated_text if translated_text else "No translation found")
 
208
  if submit_button4:
209
- text2 = str(text)
210
- print(text2)
211
- text3 = tokenizer2.encode(text2)
212
- myinput, past_key_values = torch.tensor([text3]), None
213
- myinput = myinput
214
- logits, past_key_values = model2(myinput, past_key_values = past_key_values, return_dict=False)
215
- logits = logits[0,-1]
216
- probabilities = torch.nn.functional.softmax(logits)
217
- best_logits, best_indices = logits.topk(logs_outputs)
218
- best_words = [tokenizer2.decode([idx.item()]) for idx in best_indices]
219
- st.write(best_words)
220
- if submit_button5:
221
- BestProbs5(text)
222
- if submit_button6:
223
- text2 = str(text)
224
- prefix_format(text2)
 
78
  #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/DefinitionsSynonyms1")
79
  #model = AutoModelForCausalLM.from_pretrained("BigSalmon/DefinitionsSynonyms1")
80
 
81
+ tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln95Paraphrase")
82
+ model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln95Paraphrase")
83
+
84
+ #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/DefinitionsSynonyms2")
85
+ #model = AutoModelForCausalLM.from_pretrained("BigSalmon/DefinitionsSynonyms2")
86
+ #tokenizer2 = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincolnMedium")
87
+ #model2 = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincolnMedium")
88
+ return model, tokenizer
89
 
90
# Load the (cached) model and tokenizer once; both are used as module-level
# globals by every helper below.
model, tokenizer = get_model()

# Few-shot prompt: pairs of informal English sentences and their
# "Style of Abraham Lincoln" paraphrases, ending with an open slot for input.
g = """informal english: garage band has made people who know nothing about music good at creating music.
Translated into the Style of Abraham Lincoln: garage band ( offers the uninitiated in music the ability to produce professional-quality compositions / catapults those for whom music is an uncharted art the ability the realize masterpieces / stimulates music novice's competency to yield sublime arrangements / begets individuals of rudimentary musical talent the proficiency to fashion elaborate suites ).
informal english: chrome extensions can make doing regular tasks much easier to get done.
Translated into the Style of Abraham Lincoln: chrome extensions ( yield the boon of time-saving convenience / ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks / turbocharges the velocity with which one can conduct their obligations ).
informal english: broadband is finally expanding to rural areas, a great development that will thrust them into modern life.
Translated into the Style of Abraham Lincoln: broadband is ( ( finally / at last / after years of delay ) arriving in remote locations / springing to life in far-flung outposts / inching into even the most backwater corners of the nation ) that will leap-frog them into the twenty-first century.
informal english: google translate has made talking to people who do not share your language easier.
Translated into the Style of Abraham Lincoln: google translate ( imparts communicability to individuals whose native tongue differs / mitigates the trials of communication across linguistic barriers / hastens the bridging of semantic boundaries / mollifies the complexity of multilingual communication / avails itself to the internationalization of discussion / flexes its muscles to abet intercultural conversation / calms the tides of linguistic divergence ).
informal english: corn fields are all across illinois, visible once you leave chicago.
Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.
informal english: """

# Sidebar controls shared by the handlers below.
number_of_outputs = st.sidebar.slider("Number of Outputs", 5, 100)
log_nums = st.sidebar.slider("How Many Log Outputs?", 50, 1000)
 
 
 
106
 
107
def BestProbs(prompt):
    """Render the top-10 next-token candidates for ``prompt``.

    For each candidate token, writes it to the Streamlit page and recurses one
    level via ``BestProbs2`` to preview that candidate's own follow-up tokens.

    Relies on module-level ``model``, ``tokenizer``, and ``st``.

    Returns:
        The markdown fragment for the last candidate written (legacy behavior).
    """
    prompt = prompt.strip()
    text = tokenizer.encode(prompt)
    myinput, past_key_values = torch.tensor([text]), None
    logits, past_key_values = model(myinput, past_key_values=past_key_values, return_dict=False)
    # Logits for the final position = distribution over the next token.
    logits = logits[0, -1]
    best_logits, best_indices = logits.topk(10)
    best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
    for i in best_words[0:10]:
        print("_______")
        st.write(f"${i} $\n")
        f = (f"${i} $\n")
        m = (prompt + f"{i}")
        # Preview the continuations of this candidate.
        BestProbs2(m)
    return f
124
+
125
def BestProbs2(prompt):
    """Print and display the top-20 next-token candidates for ``prompt``.

    Writes each candidate both to stdout and to the Streamlit page.
    Relies on module-level ``model``, ``tokenizer``, and ``st``.
    """
    prompt = prompt.strip()
    text = tokenizer.encode(prompt)
    myinput, past_key_values = torch.tensor([text]), None
    logits, past_key_values = model(myinput, past_key_values=past_key_values, return_dict=False)
    # Next-token logits from the last position.
    logits = logits[0, -1]
    best_logits, best_indices = logits.topk(20)
    best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
    for word in best_words[0:20]:
        print(word)
        st.write(word)
138
+
139
def LogProbs(prompt):
    """Build a 10x20 table of next-token candidates and their follow-ups.

    Columns are the top-10 next tokens of ``prompt``; each column holds the
    top-20 tokens predicted after appending that candidate.

    NOTE(review): ``prompt`` is extended *inside* the loop, so candidate k+1 is
    scored on a prompt already grown by candidate k. Preserved as-is, but
    confirm this accumulation is intended rather than a bug.

    Relies on module-level ``model``, ``tokenizer``, ``st``, and ``pd``.

    Returns:
        pandas.DataFrame with one column per top-10 candidate.
    """
    col1 = []
    col2 = []
    prompt = prompt.strip()
    text = tokenizer.encode(prompt)
    myinput, past_key_values = torch.tensor([text]), None
    logits, past_key_values = model(myinput, past_key_values=past_key_values, return_dict=False)
    logits = logits[0, -1]
    best_logits, best_indices = logits.topk(10)
    best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
    # The slice below copies the list, so reassigning best_words inside the
    # loop does not disturb the iteration.
    for i in best_words[0:10]:
        print("_______")
        col1.append(i)
        m = (prompt + f"{i}")
        prompt = m.strip()
        text = tokenizer.encode(prompt)
        myinput, past_key_values = torch.tensor([text]), None
        logits, past_key_values = model(myinput, past_key_values=past_key_values, return_dict=False)
        logits = logits[0, -1]
        best_logits, best_indices = logits.topk(20)
        best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
        for follow_up in best_words[0:20]:
            col2.append(follow_up)
    # col2 holds 10 consecutive groups of 20 follow-ups; slice per candidate
    # (replaces the original 200-term hand-written dict literal).
    d = {col1[k]: col2[k * 20:(k + 1) * 20] for k in range(10)}
    df = pd.DataFrame(data=d)
    print(df)
    st.write(df)
    return df
185
 
186
  def BestProbs5(prompt):
187
  prompt = prompt.strip()
 
200
  st.write(g)
201
  l = run_generate(g, "hey")
202
  st.write(l)
203
+
204
def run_generate(text, bad_words, num_outputs=3, new_tokens=5, temperature=1.0):
    """Sample short continuations of ``text`` from the model.

    Args:
        text: The prompt to continue.
        bad_words: Space-separated words to exclude from generation.
        num_outputs: How many continuations to sample (default 3, as before).
        new_tokens: Exact number of tokens each continuation adds (default 5).
        temperature: Sampling temperature (default 1.0).

    Returns:
        List of ``num_outputs`` generated strings with the prompt stripped off.

    Relies on module-level ``model`` and ``tokenizer``.
    """
    yo = []
    input_ids = tokenizer.encode(text, return_tensors='pt')
    res = len(tokenizer.encode(text))
    # Pre-seeded banned token ids — TODO(review): confirm which tokens 7829
    # and 40940 decode to; they are always excluded regardless of bad_words.
    bad_word_ids = [[7829], [40940]]
    for bad_word in bad_words.split():
        # Leading space so the tokenizer yields the mid-sentence token form.
        ids = tokenizer(" " + bad_word).input_ids
        bad_word_ids.append(ids)
    sample_outputs = model.generate(
        input_ids,
        do_sample=True,
        # min == max forces exactly `new_tokens` generated tokens.
        max_length=res + new_tokens,
        min_length=res + new_tokens,
        top_k=50,
        temperature=temperature,
        num_return_sequences=num_outputs,
        bad_words_ids=bad_word_ids
    )
    for i in range(num_outputs):
        decoded = tokenizer.decode(sample_outputs[i])
        yo.append(decoded.replace(text, ""))
    print(yo)
    return yo
230
+
 
 
 
 
 
 
 
 
 
 
231
with st.form(key='my_form'):
    # Main UI: one text area pre-filled with the few-shot prompt `g`, and
    # four submit buttons dispatching to the helpers above.
    prompt = st.text_area(label='Enter sentence', value=g, height=500)
    submit_button = st.form_submit_button(label='Submit')
    submit_button2 = st.form_submit_button(label='Fast Forward')
    submit_button3 = st.form_submit_button(label='Fast Forward 2.0')
    submit_button4 = st.form_submit_button(label='Get Top')

    if submit_button:
        # Show the top `log_nums` next-token candidates for the prompt.
        with torch.no_grad():
            text = tokenizer.encode(prompt)
            myinput, past_key_values = torch.tensor([text]), None
            # NOTE(review): relies on a module-level `device` defined earlier
            # in the file — confirm it exists before this runs.
            myinput = myinput.to(device)
            logits, past_key_values = model(myinput, past_key_values=past_key_values, return_dict=False)
            logits = logits[0, -1]
            # dim=-1 silences the PyTorch deprecation warning.
            probabilities = torch.nn.functional.softmax(logits, dim=-1)
            best_logits, best_indices = logits.topk(log_nums)
            best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
            text.append(best_indices[0].item())
            best_probabilities = probabilities[best_indices].tolist()
            st.write(best_words)
    if submit_button2:
        print("----")
        st.write("___")
        m = LogProbs(prompt)
        st.write("___")
        st.write(m)
        st.write("___")
    if submit_button3:
        print("----")
        st.write("___")
        # BUG FIX: the original passed the function object itself
        # (st.write(BestProbs)), which just renders the function repr.
        st.write(BestProbs(prompt))
    if submit_button4:
        BestProbs5(prompt)