BigSalmon committed on
Commit
a37ef4f
·
1 Parent(s): 23c047e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +100 -151
app.py CHANGED
@@ -1,23 +1,12 @@
1
  import streamlit as st
2
- import numpy as np
3
- import pandas as pd
4
- import os
5
  import torch
6
- import torch.nn as nn
7
- from transformers.activations import get_activation
8
- from transformers import AutoTokenizer, AutoModelForCausalLM
9
 
10
-
11
- st.title('GPT2: To see all prompt outlines: https://huggingface.co/BigSalmon/InformalToFormalLincoln64Paraphrase')
12
-
13
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
14
 
15
  @st.cache(allow_output_mutation=True)
16
  def get_model():
17
 
18
- tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln89Paraphrase")
19
- model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln89Paraphrase")
20
-
21
  #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln86Paraphrase")
22
  #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln86Paraphrase")
23
 
@@ -83,109 +72,47 @@ def get_model():
83
 
84
  #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/PointsToSentence")
85
  #model = AutoModelForCausalLM.from_pretrained("BigSalmon/PointsToSentence")
 
 
 
 
 
86
 
87
- return model, tokenizer
88
-
89
- model, tokenizer = get_model()
90
-
91
- g = """informal english: garage band has made people who know nothing about music good at creating music.
92
- Translated into the Style of Abraham Lincoln: garage band ( offers the uninitiated in music the ability to produce professional-quality compositions / catapults those for whom music is an uncharted art the ability the realize masterpieces / stimulates music novice's competency to yield sublime arrangements / begets individuals of rudimentary musical talent the proficiency to fashion elaborate suites ).
93
-
94
- informal english: chrome extensions can make doing regular tasks much easier to get done.
95
- Translated into the Style of Abraham Lincoln: chrome extensions ( yield the boon of time-saving convenience / ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks / turbocharges the velocity with which one can conduct their obligations ).
96
 
97
- informal english: broadband is finally expanding to rural areas, a great development that will thrust them into modern life.
98
- Translated into the Style of Abraham Lincoln: broadband is ( ( finally / at last / after years of delay ) arriving in remote locations / springing to life in far-flung outposts / inching into even the most backwater corners of the nation ) that will leap-frog them into the twenty-first century.
99
 
100
- informal english: google translate has made talking to people who do not share your language easier.
101
- Translated into the Style of Abraham Lincoln: google translate ( imparts communicability to individuals whose native tongue differs / mitigates the trials of communication across linguistic barriers / hastens the bridging of semantic boundaries / mollifies the complexity of multilingual communication / avails itself to the internationalization of discussion / flexes its muscles to abet intercultural conversation / calms the tides of linguistic divergence ).
102
-
103
- informal english: corn fields are all across illinois, visible once you leave chicago.
104
- Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.
105
-
106
- informal english: """
107
-
108
- number_of_outputs = st.sidebar.slider("Number of Outputs", 5, 100)
109
- log_nums = st.sidebar.slider("How Many Log Outputs?", 50, 600)
110
-
111
- def BestProbs(prompt):
112
- prompt = prompt.strip()
113
- text = tokenizer.encode(prompt)
114
- myinput, past_key_values = torch.tensor([text]), None
115
- myinput = myinput
116
- logits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False)
117
- logits = logits[0,-1]
118
- probabilities = torch.nn.functional.softmax(logits)
119
- best_logits, best_indices = logits.topk(10)
120
- best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
121
- for i in best_words[0:10]:
122
- print("_______")
123
- st.write(f"${i} $\n")
124
- f = (f"${i} $\n")
125
- m = (prompt + f"{i}")
126
- BestProbs2(m)
127
- return f
128
 
129
- def BestProbs2(prompt):
130
- prompt = prompt.strip()
131
- text = tokenizer.encode(prompt)
132
- myinput, past_key_values = torch.tensor([text]), None
133
- myinput = myinput
134
- logits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False)
135
- logits = logits[0,-1]
136
- probabilities = torch.nn.functional.softmax(logits)
137
- best_logits, best_indices = logits.topk(20)
138
- best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
139
- for i in best_words[0:20]:
140
- print(i)
141
- st.write(i)
142
-
143
- def LogProbs(prompt):
144
- col1 = []
145
- col2 = []
146
- prompt = prompt.strip()
147
- text = tokenizer.encode(prompt)
148
- myinput, past_key_values = torch.tensor([text]), None
149
- myinput = myinput
150
- logits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False)
151
- logits = logits[0,-1]
152
- probabilities = torch.nn.functional.softmax(logits)
153
- best_logits, best_indices = logits.topk(10)
154
- best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
155
- for i in best_words[0:10]:
156
- print("_______")
157
- f = i
158
- col1.append(f)
159
- m = (prompt + f"{i}")
160
- #print("^^" + f + " ^^")
161
- prompt = m.strip()
162
- text = tokenizer.encode(prompt)
163
- myinput, past_key_values = torch.tensor([text]), None
164
- myinput = myinput
165
- logits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False)
166
- logits = logits[0,-1]
167
- probabilities = torch.nn.functional.softmax(logits)
168
- best_logits, best_indices = logits.topk(20)
169
- best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
170
- for i in best_words[0:20]:
171
- #print(i)
172
- col2.append(i)
173
- #print(col1)
174
- #print(col2)
175
- d = {col1[0]: [col2[0], col2[1], col2[2], col2[3], col2[4], col2[5], col2[6], col2[7], col2[8], col2[9], col2[10], col2[11], col2[12], col2[13], col2[14], col2[15], col2[16], col2[17], col2[18], col2[19]],
176
- col1[1]: [col2[20], col2[21], col2[22], col2[23], col2[24], col2[25], col2[26], col2[27], col2[28], col2[29], col2[30], col2[31], col2[32], col2[33], col2[34], col2[35], col2[36], col2[37], col2[38], col2[39]],
177
- col1[2]: [col2[40], col2[41], col2[42], col2[43], col2[44], col2[45], col2[46], col2[47], col2[48], col2[49], col2[50], col2[51], col2[52], col2[53], col2[54], col2[55], col2[56], col2[57], col2[58], col2[59]],
178
- col1[3]: [col2[60], col2[61], col2[62], col2[63], col2[64], col2[65], col2[66], col2[67], col2[68], col2[69], col2[70], col2[71], col2[72], col2[73], col2[74], col2[75], col2[76], col2[77], col2[78], col2[79]],
179
- col1[4]: [col2[80], col2[81], col2[82], col2[83], col2[84], col2[85], col2[86], col2[87], col2[88], col2[89], col2[90], col2[91], col2[92], col2[93], col2[94], col2[95], col2[96], col2[97], col2[98], col2[99]],
180
- col1[5]: [col2[100], col2[101], col2[102], col2[103], col2[104], col2[105], col2[106], col2[107], col2[108], col2[109], col2[110], col2[111], col2[112], col2[113], col2[114], col2[115], col2[116], col2[117], col2[118], col2[119]],
181
- col1[6]: [col2[120], col2[121], col2[122], col2[123], col2[124], col2[125], col2[126], col2[127], col2[128], col2[129], col2[130], col2[131], col2[132], col2[133], col2[134], col2[135], col2[136], col2[137], col2[138], col2[139]],
182
- col1[7]: [col2[140], col2[141], col2[142], col2[143], col2[144], col2[145], col2[146], col2[147], col2[148], col2[149], col2[150], col2[151], col2[152], col2[153], col2[154], col2[155], col2[156], col2[157], col2[158], col2[159]],
183
- col1[8]: [col2[160], col2[161], col2[162], col2[163], col2[164], col2[165], col2[166], col2[167], col2[168], col2[169], col2[170], col2[171], col2[172], col2[173], col2[174], col2[175], col2[176], col2[177], col2[178], col2[179]],
184
- col1[9]: [col2[180], col2[181], col2[182], col2[183], col2[184], col2[185], col2[186], col2[187], col2[188], col2[189], col2[190], col2[191], col2[192], col2[193], col2[194], col2[195], col2[196], col2[197], col2[198], col2[199]]}
185
- df = pd.DataFrame(data=d)
186
- print(df)
187
- st.write(df)
188
- return df
189
 
190
  def BestProbs5(prompt):
191
  prompt = prompt.strip()
@@ -204,66 +131,88 @@ def BestProbs5(prompt):
204
  st.write(g)
205
  l = run_generate(g, "hey")
206
  st.write(l)
207
-
208
- def run_generate(text, bad_words):
209
  yo = []
210
- input_ids = tokenizer.encode(text, return_tensors='pt')
211
- res = len(tokenizer.encode(text))
212
  bad_words = bad_words.split()
213
- bad_word_ids = [[7829], [40940]]
214
  for bad_word in bad_words:
215
  bad_word = " " + bad_word
216
- ids = tokenizer(bad_word).input_ids
217
  bad_word_ids.append(ids)
218
- sample_outputs = model.generate(
219
  input_ids,
220
  do_sample=True,
221
- max_length= res + 5,
222
- min_length = res + 5,
223
  top_k=50,
224
- temperature=1.0,
225
- num_return_sequences=3,
226
  bad_words_ids=bad_word_ids
227
  )
228
- for i in range(3):
229
- e = tokenizer.decode(sample_outputs[i])
230
  e = e.replace(text, "")
231
  yo.append(e)
232
- print(yo)
233
  return yo
234
-
 
 
 
 
 
 
 
 
 
 
235
  with st.form(key='my_form'):
236
- prompt = st.text_area(label='Enter sentence', value=g, height=500)
237
  submit_button = st.form_submit_button(label='Submit')
238
- submit_button2 = st.form_submit_button(label='Fast Forward')
239
- submit_button3 = st.form_submit_button(label='Fast Forward 2.0')
240
- submit_button4 = st.form_submit_button(label='Get Top')
241
-
 
 
 
 
 
242
  if submit_button:
 
 
 
243
  with torch.no_grad():
244
- text = tokenizer.encode(prompt)
245
- myinput, past_key_values = torch.tensor([text]), None
 
 
246
  myinput = myinput
247
- myinput= myinput.to(device)
248
  logits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False)
249
  logits = logits[0,-1]
250
  probabilities = torch.nn.functional.softmax(logits)
251
- best_logits, best_indices = logits.topk(log_nums)
252
- best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
253
- text.append(best_indices[0].item())
254
- best_probabilities = probabilities[best_indices].tolist()
255
- words = []
256
  st.write(best_words)
257
- if submit_button2:
258
- print("----")
259
- st.write("___")
260
- m = LogProbs(prompt)
261
- st.write("___")
262
- st.write(m)
263
- st.write("___")
264
  if submit_button3:
265
- print("----")
266
- st.write("___")
267
- st.write(BestProbs)
268
  if submit_button4:
269
- BestProbs5(prompt)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModel
 
 
3
  import torch
 
 
 
4
 
5
+ first = """informal english: corn fields are all across illinois, visible once you leave chicago.\nTranslated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.\n\ninformal english: """
 
 
 
6
 
7
  @st.cache(allow_output_mutation=True)
8
  def get_model():
9
 
 
 
 
10
  #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln86Paraphrase")
11
  #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln86Paraphrase")
12
 
 
72
 
73
  #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/PointsToSentence")
74
  #model = AutoModelForCausalLM.from_pretrained("BigSalmon/PointsToSentence")
75
+ tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln89Paraphrase")
76
+ model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln89Paraphrase")
77
+ tokenizer2 = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln90ParaphraseMedium")
78
+ model2 = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln90ParaphraseMedium")
79
+ return model, model2, tokenizer, tokenizer2
80
 
81
+ model, model2, tokenizer, tokenizer2 = get_model()
 
 
 
 
 
 
 
 
82
 
83
# Pointer to the prompt-template reference for this model family.
st.text('''For Prompt Templates: https://huggingface.co/BigSalmon/InformalToFormalLincoln82Paraphrase''')

# Sidebar generation controls; slider args are (label, min, max).
temp = st.sidebar.slider("Temperature", 0.7, 1.5)  # sampling temperature for generate()
number_of_outputs = st.sidebar.slider("Number of Outputs", 5, 50)  # sequences per generate() call
lengths = st.sidebar.slider("Length", 3, 500)  # number of new tokens to generate
bad_words = st.text_input("Words You Do Not Want Generated", " core lemon height time ")  # space-separated ban list
logs_outputs = st.sidebar.slider("Logit Outputs", 50, 300)  # top-k size for the logit displays
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
90
 
91
def run_generate(text, bad_words):
    """Sample continuations of `text` from the primary model.

    Parameters:
        text: prompt string fed to the primary tokenizer/model.
        bad_words: space-separated words to ban from generation; each is
            banned with a leading space so the ids match mid-sentence
            GPT-2 tokenization.

    Returns a list of `number_of_outputs` generated continuations with the
    prompt text stripped out.

    Relies on module-level globals: tokenizer, model, temp, lengths,
    number_of_outputs.
    """
    input_ids = tokenizer.encode(text, return_tensors='pt')
    # Prompt length in tokens; reuse the encoding instead of encoding twice.
    res = input_ids.shape[1]

    bad_word_ids = []
    for bad_word in bad_words.split():
        ids = tokenizer(" " + bad_word).input_ids
        bad_word_ids.append(ids)

    sample_outputs = model.generate(
        input_ids,
        do_sample=True,
        max_length=res + lengths,
        min_length=res + lengths,
        top_k=50,
        temperature=temp,
        num_return_sequences=number_of_outputs,
        # transformers rejects an empty bad_words_ids list with ValueError;
        # pass None when the user supplied no ban words.
        bad_words_ids=bad_word_ids or None,
    )

    yo = []
    for i in range(number_of_outputs):
        e = tokenizer.decode(sample_outputs[i])
        e = e.replace(text, "")
        yo.append(e)
    return yo
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
 
117
  def BestProbs5(prompt):
118
  prompt = prompt.strip()
 
131
  st.write(g)
132
  l = run_generate(g, "hey")
133
  st.write(l)
134
+
135
def run_generate2(text, bad_words):
    """Sample continuations of `text` from the secondary model (model2).

    Mirrors run_generate() but uses tokenizer2/model2. Parameters and return
    value are identical: `bad_words` is a space-separated ban list, and the
    result is a list of `number_of_outputs` continuations with the prompt
    text stripped out.

    Relies on module-level globals: tokenizer2, model2, temp, lengths,
    number_of_outputs.
    """
    input_ids = tokenizer2.encode(text, return_tensors='pt')
    # Prompt length in tokens; reuse the encoding instead of encoding twice.
    res = input_ids.shape[1]

    bad_word_ids = []
    for bad_word in bad_words.split():
        # Leading space so the banned ids match mid-sentence tokenization.
        ids = tokenizer2(" " + bad_word).input_ids
        bad_word_ids.append(ids)

    sample_outputs = model2.generate(
        input_ids,
        do_sample=True,
        max_length=res + lengths,
        min_length=res + lengths,
        top_k=50,
        temperature=temp,
        num_return_sequences=number_of_outputs,
        # transformers rejects an empty bad_words_ids list with ValueError;
        # pass None when the user supplied no ban words.
        bad_words_ids=bad_word_ids or None,
    )

    yo = []
    for i in range(number_of_outputs):
        e = tokenizer2.decode(sample_outputs[i])
        e = e.replace(text, "")
        yo.append(e)
    return yo
160
+
161
+ def prefix_format(sentence):
162
+ words = sentence.split()
163
+ if "[MASK]" in sentence:
164
+ words2 = words.index("[MASK]")
165
+ #print(words2)
166
+ output = ("<Prefix> " + ' '.join(words[:words2]) + " <Prefix> " + "<Suffix> " + ' '.join(words[words2+1:]) + " <Suffix>" + " <Middle>")
167
+ st.write(output)
168
+ else:
169
+ st.write("Add [MASK] to sentence")
170
+
171
# Main UI: one Streamlit form sharing a single text area across six actions.
# NOTE(review): indentation was lost in this paste. The submit buttons must sit
# inside the `with st.form(...)` block (Streamlit requires it); the
# `if submit_button*` handlers are reconstructed at top level — confirm.
with st.form(key='my_form'):
    text = st.text_area(label='Enter sentence', value=first)
    submit_button = st.form_submit_button(label='Submit')
    submit_button2 = st.form_submit_button(label='Submit Log Probs')

    submit_button3 = st.form_submit_button(label='Submit Other Model')
    submit_button4 = st.form_submit_button(label='Submit Log Probs Other Model')

    submit_button5 = st.form_submit_button(label='Most Prob')

    submit_button6 = st.form_submit_button(label='Turn Sentence with [MASK] into <Prefix> Format')

# 'Submit': sample continuations from the primary model and display them.
if submit_button:
    translated_text = run_generate(text, bad_words)
    st.write(translated_text if translated_text else "No translation found")

# 'Submit Log Probs': show the top `logs_outputs` next-token candidates
# from the primary model for the current prompt.
if submit_button2:
    with torch.no_grad():
        text2 = str(text)
        print(text2)
        text3 = tokenizer.encode(text2)
        myinput, past_key_values = torch.tensor([text3]), None
        myinput = myinput
        logits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False)
        logits = logits[0,-1]
        probabilities = torch.nn.functional.softmax(logits)  # computed but not used below
        best_logits, best_indices = logits.topk(logs_outputs)
        best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
        st.write(best_words)

# 'Submit Other Model': same sampling via the secondary model.
if submit_button3:
    translated_text = run_generate2(text, bad_words)
    st.write(translated_text if translated_text else "No translation found")

# 'Submit Log Probs Other Model': top next-token candidates from the
# secondary model. NOTE(review): unlike the submit_button2 path this runs
# without torch.no_grad() — confirm whether that is intentional.
if submit_button4:
    text2 = str(text)
    print(text2)
    text3 = tokenizer2.encode(text2)
    myinput, past_key_values = torch.tensor([text3]), None
    myinput = myinput
    logits, past_key_values = model2(myinput, past_key_values = past_key_values, return_dict=False)
    logits = logits[0,-1]
    probabilities = torch.nn.functional.softmax(logits)  # computed but not used below
    best_logits, best_indices = logits.topk(logs_outputs)
    best_words = [tokenizer2.decode([idx.item()]) for idx in best_indices]
    st.write(best_words)

# 'Most Prob': iterative most-probable-token exploration (defined above).
if submit_button5:
    BestProbs5(text)

# 'Turn Sentence with [MASK] into <Prefix> Format': FIM-style prompt builder.
if submit_button6:
    text2 = str(text)
    prefix_format(text2)