Simon Salmon committed
Commit 7e845c4 · 1 Parent(s): bc7effb

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -15,7 +15,7 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 from transformers import AutoTokenizer, AutoModelWithLMHead
 tokenizer = AutoTokenizer.from_pretrained("gpt2")
-model = AutoModelWithLMHead.from_pretrained("BigSalmon/MrLincoln2")
+model = AutoModelWithLMHead.from_pretrained("BigSalmon/MrLincoln3")
 
 with st.form(key='my_form'):
     prompt = st.text_area(label='Enter sentence')
@@ -30,7 +30,7 @@ with st.form(key='my_form'):
     logits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False)
     logits = logits[0,-1]
     probabilities = torch.nn.functional.softmax(logits)
-    best_logits, best_indices = logits.topk(50)
+    best_logits, best_indices = logits.topk(60)
     best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
    text.append(best_indices[0].item())
     best_probabilities = probabilities[best_indices].tolist()
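The two edits swap the checkpoint from BigSalmon/MrLincoln2 to BigSalmon/MrLincoln3 and widen the candidate pool from the top 50 to the top 60 logits. A minimal sketch of what the second change does, using a random stand-in logit vector rather than real model output (the tensor size and variable values here are illustrative, not taken from app.py):

import torch

# Stand-in for the model's next-token logits (GPT-2 vocabulary size assumed).
logits = torch.randn(50257)
probabilities = torch.nn.functional.softmax(logits, dim=-1)

# topk(k) returns the k largest logits and their vocabulary indices;
# this commit raises k from 50 to 60, so ten more candidate tokens are surfaced.
best_logits, best_indices = logits.topk(60)
best_probabilities = probabilities[best_indices].tolist()
print(len(best_probabilities))  # 60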