Lora committed
Commit 8b1a6a7 · 1 Parent(s): 9da80c3

try add auth

Files changed (1): app.py +2 -3
app.py CHANGED
@@ -8,13 +8,12 @@ def visualize_word(word, count=10, remove_space=False):

     if not remove_space:
         word = ' ' + word
-    print(f"Looking up word ['{word}']")
+    print(f"Looking up word '{word}'")

     # seems very dumb to have to load the tokenizer every time, but I don't know how to pass a non-interface element into the function in gradio
     tokenizer = transformers.AutoTokenizer.from_pretrained('gpt2')
     vecs = torch.load("senses/all_vecs_mtx.pt")
     lm_head = torch.load("senses/lm_head.pt")
-    print("lm_head.shape = ", lm_head.shape)

     token_ids = tokenizer(word)['input_ids']
     tokens = [tokenizer.decode(token_id) for token_id in token_ids]
@@ -87,5 +86,5 @@ with gr.Blocks() as demo:
         outputs= [pos_outputs, neg_outputs, token_breakdown],
     )

-demo.launch(share=False)
+demo.launch(auth=("admin", "pass1234"))

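The inline comment in the first hunk notes that the tokenizer and the two tensors are reloaded on every call because it is unclear how to pass non-interface objects into a Gradio event handler. Below is a minimal sketch of one common workaround, assuming the same file paths and the gpt2 tokenizer shown in the diff: load everything once at module level and let the handler close over it. The check_auth callable and the DEMO_USER / DEMO_PASS environment variable names are illustrative assumptions, not part of this repository; they only show that Gradio's launch(auth=...) also accepts a callable instead of the hard-coded ("admin", "pass1234") tuple from this commit.

import os

import gradio as gr
import torch
import transformers

# Loaded once at module import; Gradio event handlers are ordinary Python
# functions, so they can close over these module-level objects instead of
# reloading them on every call.
TOKENIZER = transformers.AutoTokenizer.from_pretrained('gpt2')
VECS = torch.load("senses/all_vecs_mtx.pt")
LM_HEAD = torch.load("senses/lm_head.pt")

def visualize_word(word, count=10, remove_space=False):
    if not remove_space:
        word = ' ' + word
    token_ids = TOKENIZER(word)['input_ids']  # uses the cached tokenizer
    tokens = [TOKENIZER.decode(token_id) for token_id in token_ids]
    ...  # rest of the original function body, unchanged

def check_auth(username, password):
    # Hypothetical env-var names; keeps credentials out of app.py.
    return (username == os.environ.get("DEMO_USER")
            and password == os.environ.get("DEMO_PASS"))

with gr.Blocks() as demo:
    ...  # original interface definition, unchanged

demo.launch(auth=check_auth)

Either form puts a login prompt in front of the demo; the callable form simply avoids committing a plaintext password to the repository.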