coeuslearning committed on
Commit
0005f3b
·
1 Parent(s): c1ded61

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -7,7 +7,8 @@ import spaces
7
  import torch
8
  from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
9
 
10
- !huggingface-cli login --token "hf_***REDACTED***"  [SECURITY: a real Hugging Face access token was committed here in plain text — it must be considered compromised and revoked immediately]
 
11
 
12
  MAX_MAX_NEW_TOKENS = 2048
13
  DEFAULT_MAX_NEW_TOKENS = 1024
@@ -33,8 +34,8 @@ if not torch.cuda.is_available():
33
 
34
  if torch.cuda.is_available():
35
  model_id = "meta-llama/Llama-2-7b-chat-hf"
36
- model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
37
- tokenizer = AutoTokenizer.from_pretrained(model_id)
38
  tokenizer.use_default_system_prompt = False
39
 
40
 
 
7
  import torch
8
  from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
9
 
10
+ # !huggingface-cli login --token "hf_***REDACTED***"  [SECURITY: commented-out line still contained the leaked token verbatim — revoke the token and delete this line rather than commenting it out]
11
+ HF_TOKEN = "hf_***REDACTED***"  [SECURITY: the new version still hardcodes the secret in source — read it from the environment instead, e.g. HF_TOKEN = os.environ["HF_TOKEN"], and revoke the exposed token]
12
 
13
  MAX_MAX_NEW_TOKENS = 2048
14
  DEFAULT_MAX_NEW_TOKENS = 1024
 
34
 
35
  if torch.cuda.is_available():
36
  model_id = "meta-llama/Llama-2-7b-chat-hf"
37
+ model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto", use_auth_token=HF_TOKEN)
38
+ tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=HF_TOKEN)
39
  tokenizer.use_default_system_prompt = False
40
 
41