Wisdom Chen committed on
Commit
21b3792
·
unverified ·
1 Parent(s): 6900a5f

Update model.py

Browse files

Add explicit Hugging Face token login via huggingface_hub

Files changed (1) hide show
  1. model.py +11 -7
model.py CHANGED
@@ -53,6 +53,13 @@ def initialize_models() -> bool:
53
  try:
54
  print(f"Initializing models on device: {device}")
55
 
 
 
 
 
 
 
 
56
  # Initialize CLIP model with error handling
57
  try:
58
  clip_model, _, clip_preprocess = open_clip.create_model_and_transforms(
@@ -75,16 +82,12 @@ def initialize_models() -> bool:
75
  bnb_4bit_quant_type="nf4"
76
  )
77
 
78
- # Get token from Streamlit secrets
79
- hf_token = st.secrets["HUGGINGFACE_TOKEN"]
80
- if not hf_token:
81
- raise ValueError("HUGGINGFACE_TOKEN not found in Streamlit secrets")
82
-
83
  # Initialize tokenizer with specific version requirements
84
  llm_tokenizer = AutoTokenizer.from_pretrained(
85
  model_name,
86
  token=hf_token,
87
- trust_remote_code=True
 
88
  )
89
  llm_tokenizer.pad_token = llm_tokenizer.eos_token
90
 
@@ -94,7 +97,8 @@ def initialize_models() -> bool:
94
  quantization_config=quantization_config,
95
  device_map="auto",
96
  torch_dtype=torch.float16,
97
- trust_remote_code=True
 
98
  )
99
  llm_model.eval()
100
  print("LLM initialized successfully")
 
53
  try:
54
  print(f"Initializing models on device: {device}")
55
 
56
+ # Add explicit Hugging Face login
57
+ from huggingface_hub import login
58
+ hf_token = st.secrets["HUGGINGFACE_TOKEN"]
59
+ if not hf_token:
60
+ raise ValueError("HUGGINGFACE_TOKEN not found in Streamlit secrets")
61
+ login(token=hf_token)
62
+
63
  # Initialize CLIP model with error handling
64
  try:
65
  clip_model, _, clip_preprocess = open_clip.create_model_and_transforms(
 
82
  bnb_4bit_quant_type="nf4"
83
  )
84
 
 
 
 
 
 
85
  # Initialize tokenizer with specific version requirements
86
  llm_tokenizer = AutoTokenizer.from_pretrained(
87
  model_name,
88
  token=hf_token,
89
+ trust_remote_code=True,
90
+ use_auth_token=True # Add this line
91
  )
92
  llm_tokenizer.pad_token = llm_tokenizer.eos_token
93
 
 
97
  quantization_config=quantization_config,
98
  device_map="auto",
99
  torch_dtype=torch.float16,
100
+ trust_remote_code=True,
101
+ use_auth_token=True # Add this line
102
  )
103
  llm_model.eval()
104
  print("LLM initialized successfully")