Wisdom Chen committed on
Commit
5e45981
·
unverified ·
1 Parent(s): af1f755

Update model.py

Browse files

add the streamlit package

Files changed (1) hide show
  1. model.py +8 -5
model.py CHANGED
@@ -1,4 +1,5 @@
1
  # Standard libraries
 
2
  import os
3
  import io
4
  import json
@@ -75,22 +76,24 @@ def initialize_models() -> bool:
75
  )
76
 
77
  # Get token from Streamlit secrets
78
- hf_token = st.secrets["HUGGINGFACE_TOKEN"]
 
 
79
 
80
  llm_tokenizer = AutoTokenizer.from_pretrained(
81
  model_name,
 
82
  padding_side="left",
83
- truncation_side="left",
84
- token=hf_token # Add token here
85
  )
86
  llm_tokenizer.pad_token = llm_tokenizer.eos_token
87
 
88
  llm_model = AutoModelForCausalLM.from_pretrained(
89
  model_name,
 
90
  quantization_config=quantization_config,
91
  device_map="auto",
92
- torch_dtype=torch.float16,
93
- token=hf_token # Add token here
94
  )
95
  llm_model.eval()
96
  print("LLM initialized successfully")
 
1
  # Standard libraries
2
+ import streamlit as st
3
  import os
4
  import io
5
  import json
 
76
  )
77
 
78
  # Get token from Streamlit secrets
79
+ hf_token = st.secrets.get("HUGGINGFACE_TOKEN")
80
+ if not hf_token:
81
+ raise ValueError("HUGGINGFACE_TOKEN not found in Streamlit secrets")
82
 
83
  llm_tokenizer = AutoTokenizer.from_pretrained(
84
  model_name,
85
+ token=hf_token,
86
  padding_side="left",
87
+ truncation_side="left"
 
88
  )
89
  llm_tokenizer.pad_token = llm_tokenizer.eos_token
90
 
91
  llm_model = AutoModelForCausalLM.from_pretrained(
92
  model_name,
93
+ token=hf_token,
94
  quantization_config=quantization_config,
95
  device_map="auto",
96
+ torch_dtype=torch.float16
 
97
  )
98
  llm_model.eval()
99
  print("LLM initialized successfully")