Update app.py
app.py CHANGED

@@ -12,11 +12,11 @@ MAX_MAX_NEW_TOKENS = 2048
 DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
-
-model_id = "stabilityai/ar-stablelm-2-chat"
-model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True)
-tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
-tokenizer.use_default_system_prompt = False
+def load_model():
+    model_id = "stabilityai/ar-stablelm-2-chat"
+    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True)
+    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+    tokenizer.use_default_system_prompt = False
 
 
 def generate(

@@ -113,6 +113,7 @@ with gr.Blocks(css_paths="style.css", fill_height=True) as demo:
         try:
             # Validate token using Hugging Face Hub API
             login(token = hf_token)
+            load_model()
             return f"Authenticated successfully"
         except HfHubHTTPError:
             return "Invalid token. Please try again."