Spaces:
Sleeping
Sleeping
Hugo
committed on
Commit
·
e17a026
1
Parent(s):
3094d88
bug fixed
Browse files
app.py
CHANGED
@@ -1,17 +1,22 @@
|
|
1 |
import streamlit as st
|
|
|
2 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
3 |
import os
|
4 |
import os.path
|
5 |
import pickle
|
6 |
import torch
|
7 |
|
|
|
8 |
model_id = "HiGenius/Headline-Generation-Model"
|
|
|
|
|
9 |
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
10 |
|
11 |
@st.cache_resource
|
12 |
def load_model():
|
13 |
-
|
14 |
-
|
|
|
15 |
tokenizer.pad_token = tokenizer.eos_token
|
16 |
tokenizer.padding_side='left'
|
17 |
|
|
|
1 |
import streamlit as st
|
2 |
+
from peft import PeftModel
|
3 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
4 |
import os
|
5 |
import os.path
|
6 |
import pickle
|
7 |
import torch
|
8 |
|
9 |
+
base_model_id = "meta-llama/Llama-3.2-3B-Instruct"
|
10 |
model_id = "HiGenius/Headline-Generation-Model"
|
11 |
+
hf_token = os.environ.get('HF_TOKEN')
|
12 |
+
|
13 |
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
14 |
|
15 |
@st.cache_resource
|
16 |
def load_model():
|
17 |
+
base_model = AutoModelForCausalLM.from_pretrained(base_model_id, use_auth_token=hf_token)
|
18 |
+
model = PeftModel.from_pretrained(base_model, model_id).to(device)
|
19 |
+
tokenizer = AutoTokenizer.from_pretrained(base_model_id, use_auth_token=hf_token)
|
20 |
tokenizer.pad_token = tokenizer.eos_token
|
21 |
tokenizer.padding_side='left'
|
22 |
|