Commit: "Update app.py" — Browse files

File changed: app.py
@@ -13,11 +13,14 @@ dtype = None
(before — lines 13–23 of the old file)

    13  load_in_4bit = True
    14  hf_token = os.getenv("HF_TOKEN")
    15
    16  print("Starting model and tokenizer loading...")
    17
    18  # Load the model and tokenizer
    19  model, tokenizer = FastLanguageModel.from_pretrained(
    20 -    model_name=…,   # removed line; original model id is truncated in this rendering
    21      max_seq_length=max_seq_length,
    22      dtype=dtype,
    23      load_in_4bit=load_in_4bit,
@@ -138,7 +141,7 @@ print("Model saved successfully.")
(before — lines 138–144 of the old file)

   138
   139  print("Pushing the model to the hub...")
   140  model.push_to_hub_merged(
   141 -    …,   # removed line; original repo-id argument is truncated in this rendering
   142      tokenizer,
   143      save_method="merged_16bit",
   144      token=hf_token
|
|
(after — lines 13–26 of the new file)

    13  load_in_4bit = True
    14  hf_token = os.getenv("HF_TOKEN")
    15
    16 +pre_models = "dad1909/CyberSentinel-3"
    17 +uploads_models = "CyberSentinel-4"
    18 +
    19  print("Starting model and tokenizer loading...")
    20
    21  # Load the model and tokenizer
    22  model, tokenizer = FastLanguageModel.from_pretrained(
    23 +    model_name=pre_models,
    24      max_seq_length=max_seq_length,
    25      dtype=dtype,
    26      load_in_4bit=load_in_4bit,
|
|
(after — lines 141–147 of the new file)

   141
   142  print("Pushing the model to the hub...")
   143  model.push_to_hub_merged(
   144 +    uploads_models,
   145      tokenizer,
   146      save_method="merged_16bit",
   147      token=hf_token