Update app.py
app.py CHANGED
@@ -10,7 +10,7 @@ from huggingface_hub import HfApi
 max_seq_length = 4096
 dtype = None
 load_in_4bit = True
-hf_token = os.getenv("
+hf_token = os.getenv("HF_TOKEN")
 
 print("Starting model and tokenizer loading...")
 
@@ -20,7 +20,7 @@ model, tokenizer = FastLanguageModel.from_pretrained(
     max_seq_length=max_seq_length,
     dtype=dtype,
     load_in_4bit=load_in_4bit,
-    token=
+    token=True
 )
 print("Model and tokenizer loaded successfully.")
 
@@ -117,7 +117,7 @@ trainer = SFTTrainer(
     bf16=is_bfloat16_supported(),
     warmup_steps=5,
     logging_steps=10,
-    max_steps=
+    max_steps=50,
     optim="adamw_8bit",
     weight_decay=0.01,
     lr_scheduler_type="linear",
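For context, here is a minimal, self-contained sketch of how the changed settings fit together in an Unsloth fine-tuning script. It follows the older TRL `SFTTrainer` API that Unsloth notebooks use; the model name, dataset, batch-size settings, fp16 fallback, and output directory are placeholder assumptions, since the commit only shows the lines above.

```python
import os

from datasets import Dataset
from transformers import TrainingArguments
from trl import SFTTrainer
from unsloth import FastLanguageModel, is_bfloat16_supported

max_seq_length = 4096
dtype = None          # None lets Unsloth pick float16 or bfloat16 for the GPU
load_in_4bit = True   # load a 4-bit quantized model to reduce VRAM usage
hf_token = os.getenv("HF_TOKEN")

# token=True asks huggingface_hub to use the locally available credential
# (for example the HF_TOKEN environment variable read above).
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="unsloth/llama-3-8b-bnb-4bit",  # placeholder; the commit does not show the model name
    max_seq_length=max_seq_length,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
    token=True,
)
print("Model and tokenizer loaded successfully.")

# Placeholder dataset with a "text" column; app.py builds its own dataset elsewhere.
dataset = Dataset.from_dict({"text": ["Example training text."]})

trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=dataset,
    dataset_text_field="text",          # assumption
    max_seq_length=max_seq_length,
    args=TrainingArguments(
        per_device_train_batch_size=2,  # assumption
        gradient_accumulation_steps=4,  # assumption
        bf16=is_bfloat16_supported(),
        fp16=not is_bfloat16_supported(),  # assumption
        warmup_steps=5,
        logging_steps=10,
        max_steps=50,                   # short, capped run as set in this commit
        optim="adamw_8bit",
        weight_decay=0.01,
        lr_scheduler_type="linear",
        output_dir="outputs",           # assumption
    ),
)
trainer.train()
```

Note that newer TRL releases move `dataset_text_field` and `max_seq_length` into `SFTConfig`, so the trainer call may need adjusting depending on the installed version.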