Update finetune.py
finetune.py CHANGED (+2 -5)
@@ -17,7 +17,7 @@ from peft import (
 
 HF_TOKEN = os.environ.get("TRL_TOKEN", None)
 if HF_TOKEN:
-
+    print(HF_TOKEN)
     repo = Repository(
         local_dir="./checkpoints/", clone_from="gustavoaq/llama_ft", use_auth_token=HF_TOKEN, repo_type="models"
     )
@@ -159,9 +159,6 @@ trainer = transformers.Trainer(
         save_total_limit=100,
         load_best_model_at_end=True if VAL_SET_SIZE > 0 else False,
         ddp_find_unused_parameters=False if ddp else None,
-        push_to_hub=True,
-        push_to_hub_model_id='llama_ft'
-
     ),
     data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),
 )
@@ -175,6 +172,6 @@ print("Training")
 trainer.train()
 
 model.save_pretrained(OUTPUT_DIR)
-
+repo.push_to_hub(OUTPUT_DIR, commit_message="Ft model")
 
 
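In effect, the commit replaces the Trainer's built-in push (push_to_hub=True / push_to_hub_model_id in TrainingArguments) with an explicit push through the cloned Repository after training. The sketch below shows that flow in isolation, assuming the legacy git-based huggingface_hub.Repository API that finetune.py imports; treating OUTPUT_DIR as equal to the local clone directory ("./checkpoints/") is an assumption made for this sketch, not something the diff states.

# Hedged sketch of the clone -> train -> push flow, not the author's exact code.
# Assumes the legacy git-based huggingface_hub.Repository API used by finetune.py.
import os
from huggingface_hub import Repository

HF_TOKEN = os.environ.get("TRL_TOKEN", None)   # token read from the environment, as in the diff
OUTPUT_DIR = "./checkpoints/"                  # assumed to equal local_dir so saved files land inside the clone

repo = None
if HF_TOKEN:
    # Clone (or reuse) the Hub model repo into the local checkpoint directory.
    repo = Repository(
        local_dir=OUTPUT_DIR,
        clone_from="gustavoaq/llama_ft",
        use_auth_token=HF_TOKEN,
        repo_type="model",
    )

# ... build the Trainer, run trainer.train(), then model.save_pretrained(OUTPUT_DIR) ...

if repo is not None:
    # Repository.push_to_hub commits everything in local_dir and pushes it;
    # the push operates on local_dir, so only a commit message is needed here.
    repo.push_to_hub(commit_message="Ft model")

Note that Repository is deprecated in recent huggingface_hub releases in favor of the HfApi-based upload methods (e.g. upload_folder), so newer scripts typically push checkpoints that way instead.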