import os

from datasets import load_dataset
from huggingface_hub import login
from transformers import (AutoModelForCausalLM, AutoTokenizer,
                          DataCollatorForLanguageModeling, Trainer, TrainingArguments)

# Authenticate with the Hugging Face Hub (Llama 2 is a gated model).
# Read the token from the environment instead of hard-coding it in the source.
login(token=os.environ["HF_TOKEN"])
# Load the pre-trained model and tokenizer (replace with the desired model name)
model_name = "meta-llama/Llama-2-7b-chat-hf"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Llama tokenizers ship without a pad token; reuse EOS so batched padding works.
tokenizer.pad_token = tokenizer.eos_token
# Define training arguments (hyperparameters)
training_args = TrainingArguments(
    output_dir="output",              # Output directory for checkpoints etc.
    per_device_train_batch_size=8,    # Adjust based on your hardware
    save_steps=10_000,
    num_train_epochs=3,               # Adjust training epochs as needed
)
# Load and tokenize the training and validation data with the `datasets` library.
# Assumes each CSV file has a "text" column; adjust the field name to your schema.
raw_datasets = load_dataset(
    "csv",
    data_files={"train": "data/train.csv", "validation": "data/val.csv"},
)

def tokenize(batch):
    return tokenizer(batch["text"], truncation=True, max_length=512)

tokenized_datasets = raw_datasets.map(
    tokenize, batched=True, remove_columns=raw_datasets["train"].column_names
)
# Create a Trainer object for fine-tuning | |
trainer = Trainer( | |
model=model, | |
args=training_args, | |
train_dataset=train_dataset, # Replace with your training data loader | |
eval_dataset=val_dataset, # Replace with your validation data loader | |
) | |
# Start fine-tuning
trainer.train()
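# Optional follow-up (a minimal sketch, not part of the original script): save
# the fine-tuned model and tokenizer, then run a quick generation as a sanity
# check. The output path and prompt below are placeholder assumptions.
trainer.save_model("output/final")
tokenizer.save_pretrained("output/final")

prompt = "Hello, how are you?"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))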