from transformers import GPT2Tokenizer, GPT2LMHeadModel, Trainer, TrainingArguments, DataCollatorForLanguageModeling
from datasets import Dataset
# Load dataset: read the entire training file as one string and return it as a single-element list.
def load_dataset(file_path):
    with open(file_path, "r", encoding="utf-8") as f:
        text = f.read()
    return [text]
# Load tokenizer and model; GPT-2 ships without a pad token, so reuse the end-of-text token for padding.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token
model = GPT2LMHeadModel.from_pretrained("gpt2")
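# --- Training step (a minimal sketch): the script as uploaded jumps straight from loading
# the model to saving it, so everything in this section is an assumed reconstruction of the
# missing fine-tuning logic. "train.txt" is a hypothetical path and the TrainingArguments
# values are illustrative defaults, not settings taken from the original.

# Build a Hugging Face Dataset from the raw text and tokenize it.
texts = load_dataset("train.txt")  # hypothetical file name
dataset = Dataset.from_dict({"text": texts})

def tokenize_function(examples):
    # Truncate each example to a fixed length so the collator can batch them.
    return tokenizer(examples["text"], truncation=True, max_length=512)

tokenized_dataset = dataset.map(tokenize_function, batched=True, remove_columns=["text"])

# For causal language modeling the collator copies input_ids into labels (mlm=False).
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

training_args = TrainingArguments(
    output_dir="./finetuned_gpt2",
    overwrite_output_dir=True,
    num_train_epochs=3,
    per_device_train_batch_size=2,
    save_steps=500,
    logging_steps=100,
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset,
    data_collator=data_collator,
)

trainer.train()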
# Save final model
model.save_pretrained("./finetuned_gpt2")
tokenizer.save_pretrained("./finetuned_gpt2")
print("Fine-tuning completed.")
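# Optional sanity check (not part of the original script): reload the saved artifacts from
# ./finetuned_gpt2 and generate a short continuation to confirm they load and run.
model = GPT2LMHeadModel.from_pretrained("./finetuned_gpt2")
tokenizer = GPT2Tokenizer.from_pretrained("./finetuned_gpt2")
inputs = tokenizer("Once upon a time", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=40, do_sample=True)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))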