#!/usr/bin/env python
"""Fine-tune a small causal LM (tiny GPT-2) on Wikitext-2 with a custom
optimizer/scheduler passed to the Hugging Face Trainer."""
import os
import logging

import torch
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    Trainer,
    TrainingArguments,
    DataCollatorForLanguageModeling,
    get_cosine_schedule_with_warmup,
)
from datasets import load_dataset

# Setup logging for progress messages
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# 1. Load the pre-trained tokenizer and model
model_name = "sshleifer/tiny-gpt2"  # tiny GPT-2 checkpoint: fast demo runs
tokenizer = AutoTokenizer.from_pretrained(model_name)

# GPT-2 defines no padding token. Reuse EOS as PAD so fixed-length batches
# work WITHOUT growing the vocabulary (no embedding resize needed).
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
    # Alternative: add a dedicated PAD token instead of reusing EOS.
    # If you do this you MUST also resize the embeddings AFTER the model is
    # loaded, otherwise pad_token_id indexes past the embedding matrix and
    # training crashes:
    #   tokenizer.add_special_tokens({'pad_token': '[PAD]'})
    #   model.resize_token_embeddings(len(tokenizer))

# Load the pre-trained model.
model = AutoModelForCausalLM.from_pretrained(model_name)

# 2. Prepare the dataset.
# For demonstration, we use the Wikitext-2 raw dataset (train split only).
dataset = load_dataset("wikitext", "wikitext-2-raw-v1", split="train")


def tokenize_function(examples):
    """Tokenize a batch of raw texts to fixed-length input ids.

    Pads/truncates every example to max_length=32 so the collator receives
    uniform tensors. Adjust max_length as needed for real runs.
    """
    return tokenizer(
        examples["text"],
        truncation=True,
        max_length=32,
        padding="max_length",
    )


# Apply tokenization over the whole dataset; drop the raw "text" column so
# only model inputs remain.
tokenized_dataset = dataset.map(
    tokenize_function, batched=True, remove_columns=["text"]
)

# 3. Data collator for causal LM: mlm=False means labels are the (shifted)
# input ids rather than masked-token targets.
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
# 4. Setup training arguments
training_args = TrainingArguments(
    output_dir="./gpt2-finetuned",
    overwrite_output_dir=True,
    num_train_epochs=1,             # adjust the number of epochs as needed
    per_device_train_batch_size=8,  # adjust based on available GPU memory
    save_steps=1000,
    save_total_limit=2,
    logging_steps=100,
    prediction_loss_only=True,      # skip storing logits; useful for LM tasks
)

# 5. Create the custom optimizer and scheduler.
# The Trainer's dataloader does NOT drop the last partial batch, so round UP
# when counting optimizer steps per epoch (plain // undercounts by one step
# whenever the dataset size is not a multiple of the batch size).
batch_size = training_args.per_device_train_batch_size
num_update_steps_per_epoch = (len(tokenized_dataset) + batch_size - 1) // batch_size
# num_train_epochs is float-typed in TrainingArguments; the scheduler needs an int.
max_train_steps = int(training_args.num_train_epochs) * num_update_steps_per_epoch

# AdamW with a standard fine-tuning learning rate. (0.1 is several orders of
# magnitude too high for AdamW on a pre-trained LM and would diverge.)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5, weight_decay=0.1)

# Cosine learning-rate decay with linear warmup.
scheduler = get_cosine_schedule_with_warmup(
    optimizer,
    num_warmup_steps=100,  # number of warmup steps, adjust as needed
    num_training_steps=max_train_steps,
)

# 6. Initialize the Trainer with the custom optimizer and scheduler.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset,
    data_collator=data_collator,
    optimizers=(optimizer, scheduler),  # (optimizer, lr_scheduler) tuple
)

# 7. Start training
logger.info("Starting training...")
trainer.train()

# 8. Save the fine-tuned model and tokenizer
model.save_pretrained("./gpt2-finetuned")
tokenizer.save_pretrained("./gpt2-finetuned")
logger.info("Training complete and model saved.")