import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
import numpy as np
from tqdm import tqdm
import os
from datetime import datetime
from typing import Tuple, Optional, List
import math


class Config:
    """Configuration class for model parameters and training settings"""
    def __init__(self):
        self.file_path = 'dataset.txt'
        self.seq_length = 32
        self.batch_size = 8
        self.effective_batch = 64
        self.epochs = 1
        self.embedding_dim = 128
        self.hidden_dim = 256
        self.num_heads = 8
        self.num_layers = 6
        self.dropout = 0.1
        self.learning_rate = 0.01
        self.weight_decay = 0.01
        self.clip_grad = 1.0
        self.lr_patience = 3
        self.val_split = 0.1
        self.early_stop_patience = 5
        self.model_save_path = "transformer_lm_model.pth"
        self.temperature = 0.7
        self.top_k = 50
        self.top_p = 0.95
        self.beam_width = 5
        self.label_smoothing = 0.1
        # Micro-batches accumulated before each optimizer step
        self.accum_steps = self.effective_batch // self.batch_size
        # Use the GPU when available; mixed precision is only enabled on CUDA
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.log_dir = 'runs/' + datetime.now().strftime("%Y%m%d-%H%M%S")


CONFIG = Config()
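

# With batch_size 8 and effective_batch 64, accum_steps works out to 8: gradients
# from 8 micro-batches are summed before each optimizer step, emulating a single
# batch of 64 sequences.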


class TextProcessor:
    """Handles text encoding/decoding and vocabulary management"""
    def __init__(self, text: str):
        self.chars = sorted(set(text))
        self.vocab_size = len(self.chars)
        self.char_to_idx = {ch: i for i, ch in enumerate(self.chars)}
        self.idx_to_char = {i: ch for i, ch in enumerate(self.chars)}

    def encode(self, text: str) -> np.ndarray:
        return np.array([self.char_to_idx[ch] for ch in text])

    def decode(self, indices: List[int]) -> str:
        return ''.join(self.idx_to_char[i] for i in indices)


class TextDataset(Dataset):
    """Sliding-window character dataset: each sample is a sequence paired with
    the same sequence shifted one position to the right as the target."""
    def __init__(self, data: np.ndarray, seq_length: int):
        self.data = torch.from_numpy(data).long()
        self.seq_length = seq_length

    def __len__(self) -> int:
        # The last valid start index is len(data) - seq_length - 1,
        # so there are len(data) - seq_length windows in total
        return len(self.data) - self.seq_length

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor]:
        x = self.data[idx:idx + self.seq_length]
        y = self.data[idx + 1:idx + self.seq_length + 1]
        return x, y


class TransformerLM(nn.Module):
    """Transformer-based language model with positional encoding"""
    def __init__(self, processor: TextProcessor):
        super().__init__()
        self.vocab_size = processor.vocab_size
        self.embed = nn.Embedding(processor.vocab_size, CONFIG.embedding_dim)
        self.pos_encoder = PositionalEncoding(CONFIG.embedding_dim, CONFIG.dropout)
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=CONFIG.embedding_dim,
            nhead=CONFIG.num_heads,
            dim_feedforward=CONFIG.hidden_dim,
            dropout=CONFIG.dropout,
            activation='gelu',
            batch_first=True  # inputs are (batch, seq_len, embedding_dim)
        )
        self.transformer = nn.TransformerEncoder(encoder_layer, CONFIG.num_layers)
        self.fc = nn.Linear(CONFIG.embedding_dim, processor.vocab_size)
        self.init_weights()

    def init_weights(self) -> None:
        """Initialize weights with Xavier uniform"""
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Forward pass with a causal attention mask (generated if not supplied)"""
        if mask is None:
            # Additive mask: -inf above the diagonal so no position attends to future tokens
            seq_len = x.size(1)
            mask = torch.triu(torch.full((seq_len, seq_len), float('-inf'), device=x.device),
                              diagonal=1)
        x = self.embed(x) * math.sqrt(CONFIG.embedding_dim)
        x = self.pos_encoder(x)
        x = self.transformer(x, mask=mask)
        return self.fc(x)
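

# Illustrative example: for seq_length 4, the additive causal mask built in
# TransformerLM.forward is
#   [[0., -inf, -inf, -inf],
#    [0.,   0., -inf, -inf],
#    [0.,   0.,   0., -inf],
#    [0.,   0.,   0.,   0.]]
# i.e. position t may only attend to positions <= t.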


class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding for transformer models"""
    def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        position = torch.arange(max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        pe = torch.zeros(max_len, d_model)
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        self.register_buffer('pe', pe)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, seq_len, d_model); broadcast the first seq_len encodings over the batch
        x = x + self.pe[:x.size(1)]
        return self.dropout(x)


class Trainer:
    """Handles model training and evaluation with advanced features"""
    def __init__(self, model: nn.Module, processor: TextProcessor):
        self.model = model.to(CONFIG.device)
        self.processor = processor
        self.writer = SummaryWriter(CONFIG.log_dir)
        self.optimizer = torch.optim.AdamW(
            model.parameters(),
            lr=CONFIG.learning_rate,
            weight_decay=CONFIG.weight_decay
        )
        self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, 'min', patience=CONFIG.lr_patience
        )
        # Mixed precision is only active when training on CUDA
        self.use_amp = CONFIG.device == 'cuda'
        self.scaler = torch.cuda.amp.GradScaler(enabled=self.use_amp)
        self.criterion = nn.CrossEntropyLoss(label_smoothing=CONFIG.label_smoothing)

    def train_epoch(self, loader: DataLoader) -> float:
        """Train the model for one epoch with gradient accumulation"""
        self.model.train()
        total_loss = 0.0
        accum_steps = CONFIG.accum_steps
        progress = tqdm(loader, desc="Training", leave=False)
        self.optimizer.zero_grad(set_to_none=True)

        for i, (inputs, targets) in enumerate(progress):
            inputs, targets = inputs.to(CONFIG.device), targets.to(CONFIG.device)

            with torch.cuda.amp.autocast(enabled=self.use_amp):
                outputs = self.model(inputs)
                # Divide by accum_steps so the accumulated gradient matches a full batch
                loss = self.criterion(outputs.reshape(-1, self.processor.vocab_size),
                                      targets.reshape(-1)) / accum_steps

            self.scaler.scale(loss).backward()

            if (i + 1) % accum_steps == 0:
                self.scaler.unscale_(self.optimizer)
                nn.utils.clip_grad_norm_(self.model.parameters(), CONFIG.clip_grad)
                self.scaler.step(self.optimizer)
                self.scaler.update()
                self.optimizer.zero_grad(set_to_none=True)

            total_loss += loss.item() * accum_steps
            progress.set_postfix({'loss': total_loss / (i + 1)})

        return total_loss / len(loader)

    @torch.no_grad()
    def evaluate(self, loader: DataLoader) -> float:
        """Evaluate the model on the validation set"""
        self.model.eval()
        total_loss = 0.0
        for inputs, targets in tqdm(loader, desc="Evaluating", leave=False):
            inputs, targets = inputs.to(CONFIG.device), targets.to(CONFIG.device)
            outputs = self.model(inputs)
            loss = self.criterion(outputs.reshape(-1, self.processor.vocab_size),
                                  targets.reshape(-1))
            total_loss += loss.item()
        return total_loss / len(loader)


class TextGenerator:
    """Text generator with multiple decoding strategies"""
    def __init__(self, model: nn.Module, processor: TextProcessor):
        self.model = model
        self.processor = processor
        self.model.eval()

    def generate(self, prompt: str, length: int = 200, **kwargs) -> str:
        """Generate text with the given decoding parameters"""
        # Pop the method so it is not forwarded to the decoding functions
        method = kwargs.pop('method', 'sampling')
        if method == 'beam':
            return self._beam_search(prompt, length, **kwargs)
        return self._sample_text(prompt, length, **kwargs)

    def _sample_text(self, prompt: str, length: int,
                     temperature: float = CONFIG.temperature,
                     top_k: int = CONFIG.top_k,
                     top_p: float = CONFIG.top_p) -> str:
        """Generate text using temperature sampling with top-k/top-p filtering"""
        input_seq = torch.tensor([[self.processor.char_to_idx[ch] for ch in prompt]],
                                 device=CONFIG.device)
        generated = list(prompt)

        for _ in tqdm(range(length), desc="Generating"):
            with torch.no_grad():
                # Only the logits for the last position are needed
                logits = self.model(input_seq)[0, -1]

            logits = self._apply_sampling_constraints(logits, temperature, top_k, top_p)
            probs = F.softmax(logits, dim=-1)
            next_idx = torch.multinomial(probs, num_samples=1).item()

            generated.append(self.processor.idx_to_char[next_idx])
            next_token = torch.tensor([[next_idx]], device=CONFIG.device)
            # Append the new token and keep at most seq_length tokens of context
            input_seq = torch.cat([input_seq, next_token], dim=1)[:, -CONFIG.seq_length:]

        return ''.join(generated)

    def _beam_search(self, prompt: str, length: int,
                     beam_width: int = CONFIG.beam_width) -> str:
        """Beam search decoding for improved coherence (not yet implemented)"""
        # Beam search is still a stub; fall back to sampling so callers get
        # output instead of silently receiving None
        return self._sample_text(prompt, length)

    def _apply_sampling_constraints(self, logits: torch.Tensor,
                                    temperature: float,
                                    top_k: int,
                                    top_p: float) -> torch.Tensor:
        """Apply temperature scaling and top-k/top-p (nucleus) filtering"""
        logits = logits / temperature
        if top_k > 0:
            # Keep only the top_k highest-scoring tokens
            top_k = min(top_k, logits.size(-1))
            indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
            logits[indices_to_remove] = -float('Inf')
        if top_p > 0.0:
            # Remove tokens whose cumulative probability exceeds top_p
            sorted_logits, sorted_indices = torch.sort(logits, descending=True)
            cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
            sorted_indices_to_remove = cumulative_probs > top_p
            # Shift right so the first token crossing the threshold is still kept
            sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
            sorted_indices_to_remove[..., 0] = 0
            indices_to_remove = sorted_indices_to_remove.scatter(
                -1, sorted_indices, sorted_indices_to_remove)
            logits[indices_to_remove] = -float('Inf')
        return logits
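

# A minimal sketch of beam-search decoding, since TextGenerator._beam_search is
# only a stub. The helper name, the scoring scheme (sum of log-probabilities with
# no length penalty), and the context truncation to CONFIG.seq_length are
# illustrative assumptions, not the intended implementation.
@torch.no_grad()
def beam_search_sketch(model: nn.Module, processor: TextProcessor, prompt: str,
                       length: int = 200, beam_width: int = CONFIG.beam_width) -> str:
    model.eval()
    device = next(model.parameters()).device
    start = torch.tensor([[processor.char_to_idx[ch] for ch in prompt]], device=device)
    beams = [(0.0, start)]  # (cumulative log-probability, token sequence)
    for _ in range(length):
        candidates = []
        for score, seq in beams:
            # Feed at most seq_length tokens of context, keep the full sequence for output
            logits = model(seq[:, -CONFIG.seq_length:])[0, -1]
            log_probs = F.log_softmax(logits, dim=-1)
            top_lp, top_idx = torch.topk(log_probs, beam_width)
            for lp, idx in zip(top_lp.tolist(), top_idx.tolist()):
                next_token = torch.tensor([[idx]], device=device)
                candidates.append((score + lp, torch.cat([seq, next_token], dim=1)))
        # Keep only the beam_width highest-scoring continuations
        beams = sorted(candidates, key=lambda c: c[0], reverse=True)[:beam_width]
    return processor.decode(beams[0][1][0].tolist())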


if __name__ == "__main__":
    with open(CONFIG.file_path, 'r', encoding='utf-8') as f:
        text = f.read()

    processor = TextProcessor(text)
    encoded = processor.encode(text)
    dataset = TextDataset(encoded, CONFIG.seq_length)
    train_size = int(len(dataset) * (1 - CONFIG.val_split))
    train_set, val_set = random_split(dataset, [train_size, len(dataset) - train_size])

    train_loader = DataLoader(train_set, batch_size=CONFIG.batch_size,
                              shuffle=True, pin_memory=True)
    val_loader = DataLoader(val_set, batch_size=CONFIG.batch_size * 2)

    model = TransformerLM(processor)
    trainer = Trainer(model, processor)
    best_loss = float('inf')
    patience = 0

    for epoch in range(CONFIG.epochs):
        train_loss = trainer.train_epoch(train_loader)
        val_loss = trainer.evaluate(val_loader)
        trainer.scheduler.step(val_loss)

        trainer.writer.add_scalar('Loss/train', train_loss, epoch)
        trainer.writer.add_scalar('Loss/val', val_loss, epoch)
        trainer.writer.add_scalar('LR', trainer.optimizer.param_groups[0]['lr'], epoch)

        if val_loss < best_loss:
            best_loss = val_loss
            patience = 0
            torch.save(model.state_dict(), CONFIG.model_save_path)
        else:
            patience += 1
            if patience >= CONFIG.early_stop_patience:
                print(f"Early stopping at epoch {epoch + 1}")
                break

        print(f"Epoch {epoch + 1}/{CONFIG.epochs} | "
              f"Train Loss: {train_loss:.4f} | Val Loss: {val_loss:.4f}")

    trainer.writer.close()

    # Generate from the best checkpoint saved during training
    model.load_state_dict(torch.load(CONFIG.model_save_path, map_location=CONFIG.device))
    generator = TextGenerator(model, processor)
    print("\nGenerated text (temperature=0.7):")
    print(generator.generate("The ", temperature=0.7, top_k=50))
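
    # Optional, commented-out examples of the alternative decoding entry points:
    # print(generator.generate("The ", method='beam'))  # currently falls back to sampling
    # print(beam_search_sketch(model, processor, "The ", length=100))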