# Character-level LSTM language model demo (PyTorch).
# (source snapshot c1fcc58)
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
# Hyperparameters
# NOTE: batch_size * sequence_length must stay below len(text), otherwise
# create_batches yields nothing and training is a silent no-op. The original
# values (100 * 64) vastly exceeded the ~73-character corpus.
sequence_length = 10
batch_size = 4
hidden_size = 128
num_layers = 1
learning_rate = 0.01
num_epochs = 100
device = torch.device('cpu')
# Sample text data
text = "Hello, this is a simple language model for text generation using PyTorch."
chars = sorted(set(text))
vocab_size = len(chars)
char_to_idx = {ch: i for i, ch in enumerate(chars)}
idx_to_char = {i: ch for i, ch in enumerate(chars)}
# Prepare data: one integer id per character of the corpus.
data = torch.tensor([char_to_idx[ch] for ch in text], dtype=torch.long).to(device)
# Create batches
def create_batches(data, batch_size, sequence_length):
    """Yield (input, target) batches of shape (batch_size, sequence_length).

    Targets are the inputs shifted right by one character. One extra element
    is reserved when trimming so every input window has a full-length target
    window (the original dropped it, leaving the last target one step short
    and crashing the loss with mismatched shapes).
    """
    chunk = batch_size * sequence_length
    num_batches = (len(data) - 1) // chunk  # -1 reserves the shifted target
    if num_batches == 0:
        return  # corpus too small for even one batch
    inputs = data[:num_batches * chunk].view(batch_size, -1)
    targets = data[1:num_batches * chunk + 1].view(batch_size, -1)
    for i in range(0, inputs.size(1), sequence_length):
        yield inputs[:, i:i + sequence_length], targets[:, i:i + sequence_length]
# Define the model
class CharRNN(nn.Module):
    """Character-level LSTM language model.

    Maps character ids -> embedding -> LSTM -> per-character vocabulary logits.
    """
    def __init__(self, vocab_size, hidden_size, num_layers):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.embedding = nn.Embedding(vocab_size, hidden_size)
        self.lstm = nn.LSTM(hidden_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, vocab_size)
    def forward(self, x, hidden):
        """x: (batch, seq) long ids. Returns (logits of shape
        (batch, seq, vocab_size), new hidden state)."""
        emb = self.embedding(x)
        out, hidden = self.lstm(emb, hidden)
        return self.fc(out), hidden
    def init_hidden(self, batch_size):
        """Return zero-initialized (h0, c0) for a batch.

        The tensors are placed on the device of the model's own parameters
        instead of the module-level `device` global, so the model keeps
        working if it is moved (e.g. via .to(...)) after construction.
        """
        dev = next(self.parameters()).device
        shape = (self.num_layers, batch_size, self.hidden_size)
        return (torch.zeros(shape, device=dev),
                torch.zeros(shape, device=dev))
# Initialize model, loss, and optimizer
model = CharRNN(vocab_size, hidden_size, num_layers).to(device)
criterion = nn.CrossEntropyLoss()  # expects logits (N, C, L) vs targets (N, L)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Training loop
# NOTE(review): create_batches yields nothing when batch_size * sequence_length
# exceeds len(data) - the inner loop then never runs and the model is never
# trained. Verify the hyperparameters against the corpus size.
for epoch in range(num_epochs):
    # One zero-initialized hidden state per epoch; it is carried across
    # batches so the LSTM sees the corpus as one continuous stream.
    hidden = model.init_hidden(batch_size)
    for i, (x, y) in enumerate(create_batches(data, batch_size, sequence_length)):
        x, y = x.to(device), y.to(device)
        # Detach so gradients do not flow back through earlier batches
        # (truncated backpropagation through time).
        hidden = tuple(h.detach() for h in hidden)
        optimizer.zero_grad()
        output, hidden = model(x, hidden)
        # CrossEntropyLoss wants (N, classes, seq); output is (N, seq, classes).
        loss = criterion(output.transpose(1, 2), y)
        loss.backward()
        optimizer.step()
        if (i+1) % 10 == 0:
            print(f'Epoch [{epoch+1}/{num_epochs}], Step [{i+1}], Loss: {loss.item():.4f}')
# Generate text
def generate_text(model, start_str, length=100):
    """Sample `length` characters from the model, seeded with `start_str`.

    Returns start_str followed by the sampled characters. Relies on the
    module-level char_to_idx / idx_to_char / vocab_size / device globals.
    """
    model.eval()
    indices = [char_to_idx[ch] for ch in start_str]
    hidden = model.init_hidden(1)
    with torch.no_grad():
        # Prime the hidden state on every prompt character except the last.
        # The original fed only the final character each step, so the rest
        # of the prompt never influenced generation at all.
        for idx in indices[:-1]:
            x = torch.tensor([[idx]], dtype=torch.long).to(device)
            _, hidden = model(x, hidden)
        for _ in range(length):
            x = torch.tensor([[indices[-1]]], dtype=torch.long).to(device)
            output, hidden = model(x, hidden)
            # (1, 1, vocab) -> (vocab,); softmax over the vocabulary.
            prob = torch.softmax(output.squeeze(), dim=0).cpu().numpy()
            # Renormalize: float32 softmax may miss np.random.choice's
            # sum-to-1 tolerance and raise ValueError.
            prob = prob / prob.sum()
            # np.random.choice returns a numpy integer; keep plain ints.
            indices.append(int(np.random.choice(vocab_size, p=prob)))
    return ''.join(idx_to_char[i] for i in indices)
# Smoke-test generation: sample 200 characters from a short prompt.
start_str = "Hello"
generated_text = generate_text(model, start_str, length=200)
print(generated_text)