import torch
import numpy as np
# Configuration (MUST match training configuration)
CONFIG = {
"FILE_PATH": 'dataset.txt',
"SEQ_LENGTH": 32,
"EMBEDDING_DIM": 64,
"HIDDEN_DIM": 64,
"NUM_LAYERS": 1,
"DROPOUT": 0.2,
"MODEL_SAVE_PATH": "char_lm_advanced.pth",
"TEMPERATURE": 0.7,
"TOP_K": 5,
"TOP_P": 0.95
}
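# TEMPERATURE < 1 sharpens the output distribution, TOP_K keeps only the k most
# likely characters, and TOP_P (nucleus sampling) keeps the smallest set of
# characters whose cumulative probability exceeds p; setting TOP_K or TOP_P to 0
# disables the corresponding filter in generate_text below.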
# Load vocabulary
with open(CONFIG["FILE_PATH"], 'r', encoding='utf-8') as f:
    text = f.read()
chars = sorted(list(set(text)))
char_to_idx = {ch: i for i, ch in enumerate(chars)}
idx_to_char = {i: ch for i, ch in enumerate(chars)}
vocab_size = len(chars)
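# Note: the vocabulary is rebuilt from the raw dataset, so FILE_PATH must point to
# the exact file used during training; a different file changes the index mapping
# and the checkpoint will decode incorrectly.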
# Model definition (must match training architecture)
class CharLM(torch.nn.Module):
    def __init__(self):
        super(CharLM, self).__init__()
        self.embedding = torch.nn.Embedding(vocab_size, CONFIG["EMBEDDING_DIM"])
        self.lstm = torch.nn.LSTM(
            CONFIG["EMBEDDING_DIM"],
            CONFIG["HIDDEN_DIM"],
            num_layers=CONFIG["NUM_LAYERS"],
            # nn.LSTM only applies dropout between stacked layers
            dropout=CONFIG["DROPOUT"] if CONFIG["NUM_LAYERS"] > 1 else 0,
            batch_first=True
        )
        self.dropout = torch.nn.Dropout(CONFIG["DROPOUT"])
        self.fc = torch.nn.Linear(CONFIG["HIDDEN_DIM"], vocab_size)

    def forward(self, x, hidden=None):
        x = self.embedding(x)               # (batch, seq) -> (batch, seq, embed)
        out, hidden = self.lstm(x, hidden)  # (batch, seq, hidden)
        out = self.dropout(out)
        out = self.fc(out)                  # logits over the vocabulary
        return out, hidden
# Load trained model
model = CharLM()
# map_location keeps loading CPU-friendly even if the checkpoint was saved on a GPU
model.load_state_dict(torch.load(CONFIG["MODEL_SAVE_PATH"], map_location=torch.device('cpu')))
model.eval()
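# Everything below stays on the CPU; to sample on a GPU, move the model and the
# tensors created in generate_text to the same device.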
def generate_text(model, start_str, length=200, temperature=CONFIG["TEMPERATURE"],
                  top_k=CONFIG["TOP_K"], top_p=CONFIG["TOP_P"]):
    """
    Generate text with temperature scaling, top-k, and nucleus (top-p) sampling.
    """
    model.eval()
    generated = list(start_str)
    input_seq = torch.tensor([char_to_idx[ch] for ch in generated]).unsqueeze(0)
    hidden = None
    with torch.no_grad():
        for _ in range(length):
            outputs, hidden = model(input_seq, hidden)
            # Logits for the last position, scaled by temperature
            logits = outputs[0, -1] / temperature
            # Apply top-k filtering: keep only the k most likely characters
            if top_k > 0:
                top_k = min(top_k, logits.size(-1))  # guard against k > vocab size
                top_vals, top_idx = torch.topk(logits, top_k)
                logits[logits < top_vals[-1]] = -float('Inf')
            # Apply nucleus (top-p) filtering: keep the smallest set of characters
            # whose cumulative probability exceeds top_p
            if top_p > 0:
                sorted_logits, sorted_indices = torch.sort(logits, descending=True)
                cumulative_probs = torch.cumsum(torch.softmax(sorted_logits, dim=-1), dim=-1)
                sorted_indices_to_remove = cumulative_probs > top_p
                # Shift right so the first character above the threshold is kept
                sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
                sorted_indices_to_remove[..., 0] = 0
                indices_to_remove = sorted_indices[sorted_indices_to_remove]
                logits[indices_to_remove] = -float('Inf')
            # Sample the next character and feed it back as the next input
            probs = torch.softmax(logits, dim=-1)
            next_char = torch.multinomial(probs, num_samples=1).item()
            generated.append(idx_to_char[next_char])
            input_seq = torch.tensor([[next_char]])
    return ''.join(generated)
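# Example of a one-off, non-interactive call (assumes every character of the
# prompt appears in the training vocabulary):
#   sample = generate_text(model, "The ", length=120, temperature=0.8, top_k=0, top_p=0.9)
#   print(sample)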
# Interactive loop
while True:
    try:
        print("\n" + "=" * 50)
        prompt = input("Enter your starting text (or 'exit' to quit):\n> ")
        if prompt.lower() == 'exit':
            print("Goodbye!")
            break
        # Drop characters that are not in the training vocabulary
        valid_prompt = [c for c in prompt if c in char_to_idx]
        if not valid_prompt:
            print("Please use characters from the training data.")
            continue
        # Get generation parameters (blank input falls back to the defaults)
        length = int(input("Output length (50-500 recommended) [200]: ") or 200)
        temp = float(input(f"Temperature [{CONFIG['TEMPERATURE']}]: ") or CONFIG["TEMPERATURE"])
        top_k = int(input(f"Top-K [{CONFIG['TOP_K']}]: ") or CONFIG["TOP_K"])
        top_p = float(input(f"Top-P [{CONFIG['TOP_P']}]: ") or CONFIG["TOP_P"])
        # Generate and display
        print("\nGenerating...")
        generated = generate_text(
            model,
            ''.join(valid_prompt),
            length=length,
            temperature=temp,
            top_k=top_k,
            top_p=top_p
        )
        print("\nGenerated Text:")
        print(generated)
        print("=" * 50)
    except ValueError:
        print("Invalid input! Please enter valid numbers for parameters.")
    except KeyboardInterrupt:
        print("\nExiting...")
        break