import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
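
# Directory of the fine-tuned GPT-2 checkpoint; adjust if yours lives elsewhere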
model_dir = "./gpt2-finetuned"
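
# Load the tokenizer and model weights from the fine-tuned checkpoint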
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForCausalLM.from_pretrained(model_dir)

# GPT-2's tokenizer has no pad token by default; reuse the EOS token so that
# padding=True below does not raise a "no padding token" error
tokenizer.pad_token = tokenizer.eos_token

# Run on the GPU when one is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

print("Chat with the model! Type 'exit' or 'quit' to end the conversation.")

while True:
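    # Read one message per turn; 'exit' or 'quit' ends the session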
    user_input = input("You: ")
    if user_input.lower() in ["exit", "quit"]:
        print("Exiting chat.")
        break

    # Tokenize the user's message and move the tensors to the model's device
    inputs = tokenizer(user_input, return_tensors="pt", padding=True, truncation=True)
    input_ids = inputs["input_ids"].to(device)
    attention_mask = inputs["attention_mask"].to(device)

    # Sample a reply; note that max_length counts the prompt tokens plus the reply
    output_ids = model.generate(
        input_ids,
        attention_mask=attention_mask,
        max_length=100,
        do_sample=True,   # sample instead of greedy decoding
        top_p=0.95,       # nucleus sampling: smallest set covering 95% probability mass
        top_k=50,         # restricted to the 50 most likely tokens
        pad_token_id=tokenizer.eos_token_id,
    )

    # The output includes the prompt tokens, so decode only the newly generated ones
    response = tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)
    print("Bot:", response)