# Requires the torch and transformers packages (pip install torch transformers).
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Load the pretrained GPT-2 model and its matching tokenizer.
model_name = "gpt2"
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
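
# Generate a single-turn reply: GPT-2 has no chat training, so it simply
# continues the prompt text.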
def chat_with_gpt2(input_text):
    # Encode the prompt, appending the end-of-sequence token.
    inputs = tokenizer.encode(input_text + tokenizer.eos_token, return_tensors='pt')

    # Generate without gradient tracking; max_length caps prompt plus continuation.
    with torch.no_grad():
        outputs = model.generate(
            inputs,
            max_length=150,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode only the newly generated tokens so the prompt is not echoed back.
    response = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
    return response
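
# Minimal console loop; type "exit" to quit.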
print("سلام! از من سوال بپرسید.")
|
|
while True:
|
|
user_input = input("شما: ")
|
|
if user_input.lower() == "خروج":
|
|
print("خداحافظ!")
|
|
break
|
|
response = chat_with_gpt2(user_input)
|
|
print("GPT-2: " + response)
|
|
|