# tamilkavithai / loadmodel.py
# Author: Vikram Thangaraj
# Initial Tamil Kavithai chatbot deployment (commit c6927d3)
from transformers import AutoTokenizer, AutoModelForCausalLM


def download_model(model_name: str = "abinayam/gpt-2-tamil",
                   save_dir: str = "model") -> None:
    """Download a causal-LM checkpoint and its tokenizer, then save both locally.

    Args:
        model_name: Hugging Face Hub model id to fetch
            (default: the Tamil GPT-2 model this project uses).
        save_dir: Local directory to write the model and tokenizer to
            (created by ``save_pretrained`` if it does not exist).
    """
    # Fetch both artifacts from the Hugging Face Hub (network access required).
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    # Persist weights/config and tokenizer files into the same folder so the
    # app can later load everything offline from `save_dir`.
    model.save_pretrained(save_dir)
    tokenizer.save_pretrained(save_dir)
    # NOTE(review): original printed mojibake "βœ…" — restored to the intended ✅.
    print(f"✅ Model and tokenizer saved successfully in './{save_dir}'")


if __name__ == "__main__":
    # Preserve the original script behavior: running the file performs the download.
    download_model()