File size: 199 Bytes
ad49fb5
 
 
 
 
 
 
1
2
3
4
5
6
7
import torch
# Fix: VisionEncoderDecoderModel was referenced below without being imported,
# which raises NameError. It comes from Hugging Face `transformers`.
from transformers import VisionEncoderDecoderModel

# Download (or load from the local HF cache) the pretrained ViT-encoder /
# GPT-2-decoder image-captioning model published as
# "nlpconnect/vit-gpt2-image-captioning" on the Hugging Face Hub.
model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")

# Persist only the weights (the state dict), not the full pickled module —
# reloading later requires reconstructing the architecture first and then
# calling model.load_state_dict(torch.load("model.pth")).
torch.save(model.state_dict(), "model.pth")