import models  # the project's custom model definition module (models.py)
import torch
import torchvision.transforms as transforms
import cv2

# initialize the computation device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# initialize the model
model = models.model(pretrained=False, requires_grad=False).to(device)
# load the model checkpoint (map_location keeps this working on CPU-only machines)
checkpoint = torch.load('../outputs/model.pth', map_location=device)
# load the trained weights from the checkpoint's state_dict
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()

transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.ToTensor(),
])

genres = ['Action', 'Adventure', 'Animation', 'Biography', 'Comedy', 'Crime',
          'Documentary', 'Drama', 'Family', 'Fantasy', 'History', 'Horror', 'Music',
          'Musical', 'Mystery', 'N/A', 'News', 'Reality-TV', 'Romance', 'Sci-Fi', 'Short',
          'Sport', 'Thriller', 'War', 'Western']

# read the poster image and convert from OpenCV's BGR to RGB
image = cv2.imread("../input/movie-classifier/Multi_Label_dataset/Images/tt0084058.jpg")
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# apply the transforms; ToTensor already yields a float32 tensor
image = transform(image)
image = image.to(device)
# add a batch dimension
image = torch.unsqueeze(image, dim=0)
# get the predictions by passing the image through the model
outputs = model(image)
outputs = torch.sigmoid(outputs)
outputs = outputs.detach().cpu()
# map each genre name to its predicted probability
out_dict = {k: v for k, v in zip(genres, outputs.tolist()[0])}
print(out_dict)
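If actual genre labels are wanted rather than raw sigmoid scores, a small post-processing step can threshold or rank the `out_dict` produced above. This is a minimal sketch; the 0.5 threshold and the top-3 cut-off are illustrative assumptions, not values taken from the original script.

# sort genres by predicted probability, highest first
ranked = sorted(out_dict.items(), key=lambda kv: kv[1], reverse=True)

# option 1: keep every genre whose score clears a threshold
# (0.5 is an assumed cut-off, not from the original script)
predicted = [genre for genre, score in ranked if score >= 0.5]

# option 2: fall back to the three highest-scoring genres
top3 = [genre for genre, _ in ranked[:3]]

print("thresholded:", predicted)
print("top-3:", top3)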