from transformers import AlbertTokenizer, AlbertForSequenceClassification
import torch
import numpy as np
class Model:
    def __init__(self, model_weights):
        self.tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
        self.model = AlbertForSequenceClassification.from_pretrained('albert-base-v2', num_labels=5)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Load the checkpoint
        checkpoint = torch.load(model_weights, map_location=self.device)
        # Load the model's state dictionary
        self.model.load_state_dict(checkpoint['model_state_dict'], strict=False)
        self.currepoch = checkpoint['epoch']
        self.loss = checkpoint['loss']
        print(f"Loaded model state: current epoch {self.currepoch}, current loss {self.loss}")
        self.model.to(self.device)
        self.model.eval()
    def predict(self, text):
        inputs = self.tokenizer(text, return_tensors='pt', padding=True, truncation=True, max_length=512)
        inputs = {k: v.to(self.device) for k, v in inputs.items()}
        with torch.no_grad():
            outputs = self.model(**inputs)
        logits = outputs.logits
        predictions = torch.nn.functional.softmax(logits, dim=-1)
        # Move probabilities to the CPU before converting; np.argmax cannot
        # operate on a CUDA tensor directly.
        probs = predictions[0].cpu().tolist()
        # The head was loaded with num_labels=5, but only the first two
        # classes are mapped to labels here, so restrict argmax to those.
        labels = ["No", "Yes"]
        return probs[:2], labels[int(np.argmax(probs[:2]))]
model_instance = None
model_weights = "assets/albert_sentiment_checkpoint_58.pt"

def get_model():
    # Lazily construct a single shared Model instance so the checkpoint
    # is only loaded once per process.
    global model_instance
    if model_instance is None:
        model_instance = Model(model_weights)
    return model_instance
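
# Example usage: a minimal sketch showing how the singleton and predict()
# fit together. It assumes the checkpoint above exists locally and was saved
# with 'model_state_dict', 'epoch', and 'loss' keys; the sample sentence is
# illustrative only.
if __name__ == "__main__":
    model = get_model()
    probabilities, label = model.predict("This movie was surprisingly good.")
    print(f"Probabilities: {probabilities}, label: {label}")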