# EssayScoring/model/mistral_7b_ielts_evaluator.py
from typing import Tuple

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

import spaces

# Load the fine-tuned IELTS evaluator once at import time so it is shared
# across requests.
model_name = "chillies/mistral-7b-ielts-evaluator-q4"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)


@spaces.GPU()
@torch.no_grad()
def grade_mistral_7b_ielts_evaluator(question: str, answer: str) -> Tuple[float, str]:
    """Score an IELTS essay by classifying the concatenated prompt and answer."""
    text = f"{question} {answer}"
    inputs = tokenizer(
        text,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )
    outputs = model(**inputs)

    # The predicted class index is used directly as the overall band score.
    score = outputs.logits.argmax(dim=-1).item()
    print(score)

    overall_score = float(score)
    comment = ""

    return overall_score, comment
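

# A minimal local usage sketch, not part of the Space's call path: the sample
# question and answer strings below are hypothetical and exist only to show
# how the grading function is invoked and what it returns.
if __name__ == "__main__":
    sample_question = (
        "Some people think universities should focus on preparing students "
        "for the workplace. To what extent do you agree or disagree?"
    )
    sample_answer = (
        "In my opinion, universities should balance academic knowledge with "
        "practical skills, because both are needed for a successful career."
    )
    band, feedback = grade_mistral_7b_ielts_evaluator(sample_question, sample_answer)
    print(f"Predicted overall band: {band}")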