Spaces:
Running
Running
File size: 860 Bytes
6364f02 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 |
from functools import lru_cache

import torch
import torch.nn.functional as F
from transformers import AutoModelForSequenceClassification, AutoTokenizer

import const
@lru_cache(maxsize=1)
def load_model():
    """Load the Japanese emotion-classification model in eval mode.

    Cached with ``lru_cache`` so repeated calls (e.g. one per ``exec``
    invocation) reuse the same instance instead of re-instantiating —
    and possibly re-downloading — the weights every time.

    Returns:
        The pretrained ``koshin2001/Japanese-to-emotions`` sequence-
        classification model, switched to evaluation mode.
    """
    return AutoModelForSequenceClassification.from_pretrained(
        "koshin2001/Japanese-to-emotions"
    ).eval()
@lru_cache(maxsize=1)
def load_tokenizer():
    """Load the tokenizer matching the emotion-classification model.

    Cached with ``lru_cache`` for the same reason as ``load_model``:
    avoid rebuilding the tokenizer on every call.

    Returns:
        The pretrained ``koshin2001/Japanese-to-emotions`` tokenizer.
    """
    return AutoTokenizer.from_pretrained("koshin2001/Japanese-to-emotions")
def exec(text):
    """Classify the emotions expressed in a Japanese text.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict mapping each label in ``const.EMOTIONS`` to its softmax
        probability for *text*.
    """
    # NOTE(review): the name shadows the `exec` builtin; kept unchanged
    # because it is this module's public entry point.
    model = load_model()
    tokenizer = load_tokenizer()
    inputs = tokenizer(
        text,
        return_tensors="pt",
        truncation=True,
        return_token_type_ids=False,
        max_length=512,
    )
    # Pure inference: run under no_grad so no autograd graph is built.
    # Use the logits tensor directly — the original
    # torch.tensor(logits).clone().detach().requires_grad_(True) copied
    # the tensor needlessly, triggered a PyTorch UserWarning (torch.tensor
    # on an existing tensor), and re-enabled gradients on a value that is
    # only fed to softmax.
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = F.softmax(logits, dim=-1)[0].tolist()
    return dict(zip(const.EMOTIONS, probs))
|