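# Gradio demo: classify Japanese text into eight emotion categories with a fine-tuned Japanese BERT model.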
import gradio as gr
import numpy as np
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
# Emotion class names, paired positionally with the model's output logits
# (喜び=joy, 悲しみ=sadness, 期待=anticipation, 驚き=surprise, 怒り=anger, 信頼=trust, 嫌悪=disgust)
labels = ['喜び', '悲しみ', '期待', '驚き', '怒り', '信頼', '悲しみ', '嫌悪']
def np_softmax(x):
    # Numerically stable softmax over a 1-D array of logits
    x_exp = np.exp(x - np.max(x))
    return x_exp / x_exp.sum()
def emotion_clf(text):
    # Tokenize the input, run the classifier, and map each label to its probability
    model.eval()
    tokens = tokenizer(text, truncation=True, return_tensors="pt")
    tokens = tokens.to(model.device)
    with torch.no_grad():
        preds = model(**tokens)
    prob = np_softmax(preds.logits.cpu().numpy()[0])
    out_dict = {n: p.item() for n, p in zip(labels, prob)}
    return out_dict
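# Load the Japanese BERT tokenizer and the fine-tuned 8-class emotion model from the Hugging Face Hub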
tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese-whole-word-masking")
model = AutoModelForSequenceClassification.from_pretrained("jingwora/language-emotion-classification-ja", num_labels=8)
title = "Emotion Classification"
description = "Enter Japanese text and get the emotion probabilities."
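# UI components: a multi-line input textbox and a label output showing all eight class probabilities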
text = gr.Textbox(placeholder="Enter Japanese text", label="text", lines=3)
label = gr.Label(num_top_classes=8)
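# Japanese example sentences illustrating the emotion classes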
examples = [
["ๅคงๅใชๅไบบใ็ตๅฉใ็บ่กจใใใจใใๅฟใๆบใก่ถณใใๅนธใใๆใใพใใใ"],
["ๆใใ็ฅๆฏใไบกใใชใฃใใจใใ่ธใซๆทฑใๆฒใใฟใๅบใใใพใใใ"],
["ๆฐใใไปไบใๅงใใๅๅคใๆๆฅใใใฎๆช็ฅใฎไธ็ใซ่ธใ้ซ้ณดใใพใใใ"],
["ๅฑฑ้ใๆญฉใใฆใใใใ็พใใๆปใ็พใใๆใใๆฏใ้ฃฒใ้ฉใใๆใใพใใใ"],
["ๅ
ฌๅนณใใๆฌ ใใๆฑบๅฎใๅใใใจใใๆคใใๆฑใใฆๆใใๆนงใไธใใใพใใใ"],
["้ฃใใๅฑ้ขใงไปฒ้ใใใฃใใใจๆฏใใฆใใใฆใๅฟใใใฎไฟก้ ผใๆใใพใใใ"],
["ๅคฑๆใใใจใใๅฟใซ้ใๆฒใใฟใๅบใใใไฝใๆใซใคใใชใ็ถๆณใงใใใ"],
["ๆฑใใๅ ดๆใง้ทๆ้้ใใใใจใใไธๅฟซๆใจๅซๆชๆใๅใใพใใใ"],
]
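# Assemble the web interface: prediction function, components, example inputs, and theme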
demo = gr.Interface(
    fn=emotion_clf,
    inputs=text,
    outputs=label,
    title=title,
    description=description,
    examples=examples,
    interpretation="default",
    theme="freddyaboulton/dracula_revamped",
)
demo.launch()
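# Optionally, demo.launch(share=True) would also create a temporary public share link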