import gradio as gr
import numpy as np
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
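
# Gradio demo: emotion classification for Japanese text with a fine-tuned BERT classifier.
# The class names below are assumed to follow the label/index order used when the model was fine-tuned.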
labels = ['喜び', '悲しみ', '期待', '驚き', '怒り', '信頼', '悲しみ', '嫌悪']

def np_softmax(x):
    # Numerically stable softmax over a 1-D NumPy array of logits:
    # subtract the max before exponentiating to avoid overflow.
    x_exp = np.exp(x - np.max(x))
    return x_exp / x_exp.sum()
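# e.g. np_softmax(np.array([0.0, 1.0])) ≈ [0.269, 0.731]; the probabilities always sum to 1.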

def emotion_clf(text):
    # Tokenize the input, run the classifier, and return {emotion: probability} for gr.Label.
    tokens = tokenizer(text, truncation=True, return_tensors="pt").to(model.device)
    with torch.no_grad():
        preds = model(**tokens)
    prob = np_softmax(preds.logits.cpu().numpy()[0])
    return {n: p.item() for n, p in zip(labels, prob)}

tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese-whole-word-masking")
model = AutoModelForSequenceClassification.from_pretrained("jingwora/language-emotion-classification-ja", num_labels=8)
model.eval()  # inference only; disables dropout
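
# Optional (assumption: CPU is sufficient for this demo); on a GPU machine the model could be moved with
# model.to("cuda" if torch.cuda.is_available() else "cpu").
# Quick manual sanity check (not part of the app):
# print(emotion_clf("今日はとても嬉しいです。"))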

title = "Emotion Classification"
description = "Enter Japanese text and get the emotion probabilities."

text = gr.Textbox(placeholder="Enter Japanese text", label="text", lines=3)
label = gr.Label(num_top_classes=8)
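
# One example sentence per emotion, in the same order as `labels` above.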
examples = [
    ["大切な友人が結婚を発表したとき、心が満ち足りて幸せを感じました。"],
    ["愛する祖母が亡くなったとき、胸に深い悲しみが広がりました。"],
    ["新しい仕事を始める前夜、明日からの未知の世界に胸が高鳴りました。"],
    ["山道を歩いていたら、美しい滝が現れ、思わず息を飲む驚きを感じました。"],
    ["公平さを欠いた決定を受けたとき、憤りを抱えて怒りが湧き上がりました。"],
    ["難しい局面で仲間がしっかりと支えてくれて、心からの信頼を感じました。"],
    ["失恋したとき、心に重い悲しみが広がり、何も手につかない状況でした。"],
    ["汚れた場所で長時間過ごしたとき、不快感と嫌悪感が募りました。"],
]

demo = gr.Interface(
    fn=emotion_clf,
    inputs=text,
    outputs=label,
    title=title,
    description=description,
    examples=examples,
    theme="freddyaboulton/dracula_revamped",
)
demo.launch()
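
# demo.launch(share=True) would expose a temporary public URL instead of only serving locally
# (optional; the default local launch above is assumed to be the intended behavior).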