from transformers import RobertaTokenizer, AutoModelForSequenceClassification
from scipy.special import expit
import numpy as np
import os
import gradio as gr
# Workaround: skip the Hub chat-template lookup, which this classification
# model does not need and which can break loading on some transformers versions.
import transformers.utils.hub as _hub
_hub.list_repo_templates = lambda *args, **kwargs: []  # no-op
# Set up the model; the Hub access token is read from the environment.
authtoken = os.environ.get("TOKEN")
tokenizer = RobertaTokenizer.from_pretrained(
    "guidecare/feelings_and_issues_large_v2",
    use_safetensors=True,
    use_auth_token=authtoken,
)
tokenizer.do_lower_case = True
model = AutoModelForSequenceClassification.from_pretrained(
    "guidecare/feelings_and_issues_large_v2",
    use_safetensors=True,
    use_auth_token=authtoken,
)
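# Label names, assumed to come back in the same order as the model's output logits.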
all_label_names = list(model.config.id2label.values())
def predict(text):
    # Multi-label classifier: a sigmoid turns each logit into an independent probability.
    inputs = tokenizer([text], return_tensors="pt", padding=True)
    probs = expit(model(**inputs).logits.detach().numpy())
    probs = [float(np.round(p, 2)) for p in probs[0]]
    zipped_list = list(zip(all_label_names, probs))
    print(text, zipped_list)  # log the input and scores to the console
    # Group labels by prefix and sort each group by descending probability.
    issues = [(i, j) for i, j in zipped_list if i.startswith('issue')]
    feelings = [(i, j) for i, j in zipped_list if i.startswith('feeling')]
    harm = [(i, j) for i, j in zipped_list if i.startswith('harm')]
    sentiment = [(i, j) for i, j in zipped_list if i.startswith('sentiment')]
    issues = sorted(issues, key=lambda x: x[1], reverse=True)
    feelings = sorted(feelings, key=lambda x: x[1], reverse=True)
    harm = sorted(harm, key=lambda x: x[1], reverse=True)
    sentiment = sorted(sentiment, key=lambda x: x[1], reverse=True)
    # gr.Label expects a {label: confidence} dict.
    top = issues + feelings + harm + sentiment
    return {i: j for i, j in top}
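# Build the Gradio UI: free-text input in, per-label confidences out.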
iface = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(label="Enter text"),
    outputs=gr.Label(label="Predictions"),
    title="Emotion and Issue Predictor",
    description="Enter text to predict emotions and issues.",
)
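# Launch the web app when the script is run directly.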
if __name__ == "__main__":
    iface.launch()