from transformers import AutoTokenizer, AutoModelForSequenceClassification
from scipy.special import expit
import numpy as np
import os
import gradio as gr

# Set up the model. Fall back to the cached Hugging Face login (True) if no
# TOKEN environment variable is set.
auth_token = os.environ.get("TOKEN") or True
tokenizer = AutoTokenizer.from_pretrained(
    "guidecare/feelings_and_issues", use_auth_token=auth_token
)
model = AutoModelForSequenceClassification.from_pretrained(
    "guidecare/feelings_and_issues", use_auth_token=auth_token
)
all_label_names = list(model.config.id2label.values())


def predict(text):
    # Run the multi-label classifier and squash the logits with a sigmoid.
    logits = model(**tokenizer([text], return_tensors="pt", padding=True)).logits
    probs = expit(logits.detach().numpy())
    # Round and convert to plain Python floats (the Gradio label output
    # doesn't accept numpy scalars).
    probs = [float(np.round(p, 2)) for p in probs[0]]

    # Break the labels out into issue, feeling, and harm groups.
    zipped_list = list(zip(all_label_names, probs))
    issues = [(name, p) for name, p in zipped_list if name.startswith("issue")]
    feelings = [(name, p) for name, p in zipped_list if name.startswith("feeling")]
    harm = [(name, p) for name, p in zipped_list if name.startswith("harm")]

    # Keep the highest-probability labels in each group
    # (sort by probability, not alphabetically by label name).
    issues = sorted(issues, key=lambda x: x[1], reverse=True)[:3]
    feelings = sorted(feelings, key=lambda x: x[1], reverse=True)[:3]
    harm = sorted(harm, key=lambda x: x[1], reverse=True)[:1]

    # The top labels are the combination of the three groups.
    top = issues + feelings + harm
    return {name: p for name, p in top}


iface = gr.Interface(
    fn=predict,
    inputs="text",
    outputs="label",
    # examples=["This test tomorrow is really freaking me out."]
)
iface.launch()