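# Gradio Space: scores free text with the guidecare/feelings_and_issues
# multi-label classifier and surfaces the top issue, feeling, and harm labels.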
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from scipy.special import expit
import numpy as np
import os
import gradio as gr

# set up model
# HF access token from the environment; `True` falls back to the locally cached login
auth_token = os.environ.get("TOKEN") or True
tokenizer = AutoTokenizer.from_pretrained("guidecare/feelings_and_issues", use_auth_token=auth_token)
model = AutoModelForSequenceClassification.from_pretrained("guidecare/feelings_and_issues", use_auth_token=auth_token)
# label names, assumed to follow the index order of the output logits
all_label_names = list(model.config.id2label.values())


def predict(text):
    # multi-label model: apply a sigmoid (expit) to each logit independently
    inputs = tokenizer([text], return_tensors="pt", padding=True)
    probs = expit(model(**inputs).logits.detach().numpy())
    # convert to plain Python floats; numpy scalars don't serialize cleanly for the Label output
    probs = [float(np.round(p, 2)) for p in probs[0]]
    # split labels into issue, feeling, and harm groups (other labels, e.g. sentiment, are dropped)
    zipped_list = list(zip(all_label_names, probs))
    issues = [(name, p) for name, p in zipped_list if name.startswith('issue')]
    feelings = [(name, p) for name, p in zipped_list if name.startswith('feeling')]
    harm = [(name, p) for name, p in zipped_list if name.startswith('harm')]
    # keep the highest-probability labels in each group
    issues = sorted(issues, key=lambda x: x[1], reverse=True)[:3]
    feelings = sorted(feelings, key=lambda x: x[1], reverse=True)[:3]
    harm = sorted(harm, key=lambda x: x[1], reverse=True)[:1]
    # combine the group winners into the label -> probability dict Gradio expects
    top = issues + feelings + harm
    return {name: p for name, p in top}

# Gradio UI: a single text box in, a Label component showing the
# label -> probability dict returned by predict()
iface = gr.Interface(
    fn=predict,
    inputs="text",
    outputs="label",
    # examples=["This test tomorrow is really freaking me out."]
)

iface.launch()