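"""Gradio demo: multi-label classification of feelings, issues, and harm indicators, with best-effort logging of each request to Notion."""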
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from scipy.special import expit
import numpy as np
import os
import gradio as gr
import requests
from datetime import datetime

# set up model
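# use the TOKEN environment variable if set; otherwise fall back to the locally cached Hugging Face credentials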
authtoken = os.environ.get("TOKEN") or True
tokenizer = AutoTokenizer.from_pretrained("guidecare/feelings_and_issues", use_auth_token=authtoken)
model = AutoModelForSequenceClassification.from_pretrained("guidecare/feelings_and_issues", use_auth_token=authtoken)
all_label_names = list(model.config.id2label.values())
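# label names are grouped by prefix ('issue', 'feeling', 'harm', ...)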


def predict(text):
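    """Score `text` with the multi-label classifier and return the top issue, feeling, and harm labels with their probabilities."""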
    # tokenize, run the model, and squash the logits with a sigmoid (independent multi-label scores)
    inputs = tokenizer([text], return_tensors="pt", padding=True)
    probs = expit(model(**inputs).logits.detach().numpy())
    # convert to rounded plain Python floats; numpy scalar types caused issues downstream
    probs = [float(np.round(p, 2)) for p in probs[0]]
    # pair each label name with its score, then break out the issue, feeling, and harm groups
    zipped_list = list(zip(all_label_names, probs))
    print(text, zipped_list)  # debug: log raw scores to the console
    issues = [(i, j) for i, j in zipped_list if i.startswith('issue')]
    feelings = [(i, j) for i, j in zipped_list if i.startswith('feeling')]
    harm = [(i, j) for i, j in zipped_list if i.startswith('harm')]
    # keep only the top predictions from each group
    issues = sorted(issues, key=lambda x: x[1], reverse=True)[:3]
    feelings = sorted(feelings, key=lambda x: x[1], reverse=True)[:3]
    harm = sorted(harm, key=lambda x: x[1], reverse=True)[:1]
    # the returned labels are the three groups combined
    top = issues + feelings + harm

    logToNotion(text, top)
    
    d = {i: j for i, j in top}
    return d

def logToNotion(text, top):
    """Log the input text and the top predictions to a Notion database (best-effort)."""
    # skip logging when no Notion integration token is configured
    notion_token = os.environ.get("NotionToken")
    if not notion_token:
        return

    url = "https://api.notion.com/v1/pages"

    payload = {
        "parent": {
            "database_id": "4a220773ac694851811e87f4571ec41d"
        },
        "properties": {
            "title": {
                "title": [{
                    "text": {
                        "content": datetime.now().strftime("%d/%m/%Y %H:%M:%S")
                    }
                }]
            },
            "input": {
                "rich_text": [{
                    "text": {
                        "content": text
                    }
                }]
            },
            "output": {
                "rich_text": [{
                    "text": {
                        "content": ", ".join(str(x) for x in top)
                    }
                }]
            }
        }
    }
    headers = {
        "Accept": "application/json",
        "Notion-Version": "2022-02-22",
        "Content-Type": "application/json",
        "Authorization": "Bearer " + os.environ.get("NotionToken")
    }
    
    # fire-and-forget: the caller does not need the Notion response
    requests.post(url, json=payload, headers=headers)

iface = gr.Interface(
    fn=predict,
    inputs="text",
    outputs="label",
    # examples=["This test tomorrow is really freaking me out."]
)

iface.launch()