Update app.py
app.py CHANGED
@@ -7,11 +7,17 @@ model_name = "MoritzLaurer/mDeBERTa-v3-base-mnli-xnli"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForSequenceClassification.from_pretrained(model_name)
 
+with open('articles_list.pkl', 'rb') as file:
+    articles_list = pickle.load(file)
+
+label_names = []
+for i in articles_list:
+    label_names.append(i[0:15])
+
 def classify(text):
     input = tokenizer(text, truncation=True, return_tensors="pt")
     output = model(input["input_ids"].to(device))  # device = "cuda:0" or "cpu"
     prediction = torch.softmax(output["logits"][0], -1).tolist()
-    label_names = ["θυμός", "χαρά", "λύπη"]
     prediction = {name: round(float(pred) * 100, 1) for pred, name in zip(prediction, label_names)}
     return prediction
 
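In short, the commit drops the hard-coded Greek emotion labels ("θυμός", "χαρά", "λύπη", i.e. anger, joy, sadness) from classify() and instead builds label_names from a pickled list of articles, keeping the first 15 characters of each entry. Below is a minimal, self-contained sketch of how the updated section of app.py fits together. The import block, the device definition, and the .to(device) call on the model are assumptions (they sit above the hunk and are not shown in this diff), and articles_list.pkl is assumed to contain a list of strings.

# Sketch of the updated app.py section; imports and device setup are assumed,
# since the diff hunk starts at line 7 of the file.
import pickle

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_name = "MoritzLaurer/mDeBERTa-v3-base-mnli-xnli"
device = "cuda:0" if torch.cuda.is_available() else "cpu"  # assumption, not shown in the hunk

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name).to(device)

# New in this commit: labels come from a pickled list of articles,
# truncated to their first 15 characters, instead of a hard-coded list.
with open("articles_list.pkl", "rb") as file:
    articles_list = pickle.load(file)

label_names = [article[0:15] for article in articles_list]

def classify(text):
    # "inputs" instead of "input" to avoid shadowing the Python builtin.
    inputs = tokenizer(text, truncation=True, return_tensors="pt")
    output = model(inputs["input_ids"].to(device))
    prediction = torch.softmax(output["logits"][0], -1).tolist()
    # Map each class probability to a label, as a percentage rounded to one decimal.
    return {name: round(float(pred) * 100, 1) for pred, name in zip(prediction, label_names)}

One caveat: zip() stops at the shorter of its two arguments, so label_names should have as many entries as the model has output classes for every class probability to receive a label.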