Spaces:
Runtime error
Runtime error
Jeffrey Rathgeber Jr
committed on
testmodelsonpre
Browse files
app.py
CHANGED
@@ -23,50 +23,50 @@ if option == 'MILESTONE 3':
|
|
23 |
tokenizer_0 = AutoTokenizer.from_pretrained(model_name_0)
|
24 |
classifier_0 = pipeline(task="sentiment-analysis", model=model_0, tokenizer=tokenizer_0)
|
25 |
|
26 |
-
|
27 |
-
#
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
#
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
#
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
#
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
#
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
X_train = [textIn]
|
61 |
-
|
62 |
-
|
63 |
-
with torch.no_grad():
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
st.write(predictions['label'])
|
70 |
|
71 |
|
72 |
col = ['Tweet', 'Highest_Toxicity_Class_Overall', 'Score_Overall', 'Highest_Toxicity_Class_Except_Toxic', 'Score_Except_Toxic']
|
@@ -87,15 +87,18 @@ if option == 'MILESTONE 3':
|
|
87 |
HTCET = [0]*10
|
88 |
SET = [0]*10
|
89 |
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
|
|
|
|
|
|
99 |
|
100 |
|
101 |
|
|
|
23 |
tokenizer_0 = AutoTokenizer.from_pretrained(model_name_0)
|
24 |
classifier_0 = pipeline(task="sentiment-analysis", model=model_0, tokenizer=tokenizer_0)
|
25 |
|
26 |
+
# Load the five fine-tuned toxicity checkpoints (milestone3_1 .. milestone3_5)
# and build a pipeline for each. model_0 / tokenizer_0 / classifier_0 are
# created above from milestone3_0; the aggregate lists below include them.
#
# NOTE(review): these checkpoints are loaded with BertForMaskedLM (the
# AutoModelForSequenceClassification lines were commented out), yet they are
# handed to a task="sentiment-analysis" pipeline, which expects a
# sequence-classification head. This mismatch is the most likely source of the
# runtime error — confirm the checkpoints' actual head and switch back to
# AutoModelForSequenceClassification if they were trained for classification.
models = [model_0]
tokenizers = [tokenizer_0]
classifiers = [classifier_0]
for _idx in range(1, 6):
    _name = f"Rathgeberj/milestone3_{_idx}"
    _model = BertForMaskedLM.from_pretrained(_name)
    _tokenizer = AutoTokenizer.from_pretrained(_name)
    _classifier = pipeline(task="sentiment-analysis", model=_model, tokenizer=_tokenizer)
    models.append(_model)
    tokenizers.append(_tokenizer)
    classifiers.append(_classifier)

# Preserve the original individual names for any later references.
model_1, model_2, model_3, model_4, model_5 = models[1:]
tokenizer_1, tokenizer_2, tokenizer_3, tokenizer_4, tokenizer_5 = tokenizers[1:]
classifier_1, classifier_2, classifier_3, classifier_4, classifier_5 = classifiers[1:]
|
59 |
+
|
60 |
+
# X_train = [textIn]
|
61 |
+
# batch = tokenizer_0(X_train, padding=True, truncation=True, max_length=512, return_tensors="pt")
|
62 |
+
|
63 |
+
# with torch.no_grad():
|
64 |
+
# outputs = model_0(**batch_0, labels=torch.tensor([1, 0]))
|
65 |
+
# predictions = F.softmax(outputs.logits, dim=1)
|
66 |
+
# labels = torch.argmax(predictions, dim=1)
|
67 |
+
# labels = [model.config.id2label[label_id] for label_id in labels.tolist()]
|
68 |
+
|
69 |
+
# st.write(predictions['label'])
|
70 |
|
71 |
|
72 |
col = ['Tweet', 'Highest_Toxicity_Class_Overall', 'Score_Overall', 'Highest_Toxicity_Class_Except_Toxic', 'Score_Except_Toxic']
|
|
|
87 |
HTCET = [0]*10
|
88 |
SET = [0]*10
|
89 |
|
90 |
+
# Run every tweet through all six classifiers and collect the softmax score
# tensors, one entry per (tweet, model) pair, in row-major order
# (tweet 0 x models 0..5, tweet 1 x models 0..5, ...).
pred_data = []

for i in range(10):
    X_train = pre_populated_tweets[i]
    for j in range(6):
        batch = tokenizers[j](X_train, padding=True, truncation=True,
                              max_length=512, return_tensors="pt")
        with torch.no_grad():
            # Bug fix: the original passed labels=torch.tensor([1, 0]) — two
            # labels for a single-example batch — which raises at runtime.
            # No loss is needed for inference, so no labels are passed.
            outputs = models[j](**batch)
            # NOTE(review): for a masked-LM head, logits are
            # (batch, seq_len, vocab); softmax over dim=1 (kept from the
            # original) normalizes over sequence positions — confirm this is
            # the intended axis once the model head is settled.
            predictions = F.softmax(outputs.logits, dim=1)
            class_ids = torch.argmax(predictions, dim=1)
            # Bug fix: the original comprehension referenced the undefined
            # name `model`; the current model is models[j].
            class_labels = [models[j].config.id2label[cid]
                            for cid in class_ids.tolist()]
            pred_data.append(predictions)
|
102 |
|
103 |
|
104 |
|