Update main.py
main.py CHANGED

@@ -1,10 +1,16 @@
 from transformers import AutoTokenizer, AutoModelForSequenceClassification
-
+import torch.nn as nn
 tokenizer = AutoTokenizer.from_pretrained("vikram71198/distilroberta-base-finetuned-fake-news-detection")
 model = AutoModelForSequenceClassification.from_pretrained("vikram71198/distilroberta-base-finetuned-fake-news-detection")
-
-
-
-
-
-
+#Following the same truncation & padding strategy used while training
+encoded_input = tokenizer("Enter any news article to be classified. Can be a list of articles too.", truncation = True, padding = "max_length", max_length = 512, return_tensors='pt')
+output = model(**encoded_input)["logits"]
+#detaching the output from the computation graph
+detached_output = output.detach()
+#Applying softmax here for single label classification
+softmax = nn.Softmax(dim = 1)
+prediction_probabilities = list(softmax(detached_output).detach().numpy())
+predictions = []
+for x,y in prediction_probabilities:
+    predictions.append("not_fake_news") if x > y else predictions.append("fake_news")
+print(predictions)
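The committed snippet hard-codes the label order: the x > y comparison assumes index 0 of the logits is "not_fake_news" and index 1 is "fake_news". Below is a minimal sketch of an equivalent inference helper that instead reads the label names from model.config.id2label and runs the forward pass under torch.no_grad(), which makes the detach() calls unnecessary. The classify function name is ours, not part of the commit, and for this checkpoint id2label may only hold generic LABEL_0/LABEL_1 names, in which case the index-to-label mapping still has to be confirmed against the training setup.

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "vikram71198/distilroberta-base-finetuned-fake-news-detection"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

def classify(articles):
    # Same truncation & padding strategy as the committed snippet
    inputs = tokenizer(articles, truncation=True, padding="max_length",
                       max_length=512, return_tensors="pt")
    # no_grad() skips building the computation graph, so no detach() is needed
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = torch.softmax(logits, dim=1)
    # Map each row's highest-probability index to its configured label name
    return [model.config.id2label[i] for i in probs.argmax(dim=1).tolist()]

print(classify(["Enter any news article to be classified."]))

Looking up labels through id2label keeps the post-processing correct even if the checkpoint's label order differs from what the hard-coded comparison assumes.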