Update app.py
Browse files
app.py
CHANGED
|
@@ -86,6 +86,43 @@ class TweetPreprocessor:
|
|
| 86 |
"""Normalize popularity scores using min-max scaling."""
|
| 87 |
popularity = self.data['Retweets'] + self.data['Likes']
|
| 88 |
return (popularity - popularity.min()) / (popularity.max() - popularity.min() + 1e-6)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 89 |
|
| 90 |
class RecommendationSystem:
|
| 91 |
def __init__(self, data_path: Path, model_name: str):
|
|
|
|
| 86 |
"""Normalize popularity scores using min-max scaling."""
|
| 87 |
popularity = self.data['Retweets'] + self.data['Likes']
|
| 88 |
return (popularity - popularity.min()) / (popularity.max() - popularity.min() + 1e-6)
|
| 89 |
+
class FakeNewsClassifier:
    """Binary fake-news classifier backed by a HuggingFace sequence-classification model."""

    def __init__(self, model_name: str):
        """Initialize the fake news classifier.

        Args:
            model_name: HuggingFace hub identifier (or local path) passed to
                ``from_pretrained`` for both the model and its tokenizer.
        """
        # Prefer GPU when available; both the model and tokenized inputs are
        # moved to this device before the forward pass.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model_name = model_name
        self.model, self.tokenizer = self._load_model()

    def _load_model(self) -> Tuple[AutoModelForSequenceClassification, AutoTokenizer]:
        """Load the model and tokenizer, moving the model to ``self.device``.

        Returns:
            A ``(model, tokenizer)`` pair.

        Raises:
            Exception: re-raised after logging when loading fails (e.g. bad
                model name or no access to the model hub).
        """
        try:
            tokenizer = AutoTokenizer.from_pretrained(self.model_name)
            model = AutoModelForSequenceClassification.from_pretrained(self.model_name).to(self.device)
            # FIX: switch to inference mode so dropout (and any batch-norm)
            # layers are deterministic at prediction time; the original left
            # the model in training mode, making predictions nondeterministic.
            model.eval()
            return model, tokenizer
        except Exception as e:
            logger.error(f"Error loading model: {e}")
            raise

    @torch.no_grad()
    def predict_batch(self, texts: List[str], batch_size: int = 32) -> np.ndarray:
        """Predict the fake-news class label for each input text.

        Note: despite the original docstring's wording ("probability"), this
        returns the argmax class index per text, not probabilities.

        Args:
            texts: Input strings to classify.
            batch_size: Number of texts tokenized and scored per forward pass.

        Returns:
            1-D numpy array of predicted class indices, one per input text
            (an empty array when ``texts`` is empty).
        """
        predictions = []

        for start in range(0, len(texts), batch_size):
            batch_texts = texts[start:start + batch_size]
            # Tokenize the whole mini-batch at once; pad/truncate to a fixed
            # max length so tensors are rectangular.
            inputs = self.tokenizer(
                batch_texts,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=128,
            ).to(self.device)

            outputs = self.model(**inputs)
            # Move results back to CPU before converting to numpy.
            batch_predictions = outputs.logits.argmax(dim=1).cpu().numpy()
            predictions.extend(batch_predictions)

        return np.array(predictions)
|
| 126 |
|
| 127 |
class RecommendationSystem:
|
| 128 |
def __init__(self, data_path: Path, model_name: str):
|