|
import os

import torch
import torch.nn.functional as F
from huggingface_hub import HfApi, login
from peft import PeftModel
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
|
|
# --- Authentication & model loading -----------------------------------------
# NOTE(review): the original called HfApi().set_access_token("HUGGINGFACE_HUB_TOKEN"),
# which (a) is not part of the current huggingface_hub API and (b) passed the
# literal placeholder string as if it were a real token. Read the token from
# the environment and authenticate with the supported `login()` helper instead.
hf_token = os.environ.get("HUGGINGFACE_HUB_TOKEN")
if hf_token:
    login(token=hf_token)

model_name = "munzirmuneer/phishing_url_gemma_pytorch"

# `use_auth_token` is deprecated in transformers; `token=True` reuses the
# cached credentials established by login() above.
tokenizer = AutoTokenizer.from_pretrained(model_name, token=True)
model = AutoModelForSequenceClassification.from_pretrained(model_name, token=True)
# Apply the PEFT (LoRA) adapter hosted in the same repo on top of the base model.
model = PeftModel.from_pretrained(model, model_name, token=True)
model.eval()  # inference-only script: disable dropout / training-mode layers
|
|
|
def predict(input_text):
    """Classify *input_text* with the module-level model and tokenizer.

    Returns a tuple ``(predicted_class_index, class_probabilities)`` where
    the probabilities are a plain Python list for the first (and only)
    item in the batch.
    """
    encoded = tokenizer(input_text, return_tensors="pt", truncation=True, padding=True)

    # Inference only — no gradient tracking needed.
    with torch.no_grad():
        output = model(**encoded)

    # Convert raw logits to a probability distribution over classes.
    probabilities = F.softmax(output.logits, dim=-1)
    predicted = probabilities.argmax(dim=-1)
    return predicted.item(), probabilities[0].tolist()
|
|