Rename inference.py to handler.py
inference.py → handler.py
RENAMED
@@ -3,19 +3,12 @@ import torch
 from huggingface_hub import HfApi
 import torch.nn.functional as F
 from peft import PeftModel
-import os
 
-hf_token = os.getenv("HF_TOKEN")
 
-
-if not hf_token:
-    raise ValueError("Hugging Face token not found in environment variables!")
-
-HfApi().set_access_token(hf_token)
 # Load model and tokenizer
 model_name = "munzirmuneer/phishing_url_gemma_pytorch" # Replace with your specific model
-model_name2 = "google/gemma-2b"
-tokenizer = AutoTokenizer.from_pretrained(
+#model_name2 = "google/gemma-2b"
+tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=True)
 model = AutoModelForSequenceClassification.from_pretrained(model_name, use_auth_token=True)
 model = PeftModel.from_pretrained(model, model_name, use_auth_token=True)
 
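The rename suggests this file is meant to act as a custom inference handler (Hugging Face Inference Endpoints, for example, load a handler.py that exposes an EndpointHandler class). That class is not part of the diff above, so the following is only a minimal sketch of how the retained loading code might be wrapped and called; the EndpointHandler structure, the "inputs"/"label"/"score" payload shape, and the softmax scoring via torch.nn.functional (matching the F import kept in the file) are assumptions for illustration, not part of this commit.

# Minimal sketch of a handler.py for Hugging Face Inference Endpoints,
# reusing the loading lines from the diff above. The class and payload
# layout are assumptions; the diff only shows the model-loading code.
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from peft import PeftModel


class EndpointHandler:
    def __init__(self, path: str = ""):
        # Same identifiers as in the diff; use_auth_token=True assumes the
        # environment is already authenticated with the Hub.
        model_name = "munzirmuneer/phishing_url_gemma_pytorch"
        self.tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=True)
        base = AutoModelForSequenceClassification.from_pretrained(model_name, use_auth_token=True)
        self.model = PeftModel.from_pretrained(base, model_name, use_auth_token=True)
        self.model.eval()

    def __call__(self, data: dict) -> list:
        # Inference Endpoints pass a JSON payload with an "inputs" field.
        url = data.get("inputs", "")
        enc = self.tokenizer(url, return_tensors="pt", truncation=True)
        with torch.no_grad():
            logits = self.model(**enc).logits
        probs = F.softmax(logits, dim=-1).squeeze(0)
        # Label indices are an assumption; check the model's config.id2label.
        return [{"label": i, "score": p.item()} for i, p in enumerate(probs)]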