Update app.py
app.py CHANGED

@@ -1,12 +1,15 @@
 import streamlit as st
-from transformers import pipeline
+from transformers import pipeline, AutoConfig, AutoModelForSequenceClassification, AutoTokenizer
 
 @st.cache_resource
 def load_classifier(model_path: str):
-    ...
-    ...
+    id2label = {0: "Safe", 1: "Unsafe"}
+    label2id = {"Safe": 0, "Unsafe": 1}
+    config = AutoConfig.from_pretrained(model_path, id2label=id2label, label2id=label2id)
+    model = AutoModelForSequenceClassification.from_pretrained(model_path, config=config)
+    tokenizer = AutoTokenizer.from_pretrained(model_path)
+    return pipeline("text-classification", model=model, tokenizer=tokenizer)
 
-# App Title and description
 st.title("URL Typosquatting Detection with URLGuardian")
 st.markdown(
     "This app uses the **URLGuardian** classifier by Anvilogic from the Hugging Face Hub to detect potential typosquatting. "
@@ -16,20 +19,14 @@ st.markdown(
 model_path = "./URLGuardian"
 classifier = load_classifier(model_path)
 
-# URL inputs
 url = st.text_input("Enter the URL:", value="https://example.com")
 
-# Typosquatting detection on button click
 if st.button("Check Safety of the url"):
     if url:
-        # Run the classifier on the input URL
         result = classifier(url)[0]
         label = result["label"]
         score = result["score"]
-
-        # Display result based on the label
-        # Adjust the label checking logic based on the model's documentation.
-        if "safe" in label.lower():
+        if label == "Safe":
             st.success(
                 f"The URL '{url}' is considered safe with a confidence of {score * 100:.2f}%."
             )
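
The updated loader pins the checkpoint's label mapping through AutoConfig, so the pipeline returns "Safe"/"Unsafe" labels and the app can use the exact comparison label == "Safe" instead of substring matching. Below is a minimal sketch of the same loading path outside Streamlit, assuming ./URLGuardian contains the fine-tuned model and tokenizer files; the example URL and printed output are illustrative only, not part of the commit.

# Minimal sketch (illustrative, not part of the commit): exercise the new
# loading path outside Streamlit and confirm the label names the pipeline emits.
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, pipeline

model_path = "./URLGuardian"  # local checkpoint directory, as in app.py

# Override the checkpoint's id2label/label2id so predictions read "Safe"/"Unsafe".
config = AutoConfig.from_pretrained(
    model_path,
    id2label={0: "Safe", 1: "Unsafe"},
    label2id={"Safe": 0, "Unsafe": 1},
)
model = AutoModelForSequenceClassification.from_pretrained(model_path, config=config)
tokenizer = AutoTokenizer.from_pretrained(model_path)
classifier = pipeline("text-classification", model=model, tokenizer=tokenizer)

result = classifier("https://examp1e.com")[0]   # hypothetical typosquatted URL
print(result)                                   # e.g. {'label': 'Unsafe', 'score': ...}
print(result["label"] == "Safe")                # the same check app.py performs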