chgrdj committed
Commit 5c95703 · verified · 1 Parent(s): 724bd54

Update app.py

Files changed (1)
  1. app.py +16 -19
app.py CHANGED
@@ -13,34 +13,31 @@ st.markdown(
     "Enter a legitimate URL and a potentially typosquatted URL to see the classifier's prediction."
 )
 
-# Load the classifier model from Hugging Face
-model_path = "./URLGuardian"  # Model repository on Hugging Face
-st.markdown(model_path
-)
+model_path = "./URLGuardian"
 classifier = load_classifier(model_path)
 
 # URL inputs
 url = st.text_input("Enter the URL:", value="https://example.com")
 
 # Typosquatting detection on button click
-if st.button("Check Typosquatting"):
-    if legit_url and suspect_url:
-
+if st.button("Check Safety of the url"):
+    if url:
+        # Run the classifier on the input URL
         result = classifier(url)[0]
-        label = result['label']
-        score = result['score']
-
-        # Adjust the label names as per the model's documentation.
-        # This example assumes the label for a typosquatted URL might include "typo".
-        if "typo" in label.lower():
+        label = result["label"]
+        score = result["score"]
+
+        # Display result based on the label
+        # Adjust the label checking logic based on the model's documentation.
+        if "safe" in label.lower():
             st.success(
-                f"The model predicts that '{suspect_url}' is likely a typosquatted version of '{legit_url}' "
-                f"with a confidence of {score * 100:.2f}%."
+                f"The URL '{url}' is considered safe with a confidence of {score * 100:.2f}%."
             )
         else:
-            st.warning(
-                f"The model predicts that '{suspect_url}' is NOT likely a typosquatted version of '{legit_url}' "
-                f"with a confidence of {score * 100:.2f}%."
+            st.error(
+                f"The URL '{url}' is considered suspicious with a confidence of {score * 100:.2f}%."
             )
+        # Optionally, you can display the full result for debugging purposes:
+        st.write("Full classification output:", result)
     else:
-        st.error("Please enter both a legitimate URL and a potentially typosquatted URL.")
+        st.error("Please enter a URL.")
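Note: load_classifier is defined earlier in app.py, outside this hunk. A minimal sketch of what such a helper might look like, assuming URLGuardian is a standard Hugging Face text-classification checkpoint loaded with transformers.pipeline and cached with st.cache_resource; the decorator, function body, and pipeline task are assumptions for illustration, not part of this commit:

import streamlit as st
from transformers import pipeline

@st.cache_resource  # assumed: cache the pipeline so it is not rebuilt on every Streamlit rerun
def load_classifier(model_path: str):
    # A text-classification pipeline returns a list of dicts such as
    # [{"label": "...", "score": 0.97}], which matches how the app reads
    # result = classifier(url)[0] and then result["label"] / result["score"].
    return pipeline("text-classification", model=model_path)

With a checkpoint of that shape, the `"safe" in label.lower()` check in the new code depends entirely on the model's label names, which is why the diff keeps the comment about adjusting the label-checking logic to the model's documentation.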