yasserrmd committed on
Commit 0a4d4a6 · verified · 1 Parent(s): 4cafc7d

Update app.py

Files changed (1): app.py (+2 -2)
app.py CHANGED
@@ -15,7 +15,7 @@ pipe.to("cuda")
 
 # Load the NSFW classifier
 image_classifier = pipeline("image-classification", model="Falconsai/nsfw_image_detection",device=device)
-text_classifier = pipeline("sentiment-analysis", model="michellejieli/NSFW_text_classification",device=device)
+text_classifier = pipeline("text-classification", model="eliasalbouzidi/distilbert-nsfw-text-classifier",device=device)
 classifier("I see you’ve set aside this special time to humiliate yourself in public.")
 NSFW_THRESHOLD = 0.5
 
@@ -24,7 +24,7 @@ NSFW_THRESHOLD = 0.5
 def generate_sketch(prompt, num_inference_steps, guidance_scale):
     # Classify the text for NSFW content
     text_classification = text_classifier(prompt)
-
+    print(text_classification)
     # Check the classification results
     for result in text_classification:
         if result['label'] == 'nsfw' and result['score'] > NSFW_THRESHOLD:
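
For context, the commit swaps the prompt filter from michellejieli/NSFW_text_classification (loaded through the "sentiment-analysis" pipeline alias) to eliasalbouzidi/distilbert-nsfw-text-classifier under the plain "text-classification" task, and adds a print() so the new model's raw output can be inspected. The gate it feeds is sketched below as a minimal standalone version under stated assumptions: the is_nsfw() helper is invented for illustration, the device argument from app.py is omitted, and the 'nsfw' label string is taken from the diff's own threshold check (any other label is treated as safe here). Note that the unchanged context line classifier(...) calls a name neither hunk defines; it reads like a leftover that was meant to be text_classifier(...).

from transformers import pipeline

# Load the NSFW text classifier this commit switches to.
text_classifier = pipeline(
    "text-classification",
    model="eliasalbouzidi/distilbert-nsfw-text-classifier",
)
NSFW_THRESHOLD = 0.5

def is_nsfw(prompt):
    # Hypothetical helper, not part of app.py. The pipeline returns one
    # {'label': ..., 'score': ...} dict per input, which is what the
    # commit's new print(text_classification) exposes, e.g. something like
    # [{'label': 'safe', 'score': 0.99}] (the exact label strings come from
    # the model, not from this sketch).
    results = text_classifier(prompt)
    return any(
        r["label"] == "nsfw" and r["score"] > NSFW_THRESHOLD
        for r in results
    )

print(is_nsfw("a pencil sketch of a lighthouse at dawn"))

The print(text_classification) added at new line 27 is presumably a debugging aid for checking the new model's label names against the existing 'nsfw' comparison.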