drishya1 committed
Commit ab8e25c · verified · 1 Parent(s): 993f80a

checking for toxicchat-t5-large

Files changed (1): app.py (+6, -1)
app.py CHANGED
@@ -102,7 +102,12 @@ def predict_toxicity(text, model, tokenizer, device, model_name):
         outputs = model.generate(inputs, max_new_tokens=5)

         prediction = tokenizer.decode(outputs[0], skip_special_tokens=True).strip().lower()
-        prediction = "Toxic" if prediction == "positive" else "Not Toxic"
+
+        # Print raw output for debugging
+        print(f"Raw model output: {prediction}")
+
+        # Adjust the condition to check for both "positive" and potential variations
+        prediction = "Toxic" if prediction in ["positive", "pos", "toxic", "yes"] else "Not Toxic"
     else:
         inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=128, padding="max_length").to(device)
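
For reference, the label-mapping logic this commit introduces can be exercised on its own. The helper below is a minimal sketch, not part of app.py; the name map_generated_label is hypothetical, and it assumes the seq2seq model emits a short free-form label such as "positive" or "toxic".

# Hypothetical standalone helper mirroring the new mapping in app.py.
def map_generated_label(raw: str) -> str:
    """Map the decoded seq2seq output to a binary toxicity label."""
    normalized = raw.strip().lower()
    # Accept the same variations the commit checks for.
    return "Toxic" if normalized in ["positive", "pos", "toxic", "yes"] else "Not Toxic"

# Example (assumed model outputs):
# map_generated_label("Positive")  -> "Toxic"
# map_generated_label("negative")  -> "Not Toxic"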