Spaces:
Sleeping
Sleeping
third attempt
Browse files
app.py
CHANGED
@@ -96,18 +96,13 @@ def predict_toxicity(text, model, tokenizer, device, model_name):

Removed (old version):

 96
 97      if model_name == "lmsys/toxicchat-t5-large-v1.0":
 98          prefix = "ToxicChat: "
 99 -        inputs = tokenizer…                      (remainder of line truncated in page extraction)
100
101          with torch.no_grad():
102 -            outputs = model.generate(inputs…     (remainder of line truncated in page extraction)
103
104          prediction = tokenizer.decode(outputs[0], skip_special_tokens=True).strip().lower()
105 -
106 -        # Print raw output for debugging
107 -        print(f"Raw model output: {prediction}")
108 -
109 -        # Adjust the condition to check for both "positive" and potential variations
110 -        prediction == "Toxic" if prediction == "positive" else "Not Toxic"
111      else:
112          inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=128, padding="max_length").to(device)
113

Added (new version):

 96
 97      if model_name == "lmsys/toxicchat-t5-large-v1.0":
 98          prefix = "ToxicChat: "
 99 +        inputs = tokenizer(prefix + text, return_tensors="pt", max_length=512, truncation=True).to(device)
100
101          with torch.no_grad():
102 +            outputs = model.generate(**inputs)
103
104          prediction = tokenizer.decode(outputs[0], skip_special_tokens=True).strip().lower()
105 +        prediction = "Toxic" if prediction == "positive" else "Not Toxic"
106      else:
107          inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=128, padding="max_length").to(device)
108

Summary of the change: the old line 110 used a comparison (`prediction == "Toxic" if …`), which evaluated and discarded the result instead of assigning it; the new line 105 replaces it with an assignment (`prediction = "Toxic" if …`). The debug print was removed, and the tokenizer/generate calls were completed to pass the prefixed text and unpack the tokenizer output into `model.generate`.