import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the toxicity classifier and its tokenizer from the Hugging Face Hub.
tokenizer = AutoTokenizer.from_pretrained("Sk1306/student_chat_toxicity_classifier_model")
model = AutoModelForSequenceClassification.from_pretrained("Sk1306/student_chat_toxicity_classifier_model")


def predict_toxicity(text):
    # Tokenize the input message and run it through the model (no gradients needed at inference).
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=128)
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits

    # Apply softmax to get class probabilities.
    probabilities = torch.nn.functional.softmax(logits, dim=-1)

    # Get the predicted class (index 0 for non-toxic, index 1 for toxic).
    predicted_class = torch.argmax(probabilities, dim=-1).item()

    # Map the prediction to a human-readable label.
    if predicted_class == 0:
        return "Non-toxic"
    else:
        return "Toxic"


interface = gr.Interface(
    fn=predict_toxicity,
    inputs="text",   # Text input from the user
    outputs="text",  # Text output for the prediction
    title="Student Chat Toxicity Classifier",
    description="Enter a message to check whether it is toxic or non-toxic.",
    theme="dark",
    examples=[
        "You can copy in exam to pass!",
        "Study well. Hard work pays off!",
        "Take these drugs. It will boost your memory.",
    ],
)

interface.launch(inline=False)
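
# A minimal sketch of querying the running app programmatically with gradio_client,
# run from a separate process while the app above is up. It assumes the default
# local address/port (http://127.0.0.1:7860) and that the Interface exposes its
# default "/predict" endpoint; adjust both if your launch settings differ.
#
# from gradio_client import Client
#
# client = Client("http://127.0.0.1:7860")
# result = client.predict("You can copy in exam to pass!", api_name="/predict")
# print(result)  # expected to print "Toxic" or "Non-toxic" for the given message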