# ADRv2024 / app.py
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the fine-tuned ADR classification model and its tokenizer from the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained("MarkAdamsMSBA24/ADRv2024")
model = AutoModelForSequenceClassification.from_pretrained("MarkAdamsMSBA24/ADRv2024")
model.eval()  # inference only: disable dropout
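
# Optional sketch (an assumption, not part of the original file): move the model to a GPU
# when one is available for faster inference. If this were added, the encoded inputs in
# get_prediction would also need to be moved to the same device.
#
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   model.to(device)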
# Define the prediction function: returns class probabilities for a single text input
def get_prediction(text):
    text = str(text).lower()
    encoded_input = tokenizer(text, return_tensors="pt", truncation=True)
    with torch.no_grad():
        output = model(**encoded_input)
    # output.logits has shape (1, 2); softmax over the class dimension
    scores = torch.nn.functional.softmax(output.logits[0], dim=-1)
    return {
        "Severe Reaction": float(scores[1]),
        "Non-severe Reaction": float(scores[0]),
    }
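
# Minimal local sanity check (a sketch, not part of the original app; the sentence and
# the scores shown are illustrative only):
#
#   >>> get_prediction("I developed a severe rash after taking the medication.")
#   {'Severe Reaction': 0.87, 'Non-severe Reaction': 0.13}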
iface = gr.Interface(
    fn=get_prediction,
    inputs=gr.Textbox(lines=4, placeholder="Type your text..."),
    # get_prediction returns a single {label: probability} dict, so a Label component
    # matches the return value (separate Textbox/Dataframe outputs would not)
    outputs=gr.Label(label="Prediction"),
    title="BERT Sequence Classification Demo",
    description="This demo uses a BERT model hosted on Hugging Face to classify text sequences.",
)
if __name__ == "__main__":
    iface.launch()
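
# Hedged usage sketch (not part of the original file): once deployed as a Hugging Face
# Space, the demo could be queried programmatically with gradio_client. The Space id
# "paragon-analytics/ADRv2024" below is an assumption based on the repository name.
#
#   from gradio_client import Client
#   client = Client("paragon-analytics/ADRv2024")
#   result = client.predict("Patient reported anaphylaxis after the first dose.", api_name="/predict")
#   print(result)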