# app.py — Extractive QA Bot (Hugging Face Space by MaawaKhalid, commit fb1b355)
import gradio as gr
from transformers import pipeline
# Load the extractive question-answering pipeline (model + tokenizer).
# Downloads weights from the Hugging Face Hub on first run (network I/O),
# so this runs once at import time rather than per request.
qa_pipeline = pipeline('question-answering', model='MaawaKhalid/Extractive-QA-Bot')
def answer_question(context, question):
    """Return the answer span extracted from *context* for *question*.

    Thin wrapper around the module-level ``qa_pipeline``: the pipeline
    returns a dict (answer, score, start, end) and only the ``'answer'``
    text is surfaced to the UI.
    """
    prediction = qa_pipeline(question=question, context=context)
    return prediction['answer']
# Build the Gradio UI.
# FIX: the original used ``gr.inputs.Textbox``, a namespace deprecated in
# Gradio 3.x and removed in 4.x — on current Gradio it raises
# ``AttributeError: module 'gradio' has no attribute 'inputs'``.
# The top-level ``gr.Textbox`` component accepts the same
# ``label``/``placeholder`` arguments.
interface = gr.Interface(
    fn=answer_question,
    inputs=[
        gr.Textbox(label="Context", placeholder="Enter some text to ask questions from"),
        gr.Textbox(label="Question", placeholder="Ask a question based on the context"),
    ],
    outputs="text",
    title="Extractive QA Bot",
    description="Ask questions and get answers based on the provided context.",
)

# Start the web server (blocks until the process is stopped).
interface.launch()