import gradio as gr
from transformers import pipeline
# Load the model and tokenizer from Hugging Face
qa_pipeline = pipeline('question-answering', model='MaawaKhalid/Extractive-QA-Bot')
# Define the function to answer questions
def answer_question(context, question):
    result = qa_pipeline(question=question, context=context)
    return result['answer']
# Create a simple Gradio interface
interface = gr.Interface(
    fn=answer_question,
    inputs=[
        gr.Textbox(label="Context", placeholder="Enter some text to ask questions from"),
        gr.Textbox(label="Question", placeholder="Ask a question based on the context")
    ],
    outputs="text",
    title="Extractive QA Bot",
    description="Ask questions and get answers based on the provided context."
)
# Launch the interface
interface.launch()
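# To try the demo locally (a minimal sketch, assuming the dependencies are
# installed and this file is saved as app.py):
#   pip install gradio transformers torch
#   python app.py
# Gradio serves the interface at http://127.0.0.1:7860 by default.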