import streamlit as st
import pandas as pd
from transformers import pipeline, AutoTokenizer, AutoModelForQuestionAnswering

@st.cache_resource  # st.cache is deprecated; cache the loaded pipeline so the model loads once, not on every rerun
def load_qa_model():
    # NOTE: google/mobilebert-uncased is a base checkpoint without a fine-tuned
    # QA head (transformers will warn that the head is randomly initialized),
    # so swap in a QA fine-tuned checkpoint for meaningful answers.
    model_name = "google/mobilebert-uncased"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForQuestionAnswering.from_pretrained(model_name)
    return pipeline("question-answering", model=model, tokenizer=tokenizer)

qa = load_qa_model()
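# The pipeline also accepts a single question/context pair and returns a dict
# with (at least) 'answer' and 'score' keys, which is the shape process_batch
# relies on below. Illustrative call (not part of the app flow):
#   qa(question="Who wrote Hamlet?", context="Hamlet was written by Shakespeare.")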

def process_batch(data):
    # Expects the uploaded CSV to provide 'Question' and 'Article' columns;
    # runs the QA pipeline row by row and collects each answer with its
    # confidence score.
    results = []
    for _, row in data.iterrows():
        answer = qa(question=row['Question'], context=row['Article'])
        results.append({
            'Question': row['Question'],
            'Article': row['Article'],
            'Answer': answer['answer'],
            'Score': answer['score']
        })
    return results

st.title("Batch Question Answering App")

uploaded_file = st.file_uploader("Upload a CSV file", type="csv")

if uploaded_file is not None:
    data = pd.read_csv(uploaded_file)
    st.write("Uploaded file:")
    st.write(data)

    if st.button("Process Batch"):
        with st.spinner("Processing Batch..."):
            results = process_batch(data)
            st.write("Batch Processing Results:")
            for result in results:
                st.write("Question:", result['Question'])
                st.write("Article:", result['Article'])
                st.write("Answer:", result['Answer'])
                st.write("Score:", result['Score'])
                st.write("------")