import streamlit as st
import pandas as pd
import torch
from transformers import pipeline
import datetime
# Load the spreadsheet once at startup; blank out NaNs and coerce every
# cell to str, since the table-QA pipeline expects an all-string table.
df = pd.read_excel("discrepantes.xlsx").fillna("").astype(str)
# Function to generate a response using the TAPAS table-QA model
@st.cache_resource
def _load_tqa():
    """Load the TAPAS table-question-answering pipeline once per process.

    Cached with st.cache_resource so the (large) model is not re-downloaded
    and re-instantiated on every user question, which the original code did.
    """
    return pipeline(task="table-question-answering",
                    model="google/tapas-large-finetuned-wtq")


def response(user_question, df):
    """Answer *user_question* against the string table *df*.

    Parameters
    ----------
    user_question : str
        Natural-language question about the table.
    df : pandas.DataFrame
        All-string DataFrame (see loading code) used as the QA table.

    Returns
    -------
    dict
        ``{"Resposta": answer}`` where ``answer`` is the model's answer string.
    """
    started = datetime.datetime.now()
    tqa = _load_tqa()
    answer = tqa(table=df, query=user_question)["answer"]
    # Crude latency log to stdout; kept from the original for debugging.
    print(datetime.datetime.now() - started)
    return {"Resposta": answer}
# Streamlit interface
# Placeholder for custom CSS/HTML styling (currently empty).
st.markdown("""
""", unsafe_allow_html=True)

# Chat history — a list of (sender_marker, message) tuples kept across reruns.
if 'history' not in st.session_state:
    st.session_state['history'] = []

# Input box for user question ("Write your question here" in Portuguese;
# original text was mojibake-damaged, restored to "questão").
user_question = st.text_input("Escreva sua questão aqui:", "")

if user_question:
    # Record and echo the user's question with the human emoji.
    st.session_state['history'].append(('👤', user_question))
    st.markdown(f"**👤 {user_question}**")

    # Generate the model's answer.
    bot_response = response(user_question, df)["Resposta"]

    # Record and render the bot answer with the robot emoji.
    st.session_state['history'].append(('🤖', bot_response))
    st.markdown(f"**🤖 {bot_response}**", unsafe_allow_html=True)
# Clear history button ("Limpar" = "Clear").
if st.button("Limpar"):
    st.session_state['history'] = []

# Display chat history.
# NOTE(review): the original branched on the sender emoji, but both branches
# compared against the same (mojibake-garbled) character, so the elif was
# dead code. Rendering with the stored marker works for any sender value.
for sender, message in st.session_state['history']:
    st.markdown(f"**{sender} {message}**", unsafe_allow_html=True)