import streamlit as st
import pandas as pd
import torch
from transformers import TapexTokenizer, BartForConditionalGeneration
import datetime
df = pd.read_excel('discrepantes.xlsx', index_col='Unnamed: 0')
df.fillna(0, inplace=True)
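# TAPEX linearizes the table into a text sequence, so all cells are converted to strings first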
table_data = df.astype(str)
print(table_data.head())
def response(user_question, table_data):
    a = datetime.datetime.now()
    # Load the TAPEX model and tokenizer (re-loaded on every call)
    model_name = "microsoft/tapex-large-finetuned-wtq"
    model = BartForConditionalGeneration.from_pretrained(model_name)
    tokenizer = TapexTokenizer.from_pretrained(model_name)
    queries = [user_question]
    # Encode the table together with the question and generate the answer as text
    encoding = tokenizer(table=table_data, query=queries, padding=True, return_tensors="pt", truncation=True)
    outputs = model.generate(**encoding)
    ans = tokenizer.batch_decode(outputs, skip_special_tokens=True)
    query_result = {
        "query": user_question,
        "answer": ans[0]
    }
    b = datetime.datetime.now()
    print(b - a)  # log inference time
    return query_result, table_data
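# Example usage (sketch, kept as a comment so it does not run inside the Streamlit app).
# The question below is hypothetical; replace it with one that matches the columns of
# discrepantes.xlsx:
#
#     result, _ = response("Qual é o maior valor da tabela?", table_data)
#     print(result["answer"])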
# Streamlit interface
st.markdown("""
<div style='display: flex; align-items: center;'>
<div style='width: 40px; height: 40px; background-color: green; border-radius: 50%; margin-right: 5px;'></div>
<div style='width: 40px; height: 40px; background-color: red; border-radius: 50%; margin-right: 5px;'></div>
<div style='width: 40px; height: 40px; background-color: yellow; border-radius: 50%; margin-right: 5px;'></div>
<span style='font-size: 40px; font-weight: bold;'>Chatbot do Tesouro RS</span>
</div>
""", unsafe_allow_html=True)
# Chat history
if 'history' not in st.session_state:
    st.session_state['history'] = []
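# st.session_state persists across Streamlit reruns, so the chat history survives each interaction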
# Input box for user question
user_question = st.text_input("Escreva sua questão aqui:", "")
if user_question:
    # Add person emoji when typing question
    st.session_state['history'].append(('👤', user_question))
    st.markdown(f"**👤 {user_question}**")
    # Generate the response and keep only the answer text
    query_result, _ = response(user_question, table_data)
    bot_response = query_result["answer"]
    # Add robot emoji when generating response and align to the right
    st.session_state['history'].append(('🤖', bot_response))
    st.markdown(f"<div style='text-align: right'>**🤖 {bot_response}**</div>", unsafe_allow_html=True)
# Clear history button
if st.button("Limpar"):
st.session_state['history'] = []
# Display chat history
for sender, message in st.session_state['history']:
    if sender == '👤':
        st.markdown(f"**👤 {message}**")
    elif sender == '🤖':
        st.markdown(f"<div style='text-align: right'>**🤖 {message}**</div>", unsafe_allow_html=True)