import streamlit as st
import pandas as pd
import torch
from transformers import AutoTokenizer, AutoModelForTableQuestionAnswering
import datetime

df = pd.read_excel('discrepantes.xlsx')
df.fillna(0, inplace=True)
# TAPAS requires every table cell to be a string
table_data = df.astype(str)
print(table_data.head())

MODEL_NAME = "google/tapas-base-finetuned-wtq"

@st.cache_resource
def load_model():
    # Cache the model and tokenizer so they are loaded once,
    # not re-downloaded on every Streamlit rerun
    model = AutoModelForTableQuestionAnswering.from_pretrained(MODEL_NAME)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    return model, tokenizer

def response(user_question, table_data):
    start = datetime.datetime.now()
    model, tokenizer = load_model()

    # The queries must be passed as a list
    encoding = tokenizer(
        table=table_data,
        queries=[user_question],
        padding=True,
        truncation=True,
        return_tensors="pt",
    )

    # TAPAS is not a seq2seq model, so instead of generate() we run a
    # forward pass and decode the cell-selection logits
    with torch.no_grad():
        outputs = model(**encoding)

    # Turn the raw logits into (row, column) coordinates of the selected
    # cells. Note: this WTQ checkpoint also predicts an aggregation
    # operator (outputs.logits_aggregation), which is ignored here.
    (predicted_answer_coordinates,) = tokenizer.convert_logits_to_predictions(
        encoding, outputs.logits.detach()
    )

    # Look the predicted coordinates up in the original table
    coordinates = predicted_answer_coordinates[0]
    if coordinates:
        answer_text = ", ".join(table_data.iat[coordinate] for coordinate in coordinates)
    else:
        answer_text = "Não foi possível encontrar uma resposta"

    query_result = {"Resposta": answer_text}

    print(datetime.datetime.now() - start)
    return query_result

# Streamlit interface
st.markdown("""