fschwartzer commited on
Commit
525bf5b
verified
1 Parent(s): db6c811

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -116
app.py CHANGED
@@ -2,8 +2,6 @@ import streamlit as st
2
  import pandas as pd
3
  from transformers import BartForConditionalGeneration, TapexTokenizer, T5ForConditionalGeneration, T5Tokenizer
4
  from prophet import Prophet
5
- import datetime
6
- import sentencepiece as spm
7
 
8
  # Caminho para o arquivo CSS, ajuste conforme a estrutura do seu projeto
9
  css_file = "style.css"
@@ -35,106 +33,12 @@ html_content = f"""
35
  # Aplicar o markdown combinado no Streamlit
36
  st.markdown(html_content, unsafe_allow_html=True)
37
 
38
-
39
- # File upload interface
40
- uploaded_file = st.file_uploader("Carregue um arquivo CSV ou XLSX", type=['csv', 'xlsx'])
41
-
42
- if uploaded_file:
43
- if 'all_anomalies' not in st.session_state:
44
- with st.spinner('Aplicando modelo de série temporal...'):
45
- # Load the file into a DataFrame
46
- if uploaded_file.name.endswith('.csv'):
47
- df = pd.read_csv(uploaded_file, quotechar='"', encoding='utf-8')
48
- elif uploaded_file.name.endswith('.xlsx'):
49
- df = pd.read_excel(uploaded_file)
50
-
51
- # Data preprocessing for Prophet
52
- new_df = df.iloc[2:, 9:-1].fillna(0)
53
- new_df.columns = df.iloc[1, 9:-1]
54
- new_df.columns = new_df.columns.str.replace(r" \(\d+\)", "", regex=True)
55
-
56
- month_dict = {
57
- 'Jan': '01', 'Fev': '02', 'Mar': '03', 'Abr': '04',
58
- 'Mai': '05', 'Jun': '06', 'Jul': '07', 'Ago': '08',
59
- 'Set': '09', 'Out': '10', 'Nov': '11', 'Dez': '12'
60
- }
61
-
62
- def convert_column_name(column_name):
63
- if column_name == 'Rótulos de Linha':
64
- return column_name
65
- parts = column_name.split('/')
66
- month = parts[0].strip()
67
- year = parts[1].strip()
68
- year = ''.join(filter(str.isdigit, year))
69
- month_number = month_dict.get(month, '00')
70
- return f"{month_number}/{year}"
71
-
72
- new_df.columns = [convert_column_name(col) for col in new_df.columns]
73
- new_df.columns = pd.to_datetime(new_df.columns, errors='coerce')
74
- new_df.rename(columns={new_df.columns[0]: 'Rotulo'}, inplace=True)
75
- df_clean = new_df.copy()
76
-
77
- # Create an empty DataFrame to store all anomalies
78
- all_anomalies = pd.DataFrame()
79
-
80
- # Process each row in the DataFrame
81
- for index, row in df_clean.iterrows():
82
- data = pd.DataFrame({
83
- 'ds': [col for col in df_clean.columns if isinstance(col, pd.Timestamp)],
84
- 'y': row[[isinstance(col, pd.Timestamp) for col in df_clean.columns]].values
85
- })
86
-
87
- data = data[data['y'] > 0].reset_index(drop=True)
88
- if data.empty or len(data) < 2:
89
- print(f"Skipping group {row['Rotulo']} because there are less than 2 non-zero observations.")
90
- continue
91
-
92
- try:
93
- model = Prophet(interval_width=0.95)
94
- model.fit(data)
95
- except ValueError as e:
96
- print(f"Skipping group {row['Rotulo']} due to error: {e}")
97
- continue
98
-
99
- future = model.make_future_dataframe(periods=12, freq='M')
100
- forecast = model.predict(future)
101
-
102
- num_real = len(data)
103
- num_forecast = len(forecast)
104
- real_values = list(data['y']) + [None] * (num_forecast - num_real)
105
- forecast['real'] = real_values
106
- anomalies = forecast[(forecast['real'] < forecast['yhat_lower']) | (forecast['real'] > forecast['yhat_upper'])]
107
-
108
- anomalies['Group'] = row['Rotulo']
109
- all_anomalies = pd.concat([all_anomalies, anomalies[['ds', 'real', 'Group']]], ignore_index=True)
110
-
111
- # Store the result in session state
112
- all_anomalies.rename(columns={"ds": "datetime", "real": "monetary value", "Group": "group"}, inplace=True)
113
- all_anomalies = all_anomalies[all_anomalies['monetary value'].astype(float) >= 10000000.00]
114
- all_anomalies['monetary value'] = all_anomalies['monetary value'].apply(lambda x: f"{x:.2f}")
115
- all_anomalies.sort_values(by=['monetary value'], ascending=False, inplace=True)
116
- all_anomalies = all_anomalies.fillna('').astype(str)
117
-
118
- # Store in session state
119
- st.session_state['all_anomalies'] = all_anomalies
120
-
121
- # Display the dataframe if it exists in session state
122
- if 'all_anomalies' in st.session_state:
123
- st.dataframe(st.session_state['all_anomalies'].head())
124
- else:
125
- st.warning("Ainda não há dados de anomalias para exibir. Por favor, carregue um arquivo.")
126
-
127
- else:
128
- st.warning("Por favor, carregue um arquivo CSV ou XLSX para começar.")
129
-
130
- # Load translation models
131
  pt_en_translator = T5ForConditionalGeneration.from_pretrained("unicamp-dl/translation-pt-en-t5")
132
  en_pt_translator = T5ForConditionalGeneration.from_pretrained("unicamp-dl/translation-en-pt-t5")
133
- tokenizer = T5Tokenizer.from_pretrained("unicamp-dl/translation-pt-en-t5")
134
-
135
- # Load TAPEX model
136
  tapex_model = BartForConditionalGeneration.from_pretrained("microsoft/tapex-large-finetuned-wtq")
137
  tapex_tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-large-finetuned-wtq")
 
138
 
139
  def translate(text, model, tokenizer, source_lang="pt", target_lang="en"):
140
  input_ids = tokenizer.encode(text, return_tensors="pt", add_special_tokens=True)
@@ -150,32 +54,43 @@ def response(user_question, table_data):
150
  response_pt = translate(response_en, en_pt_translator, tokenizer, source_lang="en", target_lang="pt")
151
  return response_pt
152
 
153
- # Streamlit interface
154
- if 'all_anomalies' in st.session_state:
155
- st.dataframe(st.session_state['all_anomalies'].head())
156
- else:
157
- st.warning("Ainda não há dados de anomalias para exibir. Por favor, carregue um arquivo.")
 
158
 
159
- # Chat history
160
- if 'history' not in st.session_state:
161
- st.session_state['history'] = []
162
 
163
- user_question = st.text_input("Escreva sua questão aqui:", "")
 
 
164
 
 
 
 
 
 
 
 
 
 
 
165
  if user_question:
166
- st.session_state['history'].append(('👤', user_question))
167
- st.markdown(f"**👤 {user_question}**")
168
-
169
  bot_response = response(user_question, st.session_state['all_anomalies'])
170
-
171
  st.session_state['history'].append(('🤖', bot_response))
172
- st.markdown(f"<div style='text-align: right'>**🤖 {bot_response}**</div>", unsafe_allow_html=True)
173
-
174
- if st.button("Limpar"):
175
- st.session_state['history'] = []
176
 
 
177
  for sender, message in st.session_state['history']:
178
  if sender == '👤':
179
  st.markdown(f"**👤 {message}**")
180
  elif sender == '🤖':
181
- st.markdown(f"<div style='text-align: right'>**🤖 {message}**</div>", unsafe_allow_html=True)
 
 
 
 
 
2
  import pandas as pd
3
  from transformers import BartForConditionalGeneration, TapexTokenizer, T5ForConditionalGeneration, T5Tokenizer
4
  from prophet import Prophet
 
 
5
 
6
  # Caminho para o arquivo CSS, ajuste conforme a estrutura do seu projeto
7
  css_file = "style.css"
 
33
  # Aplicar o markdown combinado no Streamlit
34
  st.markdown(html_content, unsafe_allow_html=True)
35
 
36
+ # Carregar os modelos de tradução e TAPEX
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
  pt_en_translator = T5ForConditionalGeneration.from_pretrained("unicamp-dl/translation-pt-en-t5")
38
  en_pt_translator = T5ForConditionalGeneration.from_pretrained("unicamp-dl/translation-en-pt-t5")
 
 
 
39
  tapex_model = BartForConditionalGeneration.from_pretrained("microsoft/tapex-large-finetuned-wtq")
40
  tapex_tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-large-finetuned-wtq")
41
+ tokenizer = T5Tokenizer.from_pretrained("unicamp-dl/translation-pt-en-t5")
42
 
43
  def translate(text, model, tokenizer, source_lang="pt", target_lang="en"):
44
  input_ids = tokenizer.encode(text, return_tensors="pt", add_special_tokens=True)
 
54
  response_pt = translate(response_en, en_pt_translator, tokenizer, source_lang="en", target_lang="pt")
55
  return response_pt
56
 
57
+ def load_data(uploaded_file):
58
+ if uploaded_file.name.endswith('.csv'):
59
+ df = pd.read_csv(uploaded_file, quotechar='"', encoding='utf-8')
60
+ elif uploaded_file.name.endswith('.xlsx'):
61
+ df = pd.read_excel(uploaded_file)
62
+ return df
63
 
64
+ def preprocess_data(df):
65
+ # Implementar as etapas de pré-processamento aqui
66
+ return df
67
 
68
+ def apply_prophet(df):
69
+ # Implementar o modelo Prophet aqui
70
+ return df
71
 
72
+ # Interface para carregar arquivo
73
+ uploaded_file = st.file_uploader("Carregue um arquivo CSV ou XLSX", type=['csv', 'xlsx'])
74
+ if uploaded_file and 'all_anomalies' not in st.session_state:
75
+ df = load_data(uploaded_file)
76
+ df = preprocess_data(df)
77
+ all_anomalies = apply_prophet(df)
78
+ st.session_state['all_anomalies'] = all_anomalies
79
+
80
+ # Interface para perguntas do usuário
81
+ user_question = st.text_input("Escreva sua questão aqui:", "")
82
  if user_question:
 
 
 
83
  bot_response = response(user_question, st.session_state['all_anomalies'])
84
+ st.session_state['history'].append(('👤', user_question))
85
+ st.session_state['history'].append(('🤖', bot_response))
 
 
 
 
86
 
87
+ # Mostrar hist贸rico de conversa
88
  for sender, message in st.session_state['history']:
89
  if sender == '👤':
90
  st.markdown(f"**👤 {message}**")
91
  elif sender == '🤖':
92
+ st.markdown(f"**🤖 {message}**", unsafe_allow_html=True)
93
+
94
+ # Botão para limpar histórico
95
+ if st.button("Limpar histórico"):
96
+ st.session_state['history'] = []